From f41e0e5fd93df73dd272b9e9ba2232fdad0aafd6 Mon Sep 17 00:00:00 2001 From: Alexxxxxx <118710506+alexgao001@users.noreply.github.com> Date: Wed, 24 Jul 2024 17:22:59 +0800 Subject: [PATCH] feat: implement the gnfd peer for bsc nodes block syncing from Greenfield (#1) * feat: implement the gnfd peer for bsc nodes block syncing from Greenfield * fix: tolerate the err --- .github/workflows/docker-release.yml | 7 +- .github/workflows/evm-tests.yml | 56 - .github/workflows/integration-test.yml | 26 - .golangci.yml | 3 + README.md | 332 +- accounts/abi/bind/base_test.go | 594 --- accounts/abi/bind/bind_test.go | 2166 ---------- accounts/abi/bind/util_test.go | 139 - cmd/clef/consolecmd_test.go | 124 - cmd/clef/run_test.go | 109 - cmd/devp2p/internal/ethtest/suite_test.go | 155 - cmd/ethkey/message_test.go | 65 - cmd/ethkey/run_test.go | 54 - cmd/evm/t8n_test.go | 576 --- cmd/geth/accountcmd_test.go | 378 -- cmd/geth/attach_test.go | 83 - cmd/geth/consolecmd_test.go | 162 - cmd/geth/exportcmd_test.go | 46 - cmd/geth/genesis_test.go | 198 - cmd/geth/initnetwork_test.go | 147 - cmd/geth/logging_test.go | 237 - cmd/geth/main.go | 4 +- cmd/geth/pruneblock_test.go | 253 -- cmd/geth/run_test.go | 120 - cmd/utils/flags.go | 7 +- cmd/utils/history_test.go | 185 - consensus/clique/clique_test.go | 131 - consensus/clique/snapshot_test.go | 507 --- consensus/parlia/parlia.go | 3 +- console/bridge_test.go | 48 - console/console_test.go | 322 -- core/bench_test.go | 331 -- core/block_validator_test.go | 272 -- core/blockarchiver/client.go | 229 + core/blockarchiver/config.go | 12 + core/blockarchiver/converter.go | 321 ++ core/blockarchiver/service.go | 207 + core/blockarchiver/types.go | 193 + core/blockchain.go | 242 +- core/blockchain_diff_test.go | 429 -- core/blockchain_notries_test.go | 226 - core/blockchain_reader.go | 16 +- core/blockchain_repair_test.go | 2018 --------- core/blockchain_sethead_test.go | 2191 ---------- core/blockchain_snapshot_test.go | 720 --- core/blockchain_test.go | 4566 -------------------- core/chain_indexer_test.go | 246 -- core/chain_makers_test.go | 260 -- core/dao_test.go | 159 - core/data_availability_test.go | 436 -- core/eip3529tests/eip3529_ethash_test.go | 140 - core/eip3529tests/eip3529_parlia_test.go | 151 - core/eip3529tests/eip3529_test_util.go | 80 - core/genesis_test.go | 342 -- core/headerchain.go | 73 +- core/headerchain_test.go | 116 - core/state_prefetcher_test.go | 149 - core/state_processor_test.go | 431 -- core/txindexer_test.go | 243 -- core/vote/vote_pool_test.go | 275 -- eth/backend.go | 13 +- eth/catalyst/simulated_beacon_test.go | 141 - eth/downloader/downloader_test.go | 1335 ------ eth/downloader/modes.go | 9 +- eth/downloader/queue_test.go | 474 -- eth/downloader/testchain_test.go | 231 - eth/ethconfig/config.go | 51 +- eth/ethconfig/gen_config.go | 7 + eth/filters/filter_test.go | 392 -- eth/gasprice/feehistory_test.go | 91 - eth/gasprice/gasprice_test.go | 230 - eth/handler.go | 124 +- eth/handler_bsc_test.go | 259 -- eth/handler_eth_test.go | 855 ---- eth/handler_test.go | 400 -- eth/protocols/bsc/handler.go | 30 +- eth/protocols/bsc/protocol.go | 1 + eth/protocols/eth/handler.go | 4 + eth/protocols/eth/handler_test.go | 656 --- eth/protocols/eth/handshake_test.go | 90 - eth/protocols/eth/protocol.go | 1 + eth/protocols/snap/handler_fuzzing_test.go | 163 - eth/protocols/snap/protocol.go | 1 + eth/protocols/trust/handler_test.go | 273 -- eth/sync_test.go | 196 - eth/tracers/api_test.go | 998 ----- ethclient/ethclient_test.go | 780 ---- 
ethclient/gethclient/gethclient_test.go | 570 --- ethclient/simulated/backend_test.go | 308 -- ethclient/simulated/options_test.go | 74 - go.mod | 3 +- go.sum | 422 ++ graphql/graphql_test.go | 487 --- internal/cmdtest/test_cmd.go | 300 -- internal/ethapi/api_test.go | 2295 ---------- miner/miner_test.go | 337 -- miner/payload_building_test.go | 160 - miner/worker_test.go | 413 -- p2p/peer.go | 3 +- p2p/server.go | 3 +- resource/greenfield-peer.png | Bin 0 -> 102556 bytes tests/block_test.go | 94 - tests/difficulty_test.go | 111 - 103 files changed, 1623 insertions(+), 34073 deletions(-) delete mode 100644 .github/workflows/evm-tests.yml delete mode 100644 .github/workflows/integration-test.yml delete mode 100644 accounts/abi/bind/base_test.go delete mode 100644 accounts/abi/bind/bind_test.go delete mode 100644 accounts/abi/bind/util_test.go delete mode 100644 cmd/clef/consolecmd_test.go delete mode 100644 cmd/clef/run_test.go delete mode 100644 cmd/devp2p/internal/ethtest/suite_test.go delete mode 100644 cmd/ethkey/message_test.go delete mode 100644 cmd/ethkey/run_test.go delete mode 100644 cmd/evm/t8n_test.go delete mode 100644 cmd/geth/accountcmd_test.go delete mode 100644 cmd/geth/attach_test.go delete mode 100644 cmd/geth/consolecmd_test.go delete mode 100644 cmd/geth/exportcmd_test.go delete mode 100644 cmd/geth/genesis_test.go delete mode 100644 cmd/geth/initnetwork_test.go delete mode 100644 cmd/geth/logging_test.go delete mode 100644 cmd/geth/pruneblock_test.go delete mode 100644 cmd/geth/run_test.go delete mode 100644 cmd/utils/history_test.go delete mode 100644 consensus/clique/clique_test.go delete mode 100644 consensus/clique/snapshot_test.go delete mode 100644 console/bridge_test.go delete mode 100644 console/console_test.go delete mode 100644 core/bench_test.go delete mode 100644 core/block_validator_test.go create mode 100644 core/blockarchiver/client.go create mode 100644 core/blockarchiver/config.go create mode 100644 core/blockarchiver/converter.go create mode 100644 core/blockarchiver/service.go create mode 100644 core/blockarchiver/types.go delete mode 100644 core/blockchain_diff_test.go delete mode 100644 core/blockchain_notries_test.go delete mode 100644 core/blockchain_repair_test.go delete mode 100644 core/blockchain_sethead_test.go delete mode 100644 core/blockchain_snapshot_test.go delete mode 100644 core/blockchain_test.go delete mode 100644 core/chain_indexer_test.go delete mode 100644 core/chain_makers_test.go delete mode 100644 core/dao_test.go delete mode 100644 core/data_availability_test.go delete mode 100644 core/eip3529tests/eip3529_ethash_test.go delete mode 100644 core/eip3529tests/eip3529_parlia_test.go delete mode 100644 core/eip3529tests/eip3529_test_util.go delete mode 100644 core/genesis_test.go delete mode 100644 core/headerchain_test.go delete mode 100644 core/state_prefetcher_test.go delete mode 100644 core/state_processor_test.go delete mode 100644 core/txindexer_test.go delete mode 100644 eth/catalyst/simulated_beacon_test.go delete mode 100644 eth/downloader/downloader_test.go delete mode 100644 eth/downloader/queue_test.go delete mode 100644 eth/downloader/testchain_test.go delete mode 100644 eth/filters/filter_test.go delete mode 100644 eth/gasprice/feehistory_test.go delete mode 100644 eth/gasprice/gasprice_test.go delete mode 100644 eth/handler_bsc_test.go delete mode 100644 eth/handler_eth_test.go delete mode 100644 eth/handler_test.go delete mode 100644 eth/protocols/eth/handler_test.go delete mode 100644 
eth/protocols/eth/handshake_test.go delete mode 100644 eth/protocols/snap/handler_fuzzing_test.go delete mode 100644 eth/protocols/trust/handler_test.go delete mode 100644 eth/sync_test.go delete mode 100644 eth/tracers/api_test.go delete mode 100644 ethclient/ethclient_test.go delete mode 100644 ethclient/gethclient/gethclient_test.go delete mode 100644 ethclient/simulated/backend_test.go delete mode 100644 ethclient/simulated/options_test.go delete mode 100644 graphql/graphql_test.go delete mode 100644 internal/cmdtest/test_cmd.go delete mode 100644 internal/ethapi/api_test.go delete mode 100644 miner/miner_test.go delete mode 100644 miner/payload_building_test.go delete mode 100644 miner/worker_test.go create mode 100644 resource/greenfield-peer.png delete mode 100644 tests/block_test.go delete mode 100644 tests/difficulty_test.go diff --git a/.github/workflows/docker-release.yml b/.github/workflows/docker-release.yml index 205a52c0e5..aa1d0bd55e 100644 --- a/.github/workflows/docker-release.yml +++ b/.github/workflows/docker-release.yml @@ -7,7 +7,8 @@ on: - v* env: - IMAGE_NAME: bsc + IMAGE_NAME: bsc-client + IMAGE_SOURCE: https://github.com/${{ github.repository }} jobs: # Push image to GitHub Packages. @@ -21,14 +22,14 @@ jobs: - name: Build image run: | docker build . \ - --label "org.opencontainers.image.source=${{ secrets.IMAGE_SOURCE }}" \ + --label "org.opencontainers.image.source=${IMAGE_SOURCE}" \ --label "org.opencontainers.image.revision=$(git rev-parse HEAD)" \ --label "org.opencontainers.image.version=$(git describe --tags --abbrev=0)" \ --label "org.opencontainers.image.licenses=LGPL-3.0,GPL-3.0" \ -f ./Dockerfile -t "${IMAGE_NAME}" - name: Log into registry - run: echo "${{ secrets.PACKAGE_TOKEN }}" | docker login ghcr.io -u ${{ github.actor }} --password-stdin + run: echo "${{ secrets.GH_TOKEN }}" | docker login ghcr.io -u ${{ github.actor }} --password-stdin - name: Push image run: | diff --git a/.github/workflows/evm-tests.yml b/.github/workflows/evm-tests.yml deleted file mode 100644 index a7a025d8c1..0000000000 --- a/.github/workflows/evm-tests.yml +++ /dev/null @@ -1,56 +0,0 @@ -name: EVM Test - -on: - push: - branches: - - master - - develop - - pull_request: - branches: - - master - - develop - -jobs: - evm-test: - strategy: - matrix: - go-version: [1.21.x] - os: [ubuntu-latest] - runs-on: ${{ matrix.os }} - steps: - - name: Install Go - uses: actions/setup-go@v3 - with: - go-version: ${{ matrix.go-version }} - - - name: Checkout code - uses: actions/checkout@v3 - - - uses: actions/cache@v3 - with: - # In order: - # * Module download cache - # * Build cache (Linux) - # * Build cache (Mac) - # * Build cache (Windows) - path: | - ~/go/pkg/mod - ~/.cache/go-build - ~/Library/Caches/go-build - ~\AppData\Local\go-build - key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }} - restore-keys: | - ${{ runner.os }}-go- - - - name: EVM Test - env: - CGO_CFLAGS: "-O -D__BLST_PORTABLE__" - CGO_CFLAGS_ALLOW: "-O -D__BLST_PORTABLE__" - ANDROID_HOME: "" # Skip android test - run: | - git submodule update --init --depth 1 --recursive - go mod download - cd tests - sed -i -e 's/\/\/ bt.skipLoad/bt.skipLoad/g' block_test.go - bash -x run-evm-tests.sh diff --git a/.github/workflows/integration-test.yml b/.github/workflows/integration-test.yml deleted file mode 100644 index ff12eb4bec..0000000000 --- a/.github/workflows/integration-test.yml +++ /dev/null @@ -1,26 +0,0 @@ -name: Integration Test - -on: - push: - branches: - - master - - develop - - pull_request: - branches: - - 
master - - develop -  -jobs: - truffle-test: - strategy: - matrix: - os: [ubuntu-latest] - runs-on: ${{ matrix.os }} - steps: - - name: Checkout code - uses: actions/checkout@v3 - - - name: Truffle test - run: | - make truffle-test diff --git a/.golangci.yml b/.golangci.yml index 90e5c591f6..77dc26224f 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -44,6 +44,9 @@ linters-settings: issues: exclude-rules: + - path: ./ + linters: + - unused - path: core/state/metrics.go linters: - unused diff --git a/README.md b/README.md index 9e38330f44..71a707b7d2 100644 --- a/README.md +++ b/README.md @@ -1,318 +1,70 @@ -## BNB Smart Chain +# BSC Client -The goal of BNB Smart Chain is to bring programmability and interoperability to BNB Beacon Chain. In order to embrace the existing popular community and advanced technology, it will bring huge benefits by staying compatible with all the existing smart contracts on Ethereum and Ethereum tooling. And to achieve that, the easiest solution is to develop based on go-ethereum fork, as we respect the great work of Ethereum very much. +## Background -BNB Smart Chain starts its development based on go-ethereum fork. So you may see many toolings, binaries and also docs are based on Ethereum ones, such as the name “geth”. +The Greenfield Community has introduced the Block Archiver (https://github.com/bnb-chain/greenfield-bsc-archiver), +which makes BSC historical block data accessible on Greenfield. To fulfill the needs of BSC node operators who require a full sync from the genesis block, +and to provide more efficient synchronization, the Greenfield Peer is introduced. -[![API Reference]( -https://pkg.go.dev/badge/github.com/ethereum/go-ethereum -)](https://pkg.go.dev/github.com/ethereum/go-ethereum?tab=doc) -[![Discord](https://img.shields.io/badge/discord-join%20chat-blue.svg)](https://discord.gg/z2VpC455eU) +## How Greenfield Peer Works -But from that baseline of EVM compatible, BNB Smart Chain introduces a system of 21 validators with Proof of Staked Authority (PoSA) consensus that can support short block time and lower fees. The most bonded validator candidates of staking will become validators and produce blocks. The double-sign detection and other slashing logic guarantee security, stability, and chain finality. +The diagram below illustrates the functionality of the Greenfield Peer. The Greenfield Peer does not participate in +other operations within the BSC network; it solely provides block data to BSC nodes. It does not persist any data on its own; +instead, when it receives requests (GetBodies and GetHeaders) from other BSC nodes, it fetches a bundle of blocks (the number of blocks is determined +by the Block Archiver Service) from Greenfield and caches them in memory. This ensures the Greenfield Peer delivers block data +to BSC nodes efficiently. -Cross-chain transfer and other communication are possible due to native support of interoperability. Relayers and on-chain contracts are developed to support that. BNB Beacon Chain DEX remains a liquid venue of the exchange of assets on both chains. This dual-chain architecture will be ideal for users to take advantage of the fast trading on one side and build their decentralized apps on the other side. **The BNB Smart Chain** will be: +![gnfd peer](/resource/greenfield-peer.png) -- **A self-sovereign blockchain**: Provides security and safety with elected validators. -- **EVM-compatible**: Supports all the existing Ethereum tooling along with faster finality and cheaper transaction fees.
-- **Interoperable**: Comes with efficient native dual chain communication; Optimized for scaling high-performance dApps that require fast and smooth user experience. -- **Distributed with on-chain governance**: Proof of Staked Authority brings in decentralization and community participants. As the native token, BNB will serve as both the gas of smart contract execution and tokens for staking. +## How to Run Greenfield Peer -More details in [White Paper](https://www.bnbchain.org/en#smartChain). - -## Key features - -### Proof of Staked Authority -Although Proof-of-Work (PoW) has been approved as a practical mechanism to implement a decentralized network, it is not friendly to the environment and also requires a large size of participants to maintain the security. - -Proof-of-Authority(PoA) provides some defense to 51% attack, with improved efficiency and tolerance to certain levels of Byzantine players (malicious or hacked). -Meanwhile, the PoA protocol is most criticized for being not as decentralized as PoW, as the validators, i.e. the nodes that take turns to produce blocks, have all the authorities and are prone to corruption and security attacks. - -Other blockchains, such as EOS and Cosmos both, introduce different types of Deputy Proof of Stake (DPoS) to allow the token holders to vote and elect the validator set. It increases the decentralization and favors community governance. - -To combine DPoS and PoA for consensus, BNB Smart Chain implement a novel consensus engine called Parlia that: - -1. Blocks are produced by a limited set of validators. -2. Validators take turns to produce blocks in a PoA manner, similar to Ethereum's Clique consensus engine. -3. Validator set are elected in and out based on a staking based governance on BNB Beacon Chain. -4. The validator set change is relayed via a cross-chain communication mechanism. -5. Parlia consensus engine will interact with a set of [system contracts](https://docs.bnbchain.org/bnb-smart-chain/staking/overview/#system-contracts) to achieve liveness slash, revenue distributing and validator set renewing func. - - -### Light Client of BNB Beacon Chain - -To achieve the cross-chain communication from BNB Beacon Chain to BNB Smart Chain, need introduce a on-chain light client verification algorithm. -It contains two parts: - -1. [Stateless Precompiled contracts](https://github.com/bnb-chain/bsc/blob/master/core/vm/contracts_lightclient.go) to do tendermint header verification and Merkle Proof verification. -2. [Stateful solidity contracts](https://github.com/bnb-chain/bsc-genesis-contract/blob/master/contracts/TendermintLightClient.sol) to store validator set and trusted appHash. - -## Native Token - -BNB will run on BNB Smart Chain in the same way as ETH runs on Ethereum so that it remains as `native token` for BSC. This means, -BNB will be used to: - -1. pay `gas` to deploy or invoke Smart Contract on BSC -2. perform cross-chain operations, such as transfer token assets across BNB Smart Chain and BNB Beacon Chain. - -## Building the source - -Many of the below are the same as or similar to go-ethereum. - -For prerequisites and detailed build instructions please read the [Installation Instructions](https://geth.ethereum.org/docs/getting-started/installing-geth). - -Building `geth` requires both a Go (version 1.21 or later) and a C compiler (GCC 5 or higher). You can install -them using your favourite package manager. 
Once the dependencies are installed, run +### Build ```shell make geth ``` -or, to build the full suite of utilities: +### Configure the connection to the Block Archiver -```shell -make all -``` +The Greenfield Peer integrates with the Block Archiver as its backend, so the Block Archiver service needs to be configured in the config file. Take the following config for the testnet Block Archiver as an example: -If you get such error when running the node with self built binary: -```shell -Caught SIGILL in blst_cgo_init, consult /bindinds/go/README.md. -``` -please try to add the following environment variables and build again: -```shell -export CGO_CFLAGS="-O -D__BLST_PORTABLE__" -export CGO_CFLAGS_ALLOW="-O -D__BLST_PORTABLE__" +```toml +[Eth.BlockArchiverConfig] +RPCAddress = "https://gnfd-bsc-archiver-testnet.bnbchain.org" +SPAddress = "https://gnfd-testnet-sp2.bnbchain.org" +BucketName = "testnet-bsc-block" +BlockCacheSize = 1000000 ``` -## Executables +- RPCAddress: the RPC address of the Block Archiver service +- SPAddress: the SP address of the bucket on Greenfield which serves the block data +- BucketName: the bucket name on Greenfield which serves the block data +- BlockCacheSize: the cache size of the block data; note that the Greenfield Peer caches the block data in memory -The bsc project comes with several wrappers/executables found in the `cmd` -directory. +### Run -| Command | Description | -| :--------: | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| **`geth`** | Main BNB Smart Chain client binary. It is the entry point into the BSC network (main-, test- or private net), capable of running as a full node (default), archive node (retaining all historical state) or a light node (retrieving data live). It has the same and more RPC and other interface as go-ethereum and can be used by other processes as a gateway into the BSC network via JSON RPC endpoints exposed on top of HTTP, WebSocket and/or IPC transports. `geth --help` and the [CLI page](https://geth.ethereum.org/docs/interface/command-line-options) for command line options. | -| `clef` | Stand-alone signing tool, which can be used as a backend signer for `geth`. | -| `devp2p` | Utilities to interact with nodes on the networking layer, without running a full blockchain. | -| `abigen` | Source code generator to convert Ethereum contract definitions into easy to use, compile-time type-safe Go packages. It operates on plain [Ethereum contract ABIs](https://docs.soliditylang.org/en/develop/abi-spec.html) with expanded functionality if the contract bytecode is also available. However, it also accepts Solidity source files, making development much more streamlined. Please see our [Native DApps](https://geth.ethereum.org/docs/dapp/native-bindings) page for details. | -| `bootnode` | Stripped down version of our Ethereum client implementation that only takes part in the network node discovery protocol, but does not run any of the higher level application protocols.
It can be used as a lightweight bootstrap node to aid in finding peers in private networks. | -| `evm` | Developer utility version of the EVM (Ethereum Virtual Machine) that is capable of running bytecode snippets within a configurable environment and execution mode. Its purpose is to allow isolated, fine-grained debugging of EVM opcodes (e.g. `evm --code 60ff60ff --debug run`). | -| `rlpdump` | Developer utility tool to convert binary RLP ([Recursive Length Prefix](https://ethereum.org/en/developers/docs/data-structures-and-encoding/rlp)) dumps (data encoding used by the Ethereum protocol both network as well as consensus wise) to user-friendlier hierarchical representation (e.g. `rlpdump --hex CE0183FFFFFFC4C304050583616263`). | - -## Running `geth` - -Going through all the possible command line flags is out of scope here (please consult our -[CLI Wiki page](https://geth.ethereum.org/docs/fundamentals/command-line-options)), -but we've enumerated a few common parameter combos to get you up to speed quickly -on how you can run your own `geth` instance. - -### Hardware Requirements - -The hardware must meet certain requirements to run a full node on mainnet: -- VPS running recent versions of Mac OS X, Linux, or Windows. -- IMPORTANT 3 TB(Dec 2023) of free disk space, solid-state drive(SSD), gp3, 8k IOPS, 500 MB/S throughput, read latency <1ms. (if node is started with snap sync, it will need NVMe SSD) -- 16 cores of CPU and 64 GB of memory (RAM) -- Suggest m5zn.6xlarge or r7iz.4xlarge instance type on AWS, c2-standard-16 on Google cloud. -- A broadband Internet connection with upload/download speeds of 5 MB/S - -The requirement for testnet: -- VPS running recent versions of Mac OS X, Linux, or Windows. -- 500G of storage for testnet. -- 4 cores of CPU and 16 gigabytes of memory (RAM). - -### Steps to Run a Fullnode - -#### 1. Download the pre-build binaries -```shell -# Linux -wget $(curl -s https://api.github.com/repos/bnb-chain/bsc/releases/latest |grep browser_ |grep geth_linux |cut -d\" -f4) -mv geth_linux geth -chmod -v u+x geth - -# MacOS -wget $(curl -s https://api.github.com/repos/bnb-chain/bsc/releases/latest |grep browser_ |grep geth_mac |cut -d\" -f4) -mv geth_macos geth -chmod -v u+x geth -``` - -#### 2. Download the config files ```shell -//== mainnet -wget $(curl -s https://api.github.com/repos/bnb-chain/bsc/releases/latest |grep browser_ |grep mainnet |cut -d\" -f4) -unzip mainnet.zip - -//== testnet -wget $(curl -s https://api.github.com/repos/bnb-chain/bsc/releases/latest |grep browser_ |grep testnet |cut -d\" -f4) -unzip testnet.zip +./geth --config ./config.toml --datadir ./node ``` -#### 3. Download snapshot -Download latest chaindata snapshot from [here](https://github.com/bnb-chain/bsc-snapshots). Follow the guide to structure your files. - -Note: If you encounter difficulties downloading the chaindata snapshot and prefer to synchronize from the genesis block on the Chapel testnet, remember to include the additional flag `--chapel` when initially launching Geth. - -#### 4. 
Start a full node -```shell -./geth --config ./config.toml --datadir ./node --cache 8000 --rpc.allow-unprotected-txs --history.transactions 0 - -## It is recommend to run fullnode with `--tries-verify-mode none` if you want high performance and care little about state consistency -## It will run with Hash-Base Storage Scheme by default -./geth --config ./config.toml --datadir ./node --cache 8000 --rpc.allow-unprotected-txs --history.transactions 0 --tries-verify-mode none +## How to interact with Greenfield Peer as a BSC node -## It runs fullnode with Path-Base Storage Scheme. -## It will enable inline state prune, keeping the latest 90000 blocks' history state by default. -./geth --config ./config.toml --datadir ./node --cache 8000 --rpc.allow-unprotected-txs --history.transactions 0 --tries-verify-mode none --state.scheme path -``` +Configure your BSC node to connect to the Greenfield Peer by adjusting the settings in your configuration file. -#### 5. Monitor node status +Navigate to the P2P section of your BSC node configuration file and specify the enode info of the Greenfield Peer. -Monitor the log from **./node/bsc.log** by default. When the node has started syncing, should be able to see the following output: -```shell -t=2022-09-08T13:00:27+0000 lvl=info msg="Imported new chain segment" blocks=1 txs=177 mgas=17.317 elapsed=31.131ms mgasps=556.259 number=21,153,429 hash=0x42e6b54ba7106387f0650defc62c9ace3160b427702dab7bd1c5abb83a32d8db dirty="0.00 B" -t=2022-09-08T13:00:29+0000 lvl=info msg="Imported new chain segment" blocks=1 txs=251 mgas=39.638 elapsed=68.827ms mgasps=575.900 number=21,153,430 hash=0xa3397b273b31b013e43487689782f20c03f47525b4cd4107c1715af45a88796e dirty="0.00 B" -t=2022-09-08T13:00:33+0000 lvl=info msg="Imported new chain segment" blocks=1 txs=197 mgas=19.364 elapsed=34.663ms mgasps=558.632 number=21,153,431 hash=0x0c7872b698f28cb5c36a8a3e1e315b1d31bda6109b15467a9735a12380e2ad14 dirty="0.00 B" +```toml +# other configurations are omitted +... +[Node.P2P] +MaxPeers = 1 +NoDiscovery = true +TrustedNodes = [] +StaticNodes = ["${enode_info}"] +... ``` -#### 6. Interact with fullnode -Start up `geth`'s built-in interactive [JavaScript console](https://geth.ethereum.org/docs/interface/javascript-console), -(via the trailing `console` subcommand) through which you can interact using [`web3` methods](https://web3js.readthedocs.io/en/) -(note: the `web3` version bundled within `geth` is very old, and not up to date with official docs), -as well as `geth`'s own [management APIs](https://geth.ethereum.org/docs/rpc/server). -This tool is optional and if you leave it out you can always attach to an already running -`geth` instance with `geth attach`. - -#### 7. More - -More details about [running a node](https://docs.bnbchain.org/bnb-smart-chain/developers/node_operators/full_node/) and [becoming a validator](https://docs.bnbchain.org/bnb-smart-chain/validator/create-val/) - -*Note: Although some internal protective measures prevent transactions from -crossing over between the main network and test network, you should always -use separate accounts for play and real money. 
Unless you manually move -accounts, `geth` will by default correctly separate the two networks and will not make any -accounts available between them.* - -### Configuration - -As an alternative to passing the numerous flags to the `geth` binary, you can also pass a -configuration file via: - -```shell -$ geth --config /path/to/your_config.toml -``` - -To get an idea of how the file should look like you can use the `dumpconfig` subcommand to -export your existing configuration: - -```shell -$ geth --your-favourite-flags dumpconfig -``` - -### Programmatically interfacing `geth` nodes - -As a developer, sooner rather than later you'll want to start interacting with `geth` and the -BSC network via your own programs and not manually through the console. To aid -this, `geth` has built-in support for a JSON-RPC based APIs ([standard APIs](https://ethereum.github.io/execution-apis/api-documentation/) -and [`geth` specific APIs](https://geth.ethereum.org/docs/interacting-with-geth/rpc)). -These can be exposed via HTTP, WebSockets and IPC (UNIX sockets on UNIX based -platforms, and named pipes on Windows). - -The IPC interface is enabled by default and exposes all the APIs supported by `geth`, -whereas the HTTP and WS interfaces need to manually be enabled and only expose a -subset of APIs due to security reasons. These can be turned on/off and configured as -you'd expect. - -HTTP based JSON-RPC API options: - -* `--http` Enable the HTTP-RPC server -* `--http.addr` HTTP-RPC server listening interface (default: `localhost`) -* `--http.port` HTTP-RPC server listening port (default: `8545`) -* `--http.api` API's offered over the HTTP-RPC interface (default: `eth,net,web3`) -* `--http.corsdomain` Comma separated list of domains from which to accept cross origin requests (browser enforced) -* `--ws` Enable the WS-RPC server -* `--ws.addr` WS-RPC server listening interface (default: `localhost`) -* `--ws.port` WS-RPC server listening port (default: `8546`) -* `--ws.api` API's offered over the WS-RPC interface (default: `eth,net,web3`) -* `--ws.origins` Origins from which to accept WebSocket requests -* `--ipcdisable` Disable the IPC-RPC server -* `--ipcapi` API's offered over the IPC-RPC interface (default: `admin,debug,eth,miner,net,personal,txpool,web3`) -* `--ipcpath` Filename for IPC socket/pipe within the datadir (explicit paths escape it) - -You'll need to use your own programming environments' capabilities (libraries, tools, etc) to -connect via HTTP, WS or IPC to a `geth` node configured with the above flags and you'll -need to speak [JSON-RPC](https://www.jsonrpc.org/specification) on all transports. You -can reuse the same connection for multiple requests! - -**Note: Please understand the security implications of opening up an HTTP/WS based -transport before doing so! Hackers on the internet are actively trying to subvert -BSC nodes with exposed APIs! Further, all browser tabs can access locally -running web servers, so malicious web pages could try to subvert locally available -APIs!** - -### Operating a private network -- [BSC-Deploy](https://github.com/bnb-chain/node-deploy/): deploy tool for setting up both BNB Beacon Chain, BNB Smart Chain and the cross chain infrastructure between them. -- [BSC-Docker](https://github.com/bnb-chain/bsc-docker): deploy tool for setting up local BSC cluster in container. - - -## Running a bootnode - -Bootnodes are super-lightweight nodes that are not behind a NAT and are running just discovery protocol. 
When you start up a node it should log your enode, which is a public identifier that others can use to connect to your node. - -First the bootnode requires a key, which can be created with the following command, which will save a key to boot.key: - -``` -bootnode -genkey boot.key -``` - -This key can then be used to generate a bootnode as follows: - -``` -bootnode -nodekey boot.key -addr :30311 -network bsc -``` - -The choice of port passed to -addr is arbitrary. -The bootnode command returns the following logs to the terminal, confirming that it is running: - -``` -enode://3063d1c9e1b824cfbb7c7b6abafa34faec6bb4e7e06941d218d760acdd7963b274278c5c3e63914bd6d1b58504c59ec5522c56f883baceb8538674b92da48a96@127.0.0.1:0?discport=30311 -Note: you're using cmd/bootnode, a developer tool. -We recommend using a regular node as bootstrap node for production deployments. -INFO [08-21|11:11:30.687] New local node record seq=1,692,616,290,684 id=2c9af1742f8f85ce ip= udp=0 tcp=0 -INFO [08-21|12:11:30.753] New local node record seq=1,692,616,290,685 id=2c9af1742f8f85ce ip=54.217.128.118 udp=30311 tcp=0 -INFO [09-01|02:46:26.234] New local node record seq=1,692,616,290,686 id=2c9af1742f8f85ce ip=34.250.32.100 udp=30311 tcp=0 -``` - -## Contribution - -Thank you for considering helping out with the source code! We welcome contributions -from anyone on the internet, and are grateful for even the smallest of fixes! - -If you'd like to contribute to bsc, please fork, fix, commit and send a pull request -for the maintainers to review and merge into the main code base. If you wish to submit -more complex changes though, please check up with the core devs first on [our discord channel](https://discord.gg/bnbchain) -to ensure those changes are in line with the general philosophy of the project and/or get -some early feedback which can make both your efforts much lighter as well as our review -and merge procedures quick and simple. - -Please make sure your contributions adhere to our coding guidelines: - - * Code must adhere to the official Go [formatting](https://golang.org/doc/effective_go.html#formatting) - guidelines (i.e. uses [gofmt](https://golang.org/cmd/gofmt/)). - * Code must be documented adhering to the official Go [commentary](https://golang.org/doc/effective_go.html#commentary) - guidelines. - * Pull requests need to be based on and opened against the `master` branch. - * Commit messages should be prefixed with the package(s) they modify. - * E.g. "eth, rpc: make trace configs optional" - -Please see the [Developers' Guide](https://geth.ethereum.org/docs/developers/geth-developer/dev-guide) -for more details on configuring your environment, managing project dependencies, and -testing procedures. - -## License +the `enode_info` for BSC Testnet and Mainnet can be found in the [network-info](https://docs.bnbchain.org/bnb-greenfield/for-developers/data-archive/greenfield-peer) page. -The bsc library (i.e. all code outside of the `cmd` directory) is licensed under the -[GNU Lesser General Public License v3.0](https://www.gnu.org/licenses/lgpl-3.0.en.html), -also included in our repository in the `COPYING.LESSER` file. -The bsc binaries (i.e. all code inside of the `cmd` directory) is licensed under the -[GNU General Public License v3.0](https://www.gnu.org/licenses/gpl-3.0.en.html), also -included in our repository in the `COPYING` file. 
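The README section above describes the Greenfield Peer's serving path only in prose: the peer holds no chain data of its own, and a GetHeaders/GetBodies request is answered from an in-memory cache that is filled one bundle at a time from the Block Archiver on Greenfield. The Go sketch below illustrates that cache-or-fetch flow; it is a minimal illustration under stated assumptions, not the `core/blockarchiver` implementation added by this patch, and the names `Block`, `bundleFetcher`, and `blockCache` (as well as the bundle size of 4) are hypothetical stand-ins.

```go
// Sketch of the Greenfield Peer request path described in the README:
// no local chain data; blocks are served from an in-memory cache and a
// whole bundle is fetched from the Block Archiver on a cache miss.
package main

import (
	"fmt"
	"sync"
)

// Block stands in for the header/body data returned by the Block Archiver;
// the real peer works with types from core/types and core/blockarchiver.
type Block struct {
	Number uint64
	Data   string
}

// bundleFetcher abstracts the Block Archiver backend (RPCAddress / SPAddress /
// BucketName in the config). It returns every block in the bundle that
// contains the requested block number.
type bundleFetcher func(number uint64) ([]Block, error)

// blockCache is a minimal stand-in for the BlockCacheSize-bounded in-memory
// cache; a production version would also evict old entries.
type blockCache struct {
	mu     sync.RWMutex
	blocks map[uint64]Block
	fetch  bundleFetcher
}

func newBlockCache(fetch bundleFetcher) *blockCache {
	return &blockCache{blocks: make(map[uint64]Block), fetch: fetch}
}

// Get serves a single block, pulling and caching the whole bundle from
// Greenfield when the block is not already cached.
func (c *blockCache) Get(number uint64) (Block, error) {
	c.mu.RLock()
	b, ok := c.blocks[number]
	c.mu.RUnlock()
	if ok {
		return b, nil
	}
	bundle, err := c.fetch(number)
	if err != nil {
		return Block{}, err
	}
	c.mu.Lock()
	for _, blk := range bundle {
		c.blocks[blk.Number] = blk
	}
	b, ok = c.blocks[number]
	c.mu.Unlock()
	if !ok {
		return Block{}, fmt.Errorf("block %d missing from fetched bundle", number)
	}
	return b, nil
}

func main() {
	// Fake archiver for the example: bundles of 4 consecutive blocks,
	// mirroring how the Block Archiver groups blocks into bundles.
	fetch := func(number uint64) ([]Block, error) {
		start := number - number%4
		bundle := make([]Block, 0, 4)
		for n := start; n < start+4; n++ {
			bundle = append(bundle, Block{Number: n, Data: fmt.Sprintf("block-%d", n)})
		}
		return bundle, nil
	}
	cache := newBlockCache(fetch)
	// e.g. a GetBodies request range: the first call fetches one bundle,
	// the next two are served from the cache.
	for _, n := range []uint64{100, 101, 102} {
		blk, err := cache.Get(n)
		if err != nil {
			panic(err)
		}
		fmt.Println(blk.Number, blk.Data)
	}
}
```

In the actual peer, the bundle size comes from the Block Archiver service and the cache bound corresponds to the `BlockCacheSize` option shown in the configuration above.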
diff --git a/accounts/abi/bind/base_test.go b/accounts/abi/bind/base_test.go deleted file mode 100644 index 99621767ff..0000000000 --- a/accounts/abi/bind/base_test.go +++ /dev/null @@ -1,594 +0,0 @@ -// Copyright 2019 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package bind_test - -import ( - "context" - "errors" - "math/big" - "reflect" - "strings" - "testing" - - "github.com/ethereum/go-ethereum" - "github.com/ethereum/go-ethereum/accounts/abi" - "github.com/ethereum/go-ethereum/accounts/abi/bind" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/common/hexutil" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/internal/ethapi" - "github.com/ethereum/go-ethereum/rlp" - "github.com/stretchr/testify/assert" -) - -func mockSign(addr common.Address, tx *types.Transaction) (*types.Transaction, error) { return tx, nil } - -type mockTransactor struct { - baseFee *big.Int - gasTipCap *big.Int - gasPrice *big.Int - suggestGasTipCapCalled bool - suggestGasPriceCalled bool -} - -func (mt *mockTransactor) HeaderByNumber(ctx context.Context, number *big.Int) (*types.Header, error) { - return &types.Header{BaseFee: mt.baseFee}, nil -} - -func (mt *mockTransactor) PendingCodeAt(ctx context.Context, account common.Address) ([]byte, error) { - return []byte{1}, nil -} - -func (mt *mockTransactor) PendingNonceAt(ctx context.Context, account common.Address) (uint64, error) { - return 0, nil -} - -func (mt *mockTransactor) SuggestGasPrice(ctx context.Context) (*big.Int, error) { - mt.suggestGasPriceCalled = true - return mt.gasPrice, nil -} - -func (mt *mockTransactor) SuggestGasTipCap(ctx context.Context) (*big.Int, error) { - mt.suggestGasTipCapCalled = true - return mt.gasTipCap, nil -} - -func (mt *mockTransactor) EstimateGas(ctx context.Context, call ethereum.CallMsg) (gas uint64, err error) { - return 0, nil -} - -func (mt *mockTransactor) SendTransaction(ctx context.Context, tx *types.Transaction) error { - return nil -} - -func (mt *mockTransactor) SendTransactionConditional(ctx context.Context, tx *types.Transaction, opts ethapi.TransactionOpts) error { - return nil -} - -type mockCaller struct { - codeAtBlockNumber *big.Int - callContractBlockNumber *big.Int - callContractBytes []byte - callContractErr error - codeAtBytes []byte - codeAtErr error -} - -func (mc *mockCaller) CodeAt(ctx context.Context, contract common.Address, blockNumber *big.Int) ([]byte, error) { - mc.codeAtBlockNumber = blockNumber - return mc.codeAtBytes, mc.codeAtErr -} - -func (mc *mockCaller) CallContract(ctx context.Context, call ethereum.CallMsg, blockNumber *big.Int) ([]byte, error) { - mc.callContractBlockNumber = blockNumber - return mc.callContractBytes, mc.callContractErr -} - -type mockPendingCaller struct { - 
*mockCaller - pendingCodeAtBytes []byte - pendingCodeAtErr error - pendingCodeAtCalled bool - pendingCallContractCalled bool - pendingCallContractBytes []byte - pendingCallContractErr error -} - -func (mc *mockPendingCaller) PendingCodeAt(ctx context.Context, contract common.Address) ([]byte, error) { - mc.pendingCodeAtCalled = true - return mc.pendingCodeAtBytes, mc.pendingCodeAtErr -} - -func (mc *mockPendingCaller) PendingCallContract(ctx context.Context, call ethereum.CallMsg) ([]byte, error) { - mc.pendingCallContractCalled = true - return mc.pendingCallContractBytes, mc.pendingCallContractErr -} - -type mockBlockHashCaller struct { - *mockCaller - codeAtHashBytes []byte - codeAtHashErr error - codeAtHashCalled bool - callContractAtHashCalled bool - callContractAtHashBytes []byte - callContractAtHashErr error -} - -func (mc *mockBlockHashCaller) CodeAtHash(ctx context.Context, contract common.Address, hash common.Hash) ([]byte, error) { - mc.codeAtHashCalled = true - return mc.codeAtHashBytes, mc.codeAtHashErr -} - -func (mc *mockBlockHashCaller) CallContractAtHash(ctx context.Context, call ethereum.CallMsg, hash common.Hash) ([]byte, error) { - mc.callContractAtHashCalled = true - return mc.callContractAtHashBytes, mc.callContractAtHashErr -} - -func TestPassingBlockNumber(t *testing.T) { - t.Parallel() - mc := &mockPendingCaller{ - mockCaller: &mockCaller{ - codeAtBytes: []byte{1, 2, 3}, - }, - } - - bc := bind.NewBoundContract(common.HexToAddress("0x0"), abi.ABI{ - Methods: map[string]abi.Method{ - "something": { - Name: "something", - Outputs: abi.Arguments{}, - }, - }, - }, mc, nil, nil) - - blockNumber := big.NewInt(42) - - bc.Call(&bind.CallOpts{BlockNumber: blockNumber}, nil, "something") - - if mc.callContractBlockNumber != blockNumber { - t.Fatalf("CallContract() was not passed the block number") - } - - if mc.codeAtBlockNumber != blockNumber { - t.Fatalf("CodeAt() was not passed the block number") - } - - bc.Call(&bind.CallOpts{}, nil, "something") - - if mc.callContractBlockNumber != nil { - t.Fatalf("CallContract() was passed a block number when it should not have been") - } - - if mc.codeAtBlockNumber != nil { - t.Fatalf("CodeAt() was passed a block number when it should not have been") - } - - bc.Call(&bind.CallOpts{BlockNumber: blockNumber, Pending: true}, nil, "something") - - if !mc.pendingCallContractCalled { - t.Fatalf("CallContract() was not passed the block number") - } - - if !mc.pendingCodeAtCalled { - t.Fatalf("CodeAt() was not passed the block number") - } -} - -const hexData = "0x000000000000000000000000376c47978271565f56deb45495afa69e59c16ab200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000000158" - -func TestUnpackIndexedStringTyLogIntoMap(t *testing.T) { - t.Parallel() - hash := crypto.Keccak256Hash([]byte("testName")) - topics := []common.Hash{ - crypto.Keccak256Hash([]byte("received(string,address,uint256,bytes)")), - hash, - } - mockLog := newMockLog(topics, common.HexToHash("0x0")) - - abiString := `[{"anonymous":false,"inputs":[{"indexed":true,"name":"name","type":"string"},{"indexed":false,"name":"sender","type":"address"},{"indexed":false,"name":"amount","type":"uint256"},{"indexed":false,"name":"memo","type":"bytes"}],"name":"received","type":"event"}]` - parsedAbi, _ := abi.JSON(strings.NewReader(abiString)) - bc := bind.NewBoundContract(common.HexToAddress("0x0"), parsedAbi, nil, nil, nil) - - 
expectedReceivedMap := map[string]interface{}{ - "name": hash, - "sender": common.HexToAddress("0x376c47978271565f56DEB45495afa69E59c16Ab2"), - "amount": big.NewInt(1), - "memo": []byte{88}, - } - unpackAndCheck(t, bc, expectedReceivedMap, mockLog) -} - -func TestUnpackAnonymousLogIntoMap(t *testing.T) { - t.Parallel() - mockLog := newMockLog(nil, common.HexToHash("0x0")) - - abiString := `[{"anonymous":false,"inputs":[{"indexed":false,"name":"amount","type":"uint256"}],"name":"received","type":"event"}]` - parsedAbi, _ := abi.JSON(strings.NewReader(abiString)) - bc := bind.NewBoundContract(common.HexToAddress("0x0"), parsedAbi, nil, nil, nil) - - var received map[string]interface{} - err := bc.UnpackLogIntoMap(received, "received", mockLog) - if err == nil { - t.Error("unpacking anonymous event is not supported") - } - if err.Error() != "no event signature" { - t.Errorf("expected error 'no event signature', got '%s'", err) - } -} - -func TestUnpackIndexedSliceTyLogIntoMap(t *testing.T) { - t.Parallel() - sliceBytes, err := rlp.EncodeToBytes([]string{"name1", "name2", "name3", "name4"}) - if err != nil { - t.Fatal(err) - } - hash := crypto.Keccak256Hash(sliceBytes) - topics := []common.Hash{ - crypto.Keccak256Hash([]byte("received(string[],address,uint256,bytes)")), - hash, - } - mockLog := newMockLog(topics, common.HexToHash("0x0")) - - abiString := `[{"anonymous":false,"inputs":[{"indexed":true,"name":"names","type":"string[]"},{"indexed":false,"name":"sender","type":"address"},{"indexed":false,"name":"amount","type":"uint256"},{"indexed":false,"name":"memo","type":"bytes"}],"name":"received","type":"event"}]` - parsedAbi, _ := abi.JSON(strings.NewReader(abiString)) - bc := bind.NewBoundContract(common.HexToAddress("0x0"), parsedAbi, nil, nil, nil) - - expectedReceivedMap := map[string]interface{}{ - "names": hash, - "sender": common.HexToAddress("0x376c47978271565f56DEB45495afa69E59c16Ab2"), - "amount": big.NewInt(1), - "memo": []byte{88}, - } - unpackAndCheck(t, bc, expectedReceivedMap, mockLog) -} - -func TestUnpackIndexedArrayTyLogIntoMap(t *testing.T) { - t.Parallel() - arrBytes, err := rlp.EncodeToBytes([2]common.Address{common.HexToAddress("0x0"), common.HexToAddress("0x376c47978271565f56DEB45495afa69E59c16Ab2")}) - if err != nil { - t.Fatal(err) - } - hash := crypto.Keccak256Hash(arrBytes) - topics := []common.Hash{ - crypto.Keccak256Hash([]byte("received(address[2],address,uint256,bytes)")), - hash, - } - mockLog := newMockLog(topics, common.HexToHash("0x0")) - - abiString := `[{"anonymous":false,"inputs":[{"indexed":true,"name":"addresses","type":"address[2]"},{"indexed":false,"name":"sender","type":"address"},{"indexed":false,"name":"amount","type":"uint256"},{"indexed":false,"name":"memo","type":"bytes"}],"name":"received","type":"event"}]` - parsedAbi, _ := abi.JSON(strings.NewReader(abiString)) - bc := bind.NewBoundContract(common.HexToAddress("0x0"), parsedAbi, nil, nil, nil) - - expectedReceivedMap := map[string]interface{}{ - "addresses": hash, - "sender": common.HexToAddress("0x376c47978271565f56DEB45495afa69E59c16Ab2"), - "amount": big.NewInt(1), - "memo": []byte{88}, - } - unpackAndCheck(t, bc, expectedReceivedMap, mockLog) -} - -func TestUnpackIndexedFuncTyLogIntoMap(t *testing.T) { - t.Parallel() - mockAddress := common.HexToAddress("0x376c47978271565f56DEB45495afa69E59c16Ab2") - addrBytes := mockAddress.Bytes() - hash := crypto.Keccak256Hash([]byte("mockFunction(address,uint)")) - functionSelector := hash[:4] - functionTyBytes := append(addrBytes, 
functionSelector...) - var functionTy [24]byte - copy(functionTy[:], functionTyBytes[0:24]) - topics := []common.Hash{ - crypto.Keccak256Hash([]byte("received(function,address,uint256,bytes)")), - common.BytesToHash(functionTyBytes), - } - mockLog := newMockLog(topics, common.HexToHash("0x5c698f13940a2153440c6d19660878bc90219d9298fdcf37365aa8d88d40fc42")) - abiString := `[{"anonymous":false,"inputs":[{"indexed":true,"name":"function","type":"function"},{"indexed":false,"name":"sender","type":"address"},{"indexed":false,"name":"amount","type":"uint256"},{"indexed":false,"name":"memo","type":"bytes"}],"name":"received","type":"event"}]` - parsedAbi, _ := abi.JSON(strings.NewReader(abiString)) - bc := bind.NewBoundContract(common.HexToAddress("0x0"), parsedAbi, nil, nil, nil) - - expectedReceivedMap := map[string]interface{}{ - "function": functionTy, - "sender": common.HexToAddress("0x376c47978271565f56DEB45495afa69E59c16Ab2"), - "amount": big.NewInt(1), - "memo": []byte{88}, - } - unpackAndCheck(t, bc, expectedReceivedMap, mockLog) -} - -func TestUnpackIndexedBytesTyLogIntoMap(t *testing.T) { - t.Parallel() - bytes := []byte{1, 2, 3, 4, 5} - hash := crypto.Keccak256Hash(bytes) - topics := []common.Hash{ - crypto.Keccak256Hash([]byte("received(bytes,address,uint256,bytes)")), - hash, - } - mockLog := newMockLog(topics, common.HexToHash("0x5c698f13940a2153440c6d19660878bc90219d9298fdcf37365aa8d88d40fc42")) - - abiString := `[{"anonymous":false,"inputs":[{"indexed":true,"name":"content","type":"bytes"},{"indexed":false,"name":"sender","type":"address"},{"indexed":false,"name":"amount","type":"uint256"},{"indexed":false,"name":"memo","type":"bytes"}],"name":"received","type":"event"}]` - parsedAbi, _ := abi.JSON(strings.NewReader(abiString)) - bc := bind.NewBoundContract(common.HexToAddress("0x0"), parsedAbi, nil, nil, nil) - - expectedReceivedMap := map[string]interface{}{ - "content": hash, - "sender": common.HexToAddress("0x376c47978271565f56DEB45495afa69E59c16Ab2"), - "amount": big.NewInt(1), - "memo": []byte{88}, - } - unpackAndCheck(t, bc, expectedReceivedMap, mockLog) -} - -func TestTransactGasFee(t *testing.T) { - t.Parallel() - assert := assert.New(t) - - // GasTipCap and GasFeeCap - // When opts.GasTipCap and opts.GasFeeCap are nil - mt := &mockTransactor{baseFee: big.NewInt(100), gasTipCap: big.NewInt(5)} - bc := bind.NewBoundContract(common.Address{}, abi.ABI{}, nil, mt, nil) - opts := &bind.TransactOpts{Signer: mockSign} - tx, err := bc.Transact(opts, "") - assert.Nil(err) - assert.Equal(big.NewInt(5), tx.GasTipCap()) - assert.Equal(big.NewInt(205), tx.GasFeeCap()) - assert.Nil(opts.GasTipCap) - assert.Nil(opts.GasFeeCap) - assert.True(mt.suggestGasTipCapCalled) - - // Second call to Transact should use latest suggested GasTipCap - mt.gasTipCap = big.NewInt(6) - mt.suggestGasTipCapCalled = false - tx, err = bc.Transact(opts, "") - assert.Nil(err) - assert.Equal(big.NewInt(6), tx.GasTipCap()) - assert.Equal(big.NewInt(206), tx.GasFeeCap()) - assert.True(mt.suggestGasTipCapCalled) - - // GasPrice - // When opts.GasPrice is nil - mt = &mockTransactor{gasPrice: big.NewInt(5)} - bc = bind.NewBoundContract(common.Address{}, abi.ABI{}, nil, mt, nil) - opts = &bind.TransactOpts{Signer: mockSign} - tx, err = bc.Transact(opts, "") - assert.Nil(err) - assert.Equal(big.NewInt(5), tx.GasPrice()) - assert.Nil(opts.GasPrice) - assert.True(mt.suggestGasPriceCalled) - - // Second call to Transact should use latest suggested GasPrice - mt.gasPrice = big.NewInt(6) - mt.suggestGasPriceCalled = false - 
tx, err = bc.Transact(opts, "") - assert.Nil(err) - assert.Equal(big.NewInt(6), tx.GasPrice()) - assert.True(mt.suggestGasPriceCalled) -} - -func unpackAndCheck(t *testing.T, bc *bind.BoundContract, expected map[string]interface{}, mockLog types.Log) { - received := make(map[string]interface{}) - if err := bc.UnpackLogIntoMap(received, "received", mockLog); err != nil { - t.Error(err) - } - - if len(received) != len(expected) { - t.Fatalf("unpacked map length %v not equal expected length of %v", len(received), len(expected)) - } - for name, elem := range expected { - if !reflect.DeepEqual(elem, received[name]) { - t.Errorf("field %v does not match expected, want %v, got %v", name, elem, received[name]) - } - } -} - -func newMockLog(topics []common.Hash, txHash common.Hash) types.Log { - return types.Log{ - Address: common.HexToAddress("0x0"), - Topics: topics, - Data: hexutil.MustDecode(hexData), - BlockNumber: uint64(26), - TxHash: txHash, - TxIndex: 111, - BlockHash: common.BytesToHash([]byte{1, 2, 3, 4, 5}), - Index: 7, - Removed: false, - } -} - -func TestCall(t *testing.T) { - t.Parallel() - var method, methodWithArg = "something", "somethingArrrrg" - tests := []struct { - name, method string - opts *bind.CallOpts - mc bind.ContractCaller - results *[]interface{} - wantErr bool - wantErrExact error - }{{ - name: "ok not pending", - mc: &mockCaller{ - codeAtBytes: []byte{0}, - }, - method: method, - }, { - name: "ok pending", - mc: &mockPendingCaller{ - pendingCodeAtBytes: []byte{0}, - }, - opts: &bind.CallOpts{ - Pending: true, - }, - method: method, - }, { - name: "ok hash", - mc: &mockBlockHashCaller{ - codeAtHashBytes: []byte{0}, - }, - opts: &bind.CallOpts{ - BlockHash: common.Hash{0xaa}, - }, - method: method, - }, { - name: "pack error, no method", - mc: new(mockCaller), - method: "else", - wantErr: true, - }, { - name: "interface error, pending but not a PendingContractCaller", - mc: new(mockCaller), - opts: &bind.CallOpts{ - Pending: true, - }, - method: method, - wantErrExact: bind.ErrNoPendingState, - }, { - name: "interface error, blockHash but not a BlockHashContractCaller", - mc: new(mockCaller), - opts: &bind.CallOpts{ - BlockHash: common.Hash{0xaa}, - }, - method: method, - wantErrExact: bind.ErrNoBlockHashState, - }, { - name: "pending call canceled", - mc: &mockPendingCaller{ - pendingCallContractErr: context.DeadlineExceeded, - }, - opts: &bind.CallOpts{ - Pending: true, - }, - method: method, - wantErrExact: context.DeadlineExceeded, - }, { - name: "pending code at error", - mc: &mockPendingCaller{ - pendingCodeAtErr: errors.New(""), - }, - opts: &bind.CallOpts{ - Pending: true, - }, - method: method, - wantErr: true, - }, { - name: "no pending code at", - mc: new(mockPendingCaller), - opts: &bind.CallOpts{ - Pending: true, - }, - method: method, - wantErrExact: bind.ErrNoCode, - }, { - name: "call contract error", - mc: &mockCaller{ - callContractErr: context.DeadlineExceeded, - }, - method: method, - wantErrExact: context.DeadlineExceeded, - }, { - name: "code at error", - mc: &mockCaller{ - codeAtErr: errors.New(""), - }, - method: method, - wantErr: true, - }, { - name: "no code at", - mc: new(mockCaller), - method: method, - wantErrExact: bind.ErrNoCode, - }, { - name: "call contract at hash error", - mc: &mockBlockHashCaller{ - callContractAtHashErr: context.DeadlineExceeded, - }, - opts: &bind.CallOpts{ - BlockHash: common.Hash{0xaa}, - }, - method: method, - wantErrExact: context.DeadlineExceeded, - }, { - name: "code at error", - mc: &mockBlockHashCaller{ - 
codeAtHashErr: errors.New(""), - }, - opts: &bind.CallOpts{ - BlockHash: common.Hash{0xaa}, - }, - method: method, - wantErr: true, - }, { - name: "no code at hash", - mc: new(mockBlockHashCaller), - opts: &bind.CallOpts{ - BlockHash: common.Hash{0xaa}, - }, - method: method, - wantErrExact: bind.ErrNoCode, - }, { - name: "unpack error missing arg", - mc: &mockCaller{ - codeAtBytes: []byte{0}, - }, - method: methodWithArg, - wantErr: true, - }, { - name: "interface unpack error", - mc: &mockCaller{ - codeAtBytes: []byte{0}, - }, - method: method, - results: &[]interface{}{0}, - wantErr: true, - }} - for _, test := range tests { - bc := bind.NewBoundContract(common.HexToAddress("0x0"), abi.ABI{ - Methods: map[string]abi.Method{ - method: { - Name: method, - Outputs: abi.Arguments{}, - }, - methodWithArg: { - Name: methodWithArg, - Outputs: abi.Arguments{abi.Argument{}}, - }, - }, - }, test.mc, nil, nil) - err := bc.Call(test.opts, test.results, test.method) - if test.wantErr || test.wantErrExact != nil { - if err == nil { - t.Fatalf("%q expected error", test.name) - } - if test.wantErrExact != nil && !errors.Is(err, test.wantErrExact) { - t.Fatalf("%q expected error %q but got %q", test.name, test.wantErrExact, err) - } - continue - } - if err != nil { - t.Fatalf("%q unexpected error: %v", test.name, err) - } - } -} - -// TestCrashers contains some strings which previously caused the abi codec to crash. -func TestCrashers(t *testing.T) { - t.Parallel() - abi.JSON(strings.NewReader(`[{"inputs":[{"type":"tuple[]","components":[{"type":"bool","name":"_1"}]}]}]`)) - abi.JSON(strings.NewReader(`[{"inputs":[{"type":"tuple[]","components":[{"type":"bool","name":"&"}]}]}]`)) - abi.JSON(strings.NewReader(`[{"inputs":[{"type":"tuple[]","components":[{"type":"bool","name":"----"}]}]}]`)) - abi.JSON(strings.NewReader(`[{"inputs":[{"type":"tuple[]","components":[{"type":"bool","name":"foo.Bar"}]}]}]`)) -} diff --git a/accounts/abi/bind/bind_test.go b/accounts/abi/bind/bind_test.go deleted file mode 100644 index 4a3a7c4bdf..0000000000 --- a/accounts/abi/bind/bind_test.go +++ /dev/null @@ -1,2166 +0,0 @@ -// Copyright 2016 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . 
- -package bind - -import ( - "fmt" - "os" - "os/exec" - "path/filepath" - "runtime" - "strings" - "testing" - - "github.com/ethereum/go-ethereum/common" -) - -var bindTests = []struct { - name string - contract string - bytecode []string - abi []string - imports string - tester string - fsigs []map[string]string - libs map[string]string - aliases map[string]string - types []string -}{ - // Test that the binding is available in combined and separate forms too - { - `Empty`, - `contract NilContract {}`, - []string{`606060405260068060106000396000f3606060405200`}, - []string{`[]`}, - `"github.com/ethereum/go-ethereum/common"`, - ` - if b, err := NewEmpty(common.Address{}, nil); b == nil || err != nil { - t.Fatalf("combined binding (%v) nil or error (%v) not nil", b, nil) - } - if b, err := NewEmptyCaller(common.Address{}, nil); b == nil || err != nil { - t.Fatalf("caller binding (%v) nil or error (%v) not nil", b, nil) - } - if b, err := NewEmptyTransactor(common.Address{}, nil); b == nil || err != nil { - t.Fatalf("transactor binding (%v) nil or error (%v) not nil", b, nil) - } - `, - nil, - nil, - nil, - nil, - }, - // Test that all the official sample contracts bind correctly - { - `Token`, - `https://ethereum.org/token`, - []string{`60606040526040516107fd3803806107fd83398101604052805160805160a05160c051929391820192909101600160a060020a0333166000908152600360209081526040822086905581548551838052601f6002600019610100600186161502019093169290920482018390047f290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e56390810193919290918801908390106100e857805160ff19168380011785555b506101189291505b8082111561017157600081556001016100b4565b50506002805460ff19168317905550505050610658806101a56000396000f35b828001600101855582156100ac579182015b828111156100ac5782518260005055916020019190600101906100fa565b50508060016000509080519060200190828054600181600116156101000203166002900490600052602060002090601f016020900481019282601f1061017557805160ff19168380011785555b506100c89291506100b4565b5090565b82800160010185558215610165579182015b8281111561016557825182600050559160200191906001019061018756606060405236156100775760e060020a600035046306fdde03811461007f57806323b872dd146100dc578063313ce5671461010e57806370a082311461011a57806395d89b4114610132578063a9059cbb1461018e578063cae9ca51146101bd578063dc3080f21461031c578063dd62ed3e14610341575b610365610002565b61036760008054602060026001831615610100026000190190921691909104601f810182900490910260809081016040526060828152929190828280156104eb5780601f106104c0576101008083540402835291602001916104eb565b6103d5600435602435604435600160a060020a038316600090815260036020526040812054829010156104f357610002565b6103e760025460ff1681565b6103d560043560036020526000908152604090205481565b610367600180546020600282841615610100026000190190921691909104601f810182900490910260809081016040526060828152929190828280156104eb5780601f106104c0576101008083540402835291602001916104eb565b610365600435602435600160a060020a033316600090815260036020526040902054819010156103f157610002565b60806020604435600481810135601f8101849004909302840160405260608381526103d5948235946024803595606494939101919081908382808284375094965050505050505060006000836004600050600033600160a060020a03168152602001908152602001600020600050600087600160a060020a031681526020019081526020016000206000508190555084905080600160a060020a0316638f4ffcb1338630876040518560e060020a0281526004018085600160a060020a0316815260200184815260200183600160a060020a03168152602001806020018281038252838181518152602001915080519060200190808383829060006004602084601f0104600f02600301f150905090810190601f1680156102f2578
0820380516001836020036101000a031916815260200191505b50955050505050506000604051808303816000876161da5a03f11561000257505050509392505050565b6005602090815260043560009081526040808220909252602435815220546103d59081565b60046020818152903560009081526040808220909252602435815220546103d59081565b005b60405180806020018281038252838181518152602001915080519060200190808383829060006004602084601f0104600f02600301f150905090810190601f1680156103c75780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b60408051918252519081900360200190f35b6060908152602090f35b600160a060020a03821660009081526040902054808201101561041357610002565b806003600050600033600160a060020a03168152602001908152602001600020600082828250540392505081905550806003600050600084600160a060020a0316815260200190815260200160002060008282825054019250508190555081600160a060020a031633600160a060020a03167fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef836040518082815260200191505060405180910390a35050565b820191906000526020600020905b8154815290600101906020018083116104ce57829003601f168201915b505050505081565b600160a060020a03831681526040812054808301101561051257610002565b600160a060020a0380851680835260046020908152604080852033949094168086529382528085205492855260058252808520938552929052908220548301111561055c57610002565b816003600050600086600160a060020a03168152602001908152602001600020600082828250540392505081905550816003600050600085600160a060020a03168152602001908152602001600020600082828250540192505081905550816005600050600086600160a060020a03168152602001908152602001600020600050600033600160a060020a0316815260200190815260200160002060008282825054019250508190555082600160a060020a031633600160a060020a03167fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef846040518082815260200191505060405180910390a3939250505056`}, - 
[]string{`[{"constant":true,"inputs":[],"name":"name","outputs":[{"name":"","type":"string"}],"type":"function"},{"constant":false,"inputs":[{"name":"_from","type":"address"},{"name":"_to","type":"address"},{"name":"_value","type":"uint256"}],"name":"transferFrom","outputs":[{"name":"success","type":"bool"}],"type":"function"},{"constant":true,"inputs":[],"name":"decimals","outputs":[{"name":"","type":"uint8"}],"type":"function"},{"constant":true,"inputs":[{"name":"","type":"address"}],"name":"balanceOf","outputs":[{"name":"","type":"uint256"}],"type":"function"},{"constant":true,"inputs":[],"name":"symbol","outputs":[{"name":"","type":"string"}],"type":"function"},{"constant":false,"inputs":[{"name":"_to","type":"address"},{"name":"_value","type":"uint256"}],"name":"transfer","outputs":[],"type":"function"},{"constant":false,"inputs":[{"name":"_spender","type":"address"},{"name":"_value","type":"uint256"},{"name":"_extraData","type":"bytes"}],"name":"approveAndCall","outputs":[{"name":"success","type":"bool"}],"type":"function"},{"constant":true,"inputs":[{"name":"","type":"address"},{"name":"","type":"address"}],"name":"spentAllowance","outputs":[{"name":"","type":"uint256"}],"type":"function"},{"constant":true,"inputs":[{"name":"","type":"address"},{"name":"","type":"address"}],"name":"allowance","outputs":[{"name":"","type":"uint256"}],"type":"function"},{"inputs":[{"name":"initialSupply","type":"uint256"},{"name":"tokenName","type":"string"},{"name":"decimalUnits","type":"uint8"},{"name":"tokenSymbol","type":"string"}],"type":"constructor"},{"anonymous":false,"inputs":[{"indexed":true,"name":"from","type":"address"},{"indexed":true,"name":"to","type":"address"},{"indexed":false,"name":"value","type":"uint256"}],"name":"Transfer","type":"event"}]`}, - `"github.com/ethereum/go-ethereum/common"`, - ` - if b, err := NewToken(common.Address{}, nil); b == nil || err != nil { - t.Fatalf("binding (%v) nil or error (%v) not nil", b, nil) - } - `, - nil, - nil, - nil, - nil, - }, - { - `Crowdsale`, - `https://ethereum.org/crowdsale`, - 
[]string{`606060408190526007805460ff1916905560a0806105a883396101006040529051608051915160c05160e05160008054600160a060020a03199081169095178155670de0b6b3a7640000958602600155603c9093024201600355930260045560058054909216909217905561052f90819061007990396000f36060604052361561006c5760e060020a600035046301cb3b20811461008257806329dcb0cf1461014457806338af3eed1461014d5780636e66f6e91461015f5780637a3a0e84146101715780637b3e5e7b1461017a578063a035b1fe14610183578063dc0d3dff1461018c575b61020060075460009060ff161561032357610002565b61020060035460009042106103205760025460015490106103cb576002548154600160a060020a0316908290606082818181858883f150915460025460408051600160a060020a039390931683526020830191909152818101869052517fe842aea7a5f1b01049d752008c53c52890b1a6daf660cf39e8eec506112bbdf6945090819003909201919050a15b60405160008054600160a060020a039081169230909116319082818181858883f150506007805460ff1916600117905550505050565b6103a160035481565b6103ab600054600160a060020a031681565b6103ab600554600160a060020a031681565b6103a160015481565b6103a160025481565b6103a160045481565b6103be60043560068054829081101561000257506000526002027ff652222313e28459528d920b65115c16c04f3efc82aaedc97be59f3f377c0d3f8101547ff652222313e28459528d920b65115c16c04f3efc82aaedc97be59f3f377c0d409190910154600160a060020a03919091169082565b005b505050815481101561000257906000526020600020906002020160005060008201518160000160006101000a815481600160a060020a030219169083021790555060208201518160010160005055905050806002600082828250540192505081905550600560009054906101000a9004600160a060020a0316600160a060020a031663a9059cbb3360046000505484046040518360e060020a0281526004018083600160a060020a03168152602001828152602001925050506000604051808303816000876161da5a03f11561000257505060408051600160a060020a03331681526020810184905260018183015290517fe842aea7a5f1b01049d752008c53c52890b1a6daf660cf39e8eec506112bbdf692509081900360600190a15b50565b5060a0604052336060908152346080819052600680546001810180835592939282908280158290116102025760020281600202836000526020600020918201910161020291905b8082111561039d57805473ffffffffffffffffffffffffffffffffffffffff19168155600060019190910190815561036a565b5090565b6060908152602090f35b600160a060020a03166060908152602090f35b6060918252608052604090f35b5b60065481101561010e576006805482908110156100025760009182526002027ff652222313e28459528d920b65115c16c04f3efc82aaedc97be59f3f377c0d3f0190600680549254600160a060020a0316928490811015610002576002027ff652222313e28459528d920b65115c16c04f3efc82aaedc97be59f3f377c0d40015460405190915082818181858883f19350505050507fe842aea7a5f1b01049d752008c53c52890b1a6daf660cf39e8eec506112bbdf660066000508281548110156100025760008290526002027ff652222313e28459528d920b65115c16c04f3efc82aaedc97be59f3f377c0d3f01548154600160a060020a039190911691908490811015610002576002027ff652222313e28459528d920b65115c16c04f3efc82aaedc97be59f3f377c0d40015460408051600160a060020a0394909416845260208401919091526000838201525191829003606001919050a16001016103cc56`}, - 
[]string{`[{"constant":false,"inputs":[],"name":"checkGoalReached","outputs":[],"type":"function"},{"constant":true,"inputs":[],"name":"deadline","outputs":[{"name":"","type":"uint256"}],"type":"function"},{"constant":true,"inputs":[],"name":"beneficiary","outputs":[{"name":"","type":"address"}],"type":"function"},{"constant":true,"inputs":[],"name":"tokenReward","outputs":[{"name":"","type":"address"}],"type":"function"},{"constant":true,"inputs":[],"name":"fundingGoal","outputs":[{"name":"","type":"uint256"}],"type":"function"},{"constant":true,"inputs":[],"name":"amountRaised","outputs":[{"name":"","type":"uint256"}],"type":"function"},{"constant":true,"inputs":[],"name":"price","outputs":[{"name":"","type":"uint256"}],"type":"function"},{"constant":true,"inputs":[{"name":"","type":"uint256"}],"name":"funders","outputs":[{"name":"addr","type":"address"},{"name":"amount","type":"uint256"}],"type":"function"},{"inputs":[{"name":"ifSuccessfulSendTo","type":"address"},{"name":"fundingGoalInEthers","type":"uint256"},{"name":"durationInMinutes","type":"uint256"},{"name":"etherCostOfEachToken","type":"uint256"},{"name":"addressOfTokenUsedAsReward","type":"address"}],"type":"constructor"},{"anonymous":false,"inputs":[{"indexed":false,"name":"backer","type":"address"},{"indexed":false,"name":"amount","type":"uint256"},{"indexed":false,"name":"isContribution","type":"bool"}],"name":"FundTransfer","type":"event"}]`}, - `"github.com/ethereum/go-ethereum/common"`, - ` - if b, err := NewCrowdsale(common.Address{}, nil); b == nil || err != nil { - t.Fatalf("binding (%v) nil or error (%v) not nil", b, nil) - } - `, - nil, - nil, - nil, - nil, - }, - { - `DAO`, - `https://ethereum.org/dao`, - []string{`606060405260405160808061145f833960e06040529051905160a05160c05160008054600160a060020a03191633179055600184815560028490556003839055600780549182018082558280158290116100b8576003028160030283600052602060002091820191016100b891906101c8565b50506060919091015160029190910155600160a060020a0381166000146100a65760008054600160a060020a031916821790555b505050506111f18061026e6000396000f35b505060408051608081018252600080825260208281018290528351908101845281815292820192909252426060820152600780549194509250811015610002579081527fa66cc928b5edb82af9bd49922954155ab7b0942694bea4ce44661d9a8736c6889050815181546020848101517401000000000000000000000000000000000000000002600160a060020a03199290921690921760a060020a60ff021916178255604083015180516001848101805460008281528690209195600293821615610100026000190190911692909204601f9081018390048201949192919091019083901061023e57805160ff19168380011785555b50610072929150610226565b5050600060028201556001015b8082111561023a578054600160a860020a031916815560018181018054600080835592600290821615610100026000190190911604601f81901061020c57506101bb565b601f0160209004906000526020600020908101906101bb91905b8082111561023a5760008155600101610226565b5090565b828001600101855582156101af579182015b828111156101af57825182600050559160200191906001019061025056606060405236156100b95760e060020a6000350463013cf08b81146100bb578063237e9492146101285780633910682114610281578063400e3949146102995780635daf08ca146102a257806369bd34361461032f5780638160f0b5146103385780638da5cb5b146103415780639644fcbd14610353578063aa02a90f146103be578063b1050da5146103c7578063bcca1fd3146104b5578063d3c0715b146104dc578063eceb29451461058d578063f2fde38b1461067b575b005b61069c6004356004805482908110156100025790600052602060002090600a02016000506005810154815460018301546003840154600485015460068601546007870154600160a060020a03959095169750929560020194919360ff828116946101009093041692919089565
b60408051602060248035600481810135601f81018590048502860185019096528585526107759581359591946044949293909201918190840183828082843750949650505050505050600060006004600050848154811015610002575090527f8a35acfbc15ff81a39ae7d344fd709f28e8600b4aa8c65c6b64bfe7fe36bd19e600a8402908101547f8a35acfbc15ff81a39ae7d344fd709f28e8600b4aa8c65c6b64bfe7fe36bd19b909101904210806101e65750600481015460ff165b8061026757508060000160009054906101000a9004600160a060020a03168160010160005054846040518084600160a060020a0316606060020a0281526014018381526020018280519060200190808383829060006004602084601f0104600f02600301f15090500193505050506040518091039020816007016000505414155b8061027757506001546005820154105b1561109257610002565b61077560043560066020526000908152604090205481565b61077560055481565b61078760043560078054829081101561000257506000526003026000805160206111d18339815191528101547fa66cc928b5edb82af9bd49922954155ab7b0942694bea4ce44661d9a8736c68a820154600160a060020a0382169260a060020a90920460ff16917fa66cc928b5edb82af9bd49922954155ab7b0942694bea4ce44661d9a8736c689019084565b61077560025481565b61077560015481565b610830600054600160a060020a031681565b604080516020604435600481810135601f81018490048402850184019095528484526100b9948135946024803595939460649492939101918190840183828082843750949650505050505050600080548190600160a060020a03908116339091161461084d57610002565b61077560035481565b604080516020604435600481810135601f8101849004840285018401909552848452610775948135946024803595939460649492939101918190840183828082843750506040805160209735808a0135601f81018a90048a0283018a019093528282529698976084979196506024909101945090925082915084018382808284375094965050505050505033600160a060020a031660009081526006602052604081205481908114806104ab5750604081205460078054909190811015610002579082526003026000805160206111d1833981519152015460a060020a900460ff16155b15610ce557610002565b6100b960043560243560443560005433600160a060020a03908116911614610b1857610002565b604080516020604435600481810135601f810184900484028501840190955284845261077594813594602480359593946064949293910191819084018382808284375094965050505050505033600160a060020a031660009081526006602052604081205481908114806105835750604081205460078054909190811015610002579082526003026000805160206111d18339815191520181505460a060020a900460ff16155b15610f1d57610002565b604080516020606435600481810135601f81018490048402850184019095528484526107759481359460248035956044359560849492019190819084018382808284375094965050505050505060006000600460005086815481101561000257908252600a027f8a35acfbc15ff81a39ae7d344fd709f28e8600b4aa8c65c6b64bfe7fe36bd19b01815090508484846040518084600160a060020a0316606060020a0281526014018381526020018280519060200190808383829060006004602084601f0104600f02600301f150905001935050505060405180910390208160070160005054149150610cdc565b6100b960043560005433600160a060020a03908116911614610f0857610002565b604051808a600160a060020a031681526020018981526020018060200188815260200187815260200186815260200185815260200184815260200183815260200182810382528981815460018160011615610100020316600290048152602001915080546001816001161561010002031660029004801561075e5780601f106107335761010080835404028352916020019161075e565b820191906000526020600020905b81548152906001019060200180831161074157829003601f168201915b50509a505050505050505050505060405180910390f35b60408051918252519081900360200190f35b60408051600160a060020a038616815260208101859052606081018390526080918101828152845460026001821615610100026000190190911604928201839052909160a08301908590801561081e5780601f106107f35761010080835404028352916020019161081e565b820191906000526020600020905b81548152906001019060200180831161080157829003601f
168201915b50509550505050505060405180910390f35b60408051600160a060020a03929092168252519081900360200190f35b600160a060020a03851660009081526006602052604081205414156108a957604060002060078054918290556001820180825582801582901161095c5760030281600302836000526020600020918201910161095c9190610a4f565b600160a060020a03851660009081526006602052604090205460078054919350908390811015610002575060005250600381026000805160206111d183398151915201805474ff0000000000000000000000000000000000000000191660a060020a85021781555b60408051600160a060020a03871681526020810186905281517f27b022af4a8347100c7a041ce5ccf8e14d644ff05de696315196faae8cd50c9b929181900390910190a15050505050565b505050915081506080604051908101604052808681526020018581526020018481526020014281526020015060076000508381548110156100025790600052602060002090600302016000508151815460208481015160a060020a02600160a060020a03199290921690921774ff00000000000000000000000000000000000000001916178255604083015180516001848101805460008281528690209195600293821615610100026000190190911692909204601f90810183900482019491929190910190839010610ad357805160ff19168380011785555b50610b03929150610abb565b5050600060028201556001015b80821115610acf57805474ffffffffffffffffffffffffffffffffffffffffff1916815560018181018054600080835592600290821615610100026000190190911604601f819010610aa15750610a42565b601f016020900490600052602060002090810190610a4291905b80821115610acf5760008155600101610abb565b5090565b82800160010185558215610a36579182015b82811115610a36578251826000505591602001919060010190610ae5565b50506060919091015160029190910155610911565b600183905560028290556003819055604080518481526020810184905280820183905290517fa439d3fa452be5e0e1e24a8145e715f4fd8b9c08c96a42fd82a855a85e5d57de9181900360600190a1505050565b50508585846040518084600160a060020a0316606060020a0281526014018381526020018280519060200190808383829060006004602084601f0104600f02600301f150905001935050505060405180910390208160070160005081905550600260005054603c024201816003016000508190555060008160040160006101000a81548160ff0219169083021790555060008160040160016101000a81548160ff02191690830217905550600081600501600050819055507f646fec02522b41e7125cfc859a64fd4f4cefd5dc3b6237ca0abe251ded1fa881828787876040518085815260200184600160a060020a03168152602001838152602001806020018281038252838181518152602001915080519060200190808383829060006004602084601f0104600f02600301f150905090810190601f168015610cc45780820380516001836020036101000a031916815260200191505b509550505050505060405180910390a1600182016005555b50949350505050565b6004805460018101808355909190828015829011610d1c57600a0281600a028360005260206000209182019101610d1c9190610db8565b505060048054929450918491508110156100025790600052602060002090600a02016000508054600160a060020a031916871781556001818101879055855160028381018054600082815260209081902096975091959481161561010002600019011691909104601f90810182900484019391890190839010610ed857805160ff19168380011785555b50610b6c929150610abb565b50506001015b80821115610acf578054600160a060020a03191681556000600182810182905560028381018054848255909281161561010002600019011604601f819010610e9c57505b5060006003830181905560048301805461ffff191690556005830181905560068301819055600783018190556008830180548282559082526020909120610db2916002028101905b80821115610acf57805474ffffffffffffffffffffffffffffffffffffffffff1916815560018181018054600080835592600290821615610100026000190190911604601f819010610eba57505b5050600101610e44565b601f016020900490600052602060002090810190610dfc9190610abb565b601f016020900490600052602060002090810190610e929190610abb565b82800160010185558215610da6579182015b82811115610da6578251826000505591602001919060010190610eea565b60008
054600160a060020a0319168217905550565b600480548690811015610002576000918252600a027f8a35acfbc15ff81a39ae7d344fd709f28e8600b4aa8c65c6b64bfe7fe36bd19b01905033600160a060020a0316600090815260098201602052604090205490915060ff1660011415610f8457610002565b33600160a060020a031660009081526009820160205260409020805460ff1916600190811790915560058201805490910190558315610fcd576006810180546001019055610fda565b6006810180546000190190555b7fc34f869b7ff431b034b7b9aea9822dac189a685e0b015c7d1be3add3f89128e8858533866040518085815260200184815260200183600160a060020a03168152602001806020018281038252838181518152602001915080519060200190808383829060006004602084601f0104600f02600301f150905090810190601f16801561107a5780820380516001836020036101000a031916815260200191505b509550505050505060405180910390a1509392505050565b6006810154600354901315611158578060000160009054906101000a9004600160a060020a0316600160a060020a03168160010160005054670de0b6b3a76400000284604051808280519060200190808383829060006004602084601f0104600f02600301f150905090810190601f1680156111225780820380516001836020036101000a031916815260200191505b5091505060006040518083038185876185025a03f15050505060048101805460ff191660011761ff00191661010017905561116d565b60048101805460ff191660011761ff00191690555b60068101546005820154600483015460408051888152602081019490945283810192909252610100900460ff166060830152517fd220b7272a8b6d0d7d6bcdace67b936a8f175e6d5c1b3ee438b72256b32ab3af9181900360800190a1509291505056a66cc928b5edb82af9bd49922954155ab7b0942694bea4ce44661d9a8736c688`}, - []string{`[{"constant":true,"inputs":[{"name":"","type":"uint256"}],"name":"proposals","outputs":[{"name":"recipient","type":"address"},{"name":"amount","type":"uint256"},{"name":"description","type":"string"},{"name":"votingDeadline","type":"uint256"},{"name":"executed","type":"bool"},{"name":"proposalPassed","type":"bool"},{"name":"numberOfVotes","type":"uint256"},{"name":"currentResult","type":"int256"},{"name":"proposalHash","type":"bytes32"}],"type":"function"},{"constant":false,"inputs":[{"name":"proposalNumber","type":"uint256"},{"name":"transactionBytecode","type":"bytes"}],"name":"executeProposal","outputs":[{"name":"result","type":"int256"}],"type":"function"},{"constant":true,"inputs":[{"name":"","type":"address"}],"name":"memberId","outputs":[{"name":"","type":"uint256"}],"type":"function"},{"constant":true,"inputs":[],"name":"numProposals","outputs":[{"name":"","type":"uint256"}],"type":"function"},{"constant":true,"inputs":[{"name":"","type":"uint256"}],"name":"members","outputs":[{"name":"member","type":"address"},{"name":"canVote","type":"bool"},{"name":"name","type":"string"},{"name":"memberSince","type":"uint256"}],"type":"function"},{"constant":true,"inputs":[],"name":"debatingPeriodInMinutes","outputs":[{"name":"","type":"uint256"}],"type":"function"},{"constant":true,"inputs":[],"name":"minimumQuorum","outputs":[{"name":"","type":"uint256"}],"type":"function"},{"constant":true,"inputs":[],"name":"owner","outputs":[{"name":"","type":"address"}],"type":"function"},{"constant":false,"inputs":[{"name":"targetMember","type":"address"},{"name":"canVote","type":"bool"},{"name":"memberName","type":"string"}],"name":"changeMembership","outputs":[],"type":"function"},{"constant":true,"inputs":[],"name":"majorityMargin","outputs":[{"name":"","type":"int256"}],"type":"function"},{"constant":false,"inputs":[{"name":"beneficiary","type":"address"},{"name":"etherAmount","type":"uint256"},{"name":"JobDescription","type":"string"},{"name":"transactionBytecode","type":"bytes"}],"name":"newProposal","outputs":[{"name":"proposalI
D","type":"uint256"}],"type":"function"},{"constant":false,"inputs":[{"name":"minimumQuorumForProposals","type":"uint256"},{"name":"minutesForDebate","type":"uint256"},{"name":"marginOfVotesForMajority","type":"int256"}],"name":"changeVotingRules","outputs":[],"type":"function"},{"constant":false,"inputs":[{"name":"proposalNumber","type":"uint256"},{"name":"supportsProposal","type":"bool"},{"name":"justificationText","type":"string"}],"name":"vote","outputs":[{"name":"voteID","type":"uint256"}],"type":"function"},{"constant":true,"inputs":[{"name":"proposalNumber","type":"uint256"},{"name":"beneficiary","type":"address"},{"name":"etherAmount","type":"uint256"},{"name":"transactionBytecode","type":"bytes"}],"name":"checkProposalCode","outputs":[{"name":"codeChecksOut","type":"bool"}],"type":"function"},{"constant":false,"inputs":[{"name":"newOwner","type":"address"}],"name":"transferOwnership","outputs":[],"type":"function"},{"inputs":[{"name":"minimumQuorumForProposals","type":"uint256"},{"name":"minutesForDebate","type":"uint256"},{"name":"marginOfVotesForMajority","type":"int256"},{"name":"congressLeader","type":"address"}],"type":"constructor"},{"anonymous":false,"inputs":[{"indexed":false,"name":"proposalID","type":"uint256"},{"indexed":false,"name":"recipient","type":"address"},{"indexed":false,"name":"amount","type":"uint256"},{"indexed":false,"name":"description","type":"string"}],"name":"ProposalAdded","type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"name":"proposalID","type":"uint256"},{"indexed":false,"name":"position","type":"bool"},{"indexed":false,"name":"voter","type":"address"},{"indexed":false,"name":"justification","type":"string"}],"name":"Voted","type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"name":"proposalID","type":"uint256"},{"indexed":false,"name":"result","type":"int256"},{"indexed":false,"name":"quorum","type":"uint256"},{"indexed":false,"name":"active","type":"bool"}],"name":"ProposalTallied","type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"name":"member","type":"address"},{"indexed":false,"name":"isMember","type":"bool"}],"name":"MembershipChanged","type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"name":"minimumQuorum","type":"uint256"},{"indexed":false,"name":"debatingPeriodInMinutes","type":"uint256"},{"indexed":false,"name":"majorityMargin","type":"int256"}],"name":"ChangeOfRules","type":"event"}]`}, - `"github.com/ethereum/go-ethereum/common"`, - ` - if b, err := NewDAO(common.Address{}, nil); b == nil || err != nil { - t.Fatalf("binding (%v) nil or error (%v) not nil", b, nil) - } - `, - nil, - nil, - nil, - nil, - }, - // Test that named and anonymous inputs are handled correctly - { - `InputChecker`, ``, []string{``}, - []string{` - [ - {"type":"function","name":"noInput","constant":true,"inputs":[],"outputs":[]}, - {"type":"function","name":"namedInput","constant":true,"inputs":[{"name":"str","type":"string"}],"outputs":[]}, - {"type":"function","name":"anonInput","constant":true,"inputs":[{"name":"","type":"string"}],"outputs":[]}, - {"type":"function","name":"namedInputs","constant":true,"inputs":[{"name":"str1","type":"string"},{"name":"str2","type":"string"}],"outputs":[]}, - {"type":"function","name":"anonInputs","constant":true,"inputs":[{"name":"","type":"string"},{"name":"","type":"string"}],"outputs":[]}, - {"type":"function","name":"mixedInputs","constant":true,"inputs":[{"name":"","type":"string"},{"name":"str","type":"string"}],"outputs":[]} - ] - `}, - ` - "fmt" - - 
"github.com/ethereum/go-ethereum/common" - `, - `if b, err := NewInputChecker(common.Address{}, nil); b == nil || err != nil { - t.Fatalf("binding (%v) nil or error (%v) not nil", b, nil) - } else if false { // Don't run, just compile and test types - var err error - - err = b.NoInput(nil) - err = b.NamedInput(nil, "") - err = b.AnonInput(nil, "") - err = b.NamedInputs(nil, "", "") - err = b.AnonInputs(nil, "", "") - err = b.MixedInputs(nil, "", "") - - fmt.Println(err) - }`, - nil, - nil, - nil, - nil, - }, - // Test that named and anonymous outputs are handled correctly - { - `OutputChecker`, ``, []string{``}, - []string{` - [ - {"type":"function","name":"noOutput","constant":true,"inputs":[],"outputs":[]}, - {"type":"function","name":"namedOutput","constant":true,"inputs":[],"outputs":[{"name":"str","type":"string"}]}, - {"type":"function","name":"anonOutput","constant":true,"inputs":[],"outputs":[{"name":"","type":"string"}]}, - {"type":"function","name":"namedOutputs","constant":true,"inputs":[],"outputs":[{"name":"str1","type":"string"},{"name":"str2","type":"string"}]}, - {"type":"function","name":"collidingOutputs","constant":true,"inputs":[],"outputs":[{"name":"str","type":"string"},{"name":"Str","type":"string"}]}, - {"type":"function","name":"anonOutputs","constant":true,"inputs":[],"outputs":[{"name":"","type":"string"},{"name":"","type":"string"}]}, - {"type":"function","name":"mixedOutputs","constant":true,"inputs":[],"outputs":[{"name":"","type":"string"},{"name":"str","type":"string"}]} - ] - `}, - ` - "fmt" - - "github.com/ethereum/go-ethereum/common" - `, - `if b, err := NewOutputChecker(common.Address{}, nil); b == nil || err != nil { - t.Fatalf("binding (%v) nil or error (%v) not nil", b, nil) - } else if false { // Don't run, just compile and test types - var str1, str2 string - var err error - - err = b.NoOutput(nil) - str1, err = b.NamedOutput(nil) - str1, err = b.AnonOutput(nil) - res, _ := b.NamedOutputs(nil) - str1, str2, err = b.CollidingOutputs(nil) - str1, str2, err = b.AnonOutputs(nil) - str1, str2, err = b.MixedOutputs(nil) - - fmt.Println(str1, str2, res.Str1, res.Str2, err) - }`, - nil, - nil, - nil, - nil, - }, - // Tests that named, anonymous and indexed events are handled correctly - { - `EventChecker`, ``, []string{``}, - []string{` - [ - {"type":"event","name":"empty","inputs":[]}, - {"type":"event","name":"indexed","inputs":[{"name":"addr","type":"address","indexed":true},{"name":"num","type":"int256","indexed":true}]}, - {"type":"event","name":"mixed","inputs":[{"name":"addr","type":"address","indexed":true},{"name":"num","type":"int256"}]}, - {"type":"event","name":"anonymous","anonymous":true,"inputs":[]}, - {"type":"event","name":"dynamic","inputs":[{"name":"idxStr","type":"string","indexed":true},{"name":"idxDat","type":"bytes","indexed":true},{"name":"str","type":"string"},{"name":"dat","type":"bytes"}]}, - {"type":"event","name":"unnamed","inputs":[{"name":"","type":"uint256","indexed": true},{"name":"","type":"uint256","indexed":true}]} - ] - `}, - ` - "fmt" - "math/big" - "reflect" - - "github.com/ethereum/go-ethereum/common" - `, - `if e, err := NewEventChecker(common.Address{}, nil); e == nil || err != nil { - t.Fatalf("binding (%v) nil or error (%v) not nil", e, nil) - } else if false { // Don't run, just compile and test types - var ( - err error - res bool - str string - dat []byte - hash common.Hash - ) - _, err = e.FilterEmpty(nil) - _, err = e.FilterIndexed(nil, []common.Address{}, []*big.Int{}) - - mit, err := e.FilterMixed(nil, 
[]common.Address{}) - - res = mit.Next() // Make sure the iterator has a Next method - err = mit.Error() // Make sure the iterator has an Error method - err = mit.Close() // Make sure the iterator has a Close method - - fmt.Println(mit.Event.Raw.BlockHash) // Make sure the raw log is contained within the results - fmt.Println(mit.Event.Num) // Make sure the unpacked non-indexed fields are present - fmt.Println(mit.Event.Addr) // Make sure the reconstructed indexed fields are present - - dit, err := e.FilterDynamic(nil, []string{}, [][]byte{}) - - str = dit.Event.Str // Make sure non-indexed strings retain their type - dat = dit.Event.Dat // Make sure non-indexed bytes retain their type - hash = dit.Event.IdxStr // Make sure indexed strings turn into hashes - hash = dit.Event.IdxDat // Make sure indexed bytes turn into hashes - - sink := make(chan *EventCheckerMixed) - sub, err := e.WatchMixed(nil, sink, []common.Address{}) - defer sub.Unsubscribe() - - event := <-sink - fmt.Println(event.Raw.BlockHash) // Make sure the raw log is contained within the results - fmt.Println(event.Num) // Make sure the unpacked non-indexed fields are present - fmt.Println(event.Addr) // Make sure the reconstructed indexed fields are present - - fmt.Println(res, str, dat, hash, err) - - oit, err := e.FilterUnnamed(nil, []*big.Int{}, []*big.Int{}) - - arg0 := oit.Event.Arg0 // Make sure unnamed arguments are handled correctly - arg1 := oit.Event.Arg1 // Make sure unnamed arguments are handled correctly - fmt.Println(arg0, arg1) - } - // Run a tiny reflection test to ensure disallowed methods don't appear - if _, ok := reflect.TypeOf(&EventChecker{}).MethodByName("FilterAnonymous"); ok { - t.Errorf("binding has disallowed method (FilterAnonymous)") - }`, - nil, - nil, - nil, - nil, - }, - // Test that contract interactions (deploy, transact and call) generate working code - { - `Interactor`, - ` - contract Interactor { - string public deployString; - string public transactString; - - function Interactor(string str) { - deployString = str; - } - - function transact(string str) { - transactString = str; - } - } - `, - 
[]string{`6060604052604051610328380380610328833981016040528051018060006000509080519060200190828054600181600116156101000203166002900490600052602060002090601f016020900481019282601f10608d57805160ff19168380011785555b50607c9291505b8082111560ba57838155600101606b565b50505061026a806100be6000396000f35b828001600101855582156064579182015b828111156064578251826000505591602001919060010190609e565b509056606060405260e060020a60003504630d86a0e181146100315780636874e8091461008d578063d736c513146100ea575b005b610190600180546020600282841615610100026000190190921691909104601f810182900490910260809081016040526060828152929190828280156102295780601f106101fe57610100808354040283529160200191610229565b61019060008054602060026001831615610100026000190190921691909104601f810182900490910260809081016040526060828152929190828280156102295780601f106101fe57610100808354040283529160200191610229565b60206004803580820135601f81018490049093026080908101604052606084815261002f946024939192918401918190838280828437509496505050505050508060016000509080519060200190828054600181600116156101000203166002900490600052602060002090601f016020900481019282601f1061023157805160ff19168380011785555b506102619291505b808211156102665760008155830161017d565b60405180806020018281038252838181518152602001915080519060200190808383829060006004602084601f0104600f02600301f150905090810190601f1680156101f05780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b820191906000526020600020905b81548152906001019060200180831161020c57829003601f168201915b505050505081565b82800160010185558215610175579182015b82811115610175578251826000505591602001919060010190610243565b505050565b509056`}, - []string{`[{"constant":true,"inputs":[],"name":"transactString","outputs":[{"name":"","type":"string"}],"type":"function"},{"constant":true,"inputs":[],"name":"deployString","outputs":[{"name":"","type":"string"}],"type":"function"},{"constant":false,"inputs":[{"name":"str","type":"string"}],"name":"transact","outputs":[],"type":"function"},{"inputs":[{"name":"str","type":"string"}],"type":"constructor"}]`}, - ` - "math/big" - - "github.com/ethereum/go-ethereum/accounts/abi/bind" - "github.com/ethereum/go-ethereum/accounts/abi/bind/backends" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/crypto" - `, - ` - // Generate a new random account and a funded simulator - key, _ := crypto.GenerateKey() - auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) - - sim := backends.NewSimulatedBackend(types.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000) - defer sim.Close() - - // Deploy an interaction tester contract and call a transaction on it - _, _, interactor, err := DeployInteractor(auth, sim, "Deploy string") - if err != nil { - t.Fatalf("Failed to deploy interactor contract: %v", err) - } - sim.Commit() - if _, err := interactor.Transact(auth, "Transact string"); err != nil { - t.Fatalf("Failed to transact with interactor contract: %v", err) - } - // Commit all pending transactions in the simulator and check the contract state - sim.Commit() - - if str, err := interactor.DeployString(nil); err != nil { - t.Fatalf("Failed to retrieve deploy string: %v", err) - } else if str != "Deploy string" { - t.Fatalf("Deploy string mismatch: have '%s', want 'Deploy string'", str) - } - if str, err := interactor.TransactString(nil); err != nil { - t.Fatalf("Failed to retrieve transact string: %v", err) - } else if str != "Transact string" { - t.Fatalf("Transact string mismatch: have '%s', want 'Transact string'", str) - } - `, - 
nil, - nil, - nil, - nil, - }, - // Tests that plain values can be properly returned and deserialized - { - `Getter`, - ` - contract Getter { - function getter() constant returns (string, int, bytes32) { - return ("Hi", 1, sha3("")); - } - } - `, - []string{`606060405260dc8060106000396000f3606060405260e060020a6000350463993a04b78114601a575b005b600060605260c0604052600260809081527f486900000000000000000000000000000000000000000000000000000000000060a05260017fc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a47060e0829052610100819052606060c0908152600261012081905281906101409060a09080838184600060046012f1505081517fffff000000000000000000000000000000000000000000000000000000000000169091525050604051610160819003945092505050f3`}, - []string{`[{"constant":true,"inputs":[],"name":"getter","outputs":[{"name":"","type":"string"},{"name":"","type":"int256"},{"name":"","type":"bytes32"}],"type":"function"}]`}, - ` - "math/big" - - "github.com/ethereum/go-ethereum/accounts/abi/bind" - "github.com/ethereum/go-ethereum/accounts/abi/bind/backends" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/crypto" - `, - ` - // Generate a new random account and a funded simulator - key, _ := crypto.GenerateKey() - auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) - - sim := backends.NewSimulatedBackend(types.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000) - defer sim.Close() - - // Deploy a tuple tester contract and execute a structured call on it - _, _, getter, err := DeployGetter(auth, sim) - if err != nil { - t.Fatalf("Failed to deploy getter contract: %v", err) - } - sim.Commit() - - if str, num, _, err := getter.Getter(nil); err != nil { - t.Fatalf("Failed to call anonymous field retriever: %v", err) - } else if str != "Hi" || num.Cmp(big.NewInt(1)) != 0 { - t.Fatalf("Retrieved value mismatch: have %v/%v, want %v/%v", str, num, "Hi", 1) - } - `, - nil, - nil, - nil, - nil, - }, - // Tests that tuples can be properly returned and deserialized - { - `Tupler`, - ` - contract Tupler { - function tuple() constant returns (string a, int b, bytes32 c) { - return ("Hi", 1, sha3("")); - } - } - `, - []string{`606060405260dc8060106000396000f3606060405260e060020a60003504633175aae28114601a575b005b600060605260c0604052600260809081527f486900000000000000000000000000000000000000000000000000000000000060a05260017fc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a47060e0829052610100819052606060c0908152600261012081905281906101409060a09080838184600060046012f1505081517fffff000000000000000000000000000000000000000000000000000000000000169091525050604051610160819003945092505050f3`}, - []string{`[{"constant":true,"inputs":[],"name":"tuple","outputs":[{"name":"a","type":"string"},{"name":"b","type":"int256"},{"name":"c","type":"bytes32"}],"type":"function"}]`}, - ` - "math/big" - - "github.com/ethereum/go-ethereum/accounts/abi/bind" - "github.com/ethereum/go-ethereum/accounts/abi/bind/backends" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/crypto" - `, - ` - // Generate a new random account and a funded simulator - key, _ := crypto.GenerateKey() - auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) - - sim := backends.NewSimulatedBackend(types.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000) - defer sim.Close() - - // Deploy a tuple tester contract and execute a structured call on it - _, _, tupler, err := DeployTupler(auth, sim) - if err != nil { - t.Fatalf("Failed 
to deploy tupler contract: %v", err) - } - sim.Commit() - - if res, err := tupler.Tuple(nil); err != nil { - t.Fatalf("Failed to call structure retriever: %v", err) - } else if res.A != "Hi" || res.B.Cmp(big.NewInt(1)) != 0 { - t.Fatalf("Retrieved value mismatch: have %v/%v, want %v/%v", res.A, res.B, "Hi", 1) - } - `, - nil, - nil, - nil, - nil, - }, - // Tests that arrays/slices can be properly returned and deserialized. - // Only addresses are tested, remainder just compiled to keep the test small. - { - `Slicer`, - ` - contract Slicer { - function echoAddresses(address[] input) constant returns (address[] output) { - return input; - } - function echoInts(int[] input) constant returns (int[] output) { - return input; - } - function echoFancyInts(uint24[23] input) constant returns (uint24[23] output) { - return input; - } - function echoBools(bool[] input) constant returns (bool[] output) { - return input; - } - } - `, - []string{`606060405261015c806100126000396000f3606060405260e060020a6000350463be1127a3811461003c578063d88becc014610092578063e15a3db71461003c578063f637e5891461003c575b005b604080516020600480358082013583810285810185019096528085526100ee959294602494909392850192829185019084908082843750949650505050505050604080516020810190915260009052805b919050565b604080516102e0818101909252610138916004916102e491839060179083908390808284375090955050505050506102e0604051908101604052806017905b60008152602001906001900390816100d15790505081905061008d565b60405180806020018281038252838181518152602001915080519060200190602002808383829060006004602084601f0104600f02600301f1509050019250505060405180910390f35b60405180826102e0808381846000600461015cf15090500191505060405180910390f3`}, - []string{`[{"constant":true,"inputs":[{"name":"input","type":"address[]"}],"name":"echoAddresses","outputs":[{"name":"output","type":"address[]"}],"type":"function"},{"constant":true,"inputs":[{"name":"input","type":"uint24[23]"}],"name":"echoFancyInts","outputs":[{"name":"output","type":"uint24[23]"}],"type":"function"},{"constant":true,"inputs":[{"name":"input","type":"int256[]"}],"name":"echoInts","outputs":[{"name":"output","type":"int256[]"}],"type":"function"},{"constant":true,"inputs":[{"name":"input","type":"bool[]"}],"name":"echoBools","outputs":[{"name":"output","type":"bool[]"}],"type":"function"}]`}, - ` - "math/big" - "reflect" - - "github.com/ethereum/go-ethereum/accounts/abi/bind" - "github.com/ethereum/go-ethereum/accounts/abi/bind/backends" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/crypto" - `, - ` - // Generate a new random account and a funded simulator - key, _ := crypto.GenerateKey() - auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) - - sim := backends.NewSimulatedBackend(types.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000) - defer sim.Close() - - // Deploy a slice tester contract and execute a n array call on it - _, _, slicer, err := DeploySlicer(auth, sim) - if err != nil { - t.Fatalf("Failed to deploy slicer contract: %v", err) - } - sim.Commit() - - if out, err := slicer.EchoAddresses(nil, []common.Address{auth.From, common.Address{}}); err != nil { - t.Fatalf("Failed to call slice echoer: %v", err) - } else if !reflect.DeepEqual(out, []common.Address{auth.From, common.Address{}}) { - t.Fatalf("Slice return mismatch: have %v, want %v", out, []common.Address{auth.From, common.Address{}}) - } - `, - nil, - nil, - nil, - nil, - }, - // Tests that anonymous default methods can be 
correctly invoked - { - `Defaulter`, - ` - contract Defaulter { - address public caller; - - function() { - caller = msg.sender; - } - } - `, - []string{`6060604052606a8060106000396000f360606040523615601d5760e060020a6000350463fc9c8d3981146040575b605e6000805473ffffffffffffffffffffffffffffffffffffffff191633179055565b606060005473ffffffffffffffffffffffffffffffffffffffff1681565b005b6060908152602090f3`}, - []string{`[{"constant":true,"inputs":[],"name":"caller","outputs":[{"name":"","type":"address"}],"type":"function"}]`}, - ` - "math/big" - - "github.com/ethereum/go-ethereum/accounts/abi/bind" - "github.com/ethereum/go-ethereum/accounts/abi/bind/backends" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/crypto" - `, - ` - // Generate a new random account and a funded simulator - key, _ := crypto.GenerateKey() - auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) - - sim := backends.NewSimulatedBackend(types.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000) - defer sim.Close() - - // Deploy a default method invoker contract and execute its default method - _, _, defaulter, err := DeployDefaulter(auth, sim) - if err != nil { - t.Fatalf("Failed to deploy defaulter contract: %v", err) - } - sim.Commit() - if _, err := (&DefaulterRaw{defaulter}).Transfer(auth); err != nil { - t.Fatalf("Failed to invoke default method: %v", err) - } - sim.Commit() - - if caller, err := defaulter.Caller(nil); err != nil { - t.Fatalf("Failed to call address retriever: %v", err) - } else if (caller != auth.From) { - t.Fatalf("Address mismatch: have %v, want %v", caller, auth.From) - } - `, - nil, - nil, - nil, - nil, - }, - // Tests that structs are correctly unpacked - { - - `Structs`, - ` - pragma solidity ^0.6.5; - pragma experimental ABIEncoderV2; - contract Structs { - struct A { - bytes32 B; - } - - function F() public view returns (A[] memory a, uint256[] memory c, bool[] memory d) { - A[] memory a = new A[](2); - a[0].B = bytes32(uint256(1234) << 96); - uint256[] memory c; - bool[] memory d; - return (a, c, d); - } - - function G() public view returns (A[] memory a) { - A[] memory a = new A[](2); - a[0].B = bytes32(uint256(1234) << 96); - return a; - } - } - `, - 
[]string{`608060405234801561001057600080fd5b50610278806100206000396000f3fe608060405234801561001057600080fd5b50600436106100365760003560e01c806328811f591461003b5780636fecb6231461005b575b600080fd5b610043610070565b604051610052939291906101a0565b60405180910390f35b6100636100d6565b6040516100529190610186565b604080516002808252606082810190935282918291829190816020015b610095610131565b81526020019060019003908161008d575050805190915061026960611b9082906000906100be57fe5b60209081029190910101515293606093508392509050565b6040805160028082526060828101909352829190816020015b6100f7610131565b8152602001906001900390816100ef575050805190915061026960611b90829060009061012057fe5b602090810291909101015152905090565b60408051602081019091526000815290565b815260200190565b6000815180845260208085019450808401835b8381101561017b578151518752958201959082019060010161015e565b509495945050505050565b600060208252610199602083018461014b565b9392505050565b6000606082526101b3606083018661014b565b6020838203818501528186516101c98185610239565b91508288019350845b818110156101f3576101e5838651610143565b9484019492506001016101d2565b505084810360408601528551808252908201925081860190845b8181101561022b57825115158552938301939183019160010161020d565b509298975050505050505050565b9081526020019056fea2646970667358221220eb85327e285def14230424c52893aebecec1e387a50bb6b75fc4fdbed647f45f64736f6c63430006050033`}, - []string{`[{"inputs":[],"name":"F","outputs":[{"components":[{"internalType":"bytes32","name":"B","type":"bytes32"}],"internalType":"structStructs.A[]","name":"a","type":"tuple[]"},{"internalType":"uint256[]","name":"c","type":"uint256[]"},{"internalType":"bool[]","name":"d","type":"bool[]"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"G","outputs":[{"components":[{"internalType":"bytes32","name":"B","type":"bytes32"}],"internalType":"structStructs.A[]","name":"a","type":"tuple[]"}],"stateMutability":"view","type":"function"}]`}, - ` - "math/big" - - "github.com/ethereum/go-ethereum/accounts/abi/bind" - "github.com/ethereum/go-ethereum/accounts/abi/bind/backends" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/crypto" - `, - ` - // Generate a new random account and a funded simulator - key, _ := crypto.GenerateKey() - auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) - - sim := backends.NewSimulatedBackend(types.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000) - defer sim.Close() - - // Deploy a structs method invoker contract and execute its default method - _, _, structs, err := DeployStructs(auth, sim) - if err != nil { - t.Fatalf("Failed to deploy defaulter contract: %v", err) - } - sim.Commit() - opts := bind.CallOpts{} - if _, err := structs.F(&opts); err != nil { - t.Fatalf("Failed to invoke F method: %v", err) - } - if _, err := structs.G(&opts); err != nil { - t.Fatalf("Failed to invoke G method: %v", err) - } - `, - nil, - nil, - nil, - nil, - }, - // Tests that non-existent contracts are reported as such (though only simulator test) - { - `NonExistent`, - ` - contract NonExistent { - function String() constant returns(string) { - return "I don't exist"; - } - } - `, - []string{`6060604052609f8060106000396000f3606060405260e060020a6000350463f97a60058114601a575b005b600060605260c0604052600d60809081527f4920646f6e27742065786973740000000000000000000000000000000000000060a052602060c0908152600d60e081905281906101009060a09080838184600060046012f15050815172ffffffffffffffffffffffffffffffffffffff1916909152505060405161012081900392509050f3`}, - 
[]string{`[{"constant":true,"inputs":[],"name":"String","outputs":[{"name":"","type":"string"}],"type":"function"}]`}, - ` - "github.com/ethereum/go-ethereum/accounts/abi/bind" - "github.com/ethereum/go-ethereum/accounts/abi/bind/backends" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/types" - `, - ` - // Create a simulator and wrap a non-deployed contract - - sim := backends.NewSimulatedBackend(types.GenesisAlloc{}, uint64(10000000000)) - defer sim.Close() - - nonexistent, err := NewNonExistent(common.Address{}, sim) - if err != nil { - t.Fatalf("Failed to access non-existent contract: %v", err) - } - // Ensure that contract calls fail with the appropriate error - if res, err := nonexistent.String(nil); err == nil { - t.Fatalf("Call succeeded on non-existent contract: %v", res) - } else if (err != bind.ErrNoCode) { - t.Fatalf("Error mismatch: have %v, want %v", err, bind.ErrNoCode) - } - `, - nil, - nil, - nil, - nil, - }, - { - `NonExistentStruct`, - ` - contract NonExistentStruct { - function Struct() public view returns(uint256 a, uint256 b) { - return (10, 10); - } - } - `, - []string{`6080604052348015600f57600080fd5b5060888061001e6000396000f3fe6080604052348015600f57600080fd5b506004361060285760003560e01c8063d5f6622514602d575b600080fd5b6033604c565b6040805192835260208301919091528051918290030190f35b600a809156fea264697066735822beefbeefbeefbeefbeefbeefbeefbeefbeefbeefbeefbeefbeefbeefbeefbeefbeef64736f6c6343decafe0033`}, - []string{`[{"inputs":[],"name":"Struct","outputs":[{"internalType":"uint256","name":"a","type":"uint256"},{"internalType":"uint256","name":"b","type":"uint256"}],"stateMutability":"pure","type":"function"}]`}, - ` - "github.com/ethereum/go-ethereum/accounts/abi/bind" - "github.com/ethereum/go-ethereum/accounts/abi/bind/backends" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/types" - `, - ` - // Create a simulator and wrap a non-deployed contract - - sim := backends.NewSimulatedBackend(types.GenesisAlloc{}, uint64(10000000000)) - defer sim.Close() - - nonexistent, err := NewNonExistentStruct(common.Address{}, sim) - if err != nil { - t.Fatalf("Failed to access non-existent contract: %v", err) - } - // Ensure that contract calls fail with the appropriate error - if res, err := nonexistent.Struct(nil); err == nil { - t.Fatalf("Call succeeded on non-existent contract: %v", res) - } else if (err != bind.ErrNoCode) { - t.Fatalf("Error mismatch: have %v, want %v", err, bind.ErrNoCode) - } - `, - nil, - nil, - nil, - nil, - }, - // Tests that gas estimation works for contracts with weird gas mechanics too. - { - `FunkyGasPattern`, - ` - contract FunkyGasPattern { - string public field; - - function SetField(string value) { - // This check will screw gas estimation! Good, good! 
- if (msg.gas < 100000) { - throw; - } - field = value; - } - } - `, - []string{`606060405261021c806100126000396000f3606060405260e060020a600035046323fcf32a81146100265780634f28bf0e1461007b575b005b6040805160206004803580820135601f8101849004840285018401909552848452610024949193602493909291840191908190840183828082843750949650505050505050620186a05a101561014e57610002565b6100db60008054604080516020601f600260001961010060018816150201909516949094049384018190048102820181019092528281529291908301828280156102145780601f106101e957610100808354040283529160200191610214565b60405180806020018281038252838181518152602001915080519060200190808383829060006004602084601f0104600302600f01f150905090810190601f16801561013b5780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b505050565b8060006000509080519060200190828054600181600116156101000203166002900490600052602060002090601f016020900481019282601f106101b557805160ff19168380011785555b506101499291505b808211156101e557600081556001016101a1565b82800160010185558215610199579182015b828111156101995782518260005055916020019190600101906101c7565b5090565b820191906000526020600020905b8154815290600101906020018083116101f757829003601f168201915b50505050508156`}, - []string{`[{"constant":false,"inputs":[{"name":"value","type":"string"}],"name":"SetField","outputs":[],"type":"function"},{"constant":true,"inputs":[],"name":"field","outputs":[{"name":"","type":"string"}],"type":"function"}]`}, - ` - "math/big" - - "github.com/ethereum/go-ethereum/accounts/abi/bind" - "github.com/ethereum/go-ethereum/accounts/abi/bind/backends" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/crypto" - `, - ` - // Generate a new random account and a funded simulator - key, _ := crypto.GenerateKey() - auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) - - sim := backends.NewSimulatedBackend(types.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000) - defer sim.Close() - - // Deploy a funky gas pattern contract - _, _, limiter, err := DeployFunkyGasPattern(auth, sim) - if err != nil { - t.Fatalf("Failed to deploy funky contract: %v", err) - } - sim.Commit() - - // Set the field with automatic estimation and check that it succeeds - if _, err := limiter.SetField(auth, "automatic"); err != nil { - t.Fatalf("Failed to call automatically gased transaction: %v", err) - } - sim.Commit() - - if field, _ := limiter.Field(nil); field != "automatic" { - t.Fatalf("Field mismatch: have %v, want %v", field, "automatic") - } - `, - nil, - nil, - nil, - nil, - }, - // Test that constant functions can be called from an (optional) specified address - { - `CallFrom`, - ` - contract CallFrom { - function callFrom() constant returns(address) { - return msg.sender; - } - } - `, []string{`6060604052346000575b6086806100176000396000f300606060405263ffffffff60e060020a60003504166349f8e98281146022575b6000565b34600057602c6055565b6040805173ffffffffffffffffffffffffffffffffffffffff9092168252519081900360200190f35b335b905600a165627a7a72305820aef6b7685c0fa24ba6027e4870404a57df701473fe4107741805c19f5138417c0029`}, - []string{`[{"constant":true,"inputs":[],"name":"callFrom","outputs":[{"name":"","type":"address"}],"payable":false,"type":"function"}]`}, - ` - "math/big" - - "github.com/ethereum/go-ethereum/accounts/abi/bind" - "github.com/ethereum/go-ethereum/accounts/abi/bind/backends" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/crypto" - `, - ` - // Generate a new random 
account and a funded simulator - key, _ := crypto.GenerateKey() - auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) - - sim := backends.NewSimulatedBackend(types.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000) - defer sim.Close() - - // Deploy a sender tester contract and execute a structured call on it - _, _, callfrom, err := DeployCallFrom(auth, sim) - if err != nil { - t.Fatalf("Failed to deploy sender contract: %v", err) - } - sim.Commit() - - if res, err := callfrom.CallFrom(nil); err != nil { - t.Errorf("Failed to call constant function: %v", err) - } else if res != (common.Address{}) { - t.Errorf("Invalid address returned, want: %x, got: %x", (common.Address{}), res) - } - - for _, addr := range []common.Address{common.Address{}, common.Address{1}, common.Address{2}} { - if res, err := callfrom.CallFrom(&bind.CallOpts{From: addr}); err != nil { - t.Fatalf("Failed to call constant function: %v", err) - } else if res != addr { - t.Fatalf("Invalid address returned, want: %x, got: %x", addr, res) - } - } - `, - nil, - nil, - nil, - nil, - }, - // Tests that methods and returns with underscores inside work correctly. - { - `Underscorer`, - ` - contract Underscorer { - function UnderscoredOutput() constant returns (int _int, string _string) { - return (314, "pi"); - } - function LowerLowerCollision() constant returns (int _res, int res) { - return (1, 2); - } - function LowerUpperCollision() constant returns (int _res, int Res) { - return (1, 2); - } - function UpperLowerCollision() constant returns (int _Res, int res) { - return (1, 2); - } - function UpperUpperCollision() constant returns (int _Res, int Res) { - return (1, 2); - } - function PurelyUnderscoredOutput() constant returns (int _, int res) { - return (1, 2); - } - function AllPurelyUnderscoredOutput() constant returns (int _, int __) { - return (1, 2); - } - function _under_scored_func() constant returns (int _int) { - return 0; - } - } - `, 
[]string{`6060604052341561000f57600080fd5b6103858061001e6000396000f30060606040526004361061008e576000357c0100000000000000000000000000000000000000000000000000000000900463ffffffff16806303a592131461009357806346546dbe146100c357806367e6633d146100ec5780639df4848514610181578063af7486ab146101b1578063b564b34d146101e1578063e02ab24d14610211578063e409ca4514610241575b600080fd5b341561009e57600080fd5b6100a6610271565b604051808381526020018281526020019250505060405180910390f35b34156100ce57600080fd5b6100d6610286565b6040518082815260200191505060405180910390f35b34156100f757600080fd5b6100ff61028e565b6040518083815260200180602001828103825283818151815260200191508051906020019080838360005b8381101561014557808201518184015260208101905061012a565b50505050905090810190601f1680156101725780820380516001836020036101000a031916815260200191505b50935050505060405180910390f35b341561018c57600080fd5b6101946102dc565b604051808381526020018281526020019250505060405180910390f35b34156101bc57600080fd5b6101c46102f1565b604051808381526020018281526020019250505060405180910390f35b34156101ec57600080fd5b6101f4610306565b604051808381526020018281526020019250505060405180910390f35b341561021c57600080fd5b61022461031b565b604051808381526020018281526020019250505060405180910390f35b341561024c57600080fd5b610254610330565b604051808381526020018281526020019250505060405180910390f35b60008060016002819150809050915091509091565b600080905090565b6000610298610345565b61013a8090506040805190810160405280600281526020017f7069000000000000000000000000000000000000000000000000000000000000815250915091509091565b60008060016002819150809050915091509091565b60008060016002819150809050915091509091565b60008060016002819150809050915091509091565b60008060016002819150809050915091509091565b60008060016002819150809050915091509091565b6020604051908101604052806000815250905600a165627a7a72305820d1a53d9de9d1e3d55cb3dc591900b63c4f1ded79114f7b79b332684840e186a40029`}, - []string{`[{"constant":true,"inputs":[],"name":"LowerUpperCollision","outputs":[{"name":"_res","type":"int256"},{"name":"Res","type":"int256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"_under_scored_func","outputs":[{"name":"_int","type":"int256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"UnderscoredOutput","outputs":[{"name":"_int","type":"int256"},{"name":"_string","type":"string"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"PurelyUnderscoredOutput","outputs":[{"name":"_","type":"int256"},{"name":"res","type":"int256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"UpperLowerCollision","outputs":[{"name":"_Res","type":"int256"},{"name":"res","type":"int256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"AllPurelyUnderscoredOutput","outputs":[{"name":"_","type":"int256"},{"name":"__","type":"int256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"UpperUpperCollision","outputs":[{"name":"_Res","type":"int256"},{"name":"Res","type":"int256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"LowerLowerCollision","outputs":[{"name":"_res","type":"int256"},{"name":"res","type":"int256"}],"payable":false,"stateMutability":"view","type":"function"}]`}, - ` - "fmt" - "math/big" - - "github.com/ethereum/go-ethereum/accounts/abi/bind" - "github.com/ethereum/go-ethereum/accounts/abi/bind/backends" 
- "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/crypto" - `, - ` - // Generate a new random account and a funded simulator - key, _ := crypto.GenerateKey() - auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) - - sim := backends.NewSimulatedBackend(types.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000) - defer sim.Close() - - // Deploy a underscorer tester contract and execute a structured call on it - _, _, underscorer, err := DeployUnderscorer(auth, sim) - if err != nil { - t.Fatalf("Failed to deploy underscorer contract: %v", err) - } - sim.Commit() - - // Verify that underscored return values correctly parse into structs - if res, err := underscorer.UnderscoredOutput(nil); err != nil { - t.Errorf("Failed to call constant function: %v", err) - } else if res.Int.Cmp(big.NewInt(314)) != 0 || res.String != "pi" { - t.Errorf("Invalid result, want: {314, \"pi\"}, got: %+v", res) - } - // Verify that underscored and non-underscored name collisions force tuple outputs - var a, b *big.Int - - a, b, _ = underscorer.LowerLowerCollision(nil) - a, b, _ = underscorer.LowerUpperCollision(nil) - a, b, _ = underscorer.UpperLowerCollision(nil) - a, b, _ = underscorer.UpperUpperCollision(nil) - a, b, _ = underscorer.PurelyUnderscoredOutput(nil) - a, b, _ = underscorer.AllPurelyUnderscoredOutput(nil) - a, _ = underscorer.UnderScoredFunc(nil) - - fmt.Println(a, b, err) - `, - nil, - nil, - nil, - nil, - }, - // Tests that logs can be successfully filtered and decoded. - { - `Eventer`, - ` - contract Eventer { - event SimpleEvent ( - address indexed Addr, - bytes32 indexed Id, - bool indexed Flag, - uint Value - ); - function raiseSimpleEvent(address addr, bytes32 id, bool flag, uint value) { - SimpleEvent(addr, id, flag, value); - } - - event NodataEvent ( - uint indexed Number, - int16 indexed Short, - uint32 indexed Long - ); - function raiseNodataEvent(uint number, int16 short, uint32 long) { - NodataEvent(number, short, long); - } - - event DynamicEvent ( - string indexed IndexedString, - bytes indexed IndexedBytes, - string NonIndexedString, - bytes NonIndexedBytes - ); - function raiseDynamicEvent(string str, bytes blob) { - DynamicEvent(str, blob, str, blob); - } - - event FixedBytesEvent ( - bytes24 indexed IndexedBytes, - bytes24 NonIndexedBytes - ); - function raiseFixedBytesEvent(bytes24 blob) { - FixedBytesEvent(blob, blob); - } - } - `, - 
[]string{`608060405234801561001057600080fd5b5061043f806100206000396000f3006080604052600436106100615763ffffffff7c0100000000000000000000000000000000000000000000000000000000600035041663528300ff8114610066578063630c31e2146100ff5780636cc6b94014610138578063c7d116dd1461015b575b600080fd5b34801561007257600080fd5b506040805160206004803580820135601f81018490048402850184019095528484526100fd94369492936024939284019190819084018382808284375050604080516020601f89358b018035918201839004830284018301909452808352979a9998810197919650918201945092508291508401838280828437509497506101829650505050505050565b005b34801561010b57600080fd5b506100fd73ffffffffffffffffffffffffffffffffffffffff60043516602435604435151560643561033c565b34801561014457600080fd5b506100fd67ffffffffffffffff1960043516610394565b34801561016757600080fd5b506100fd60043560243560010b63ffffffff604435166103d6565b806040518082805190602001908083835b602083106101b25780518252601f199092019160209182019101610193565b51815160209384036101000a6000190180199092169116179052604051919093018190038120875190955087945090928392508401908083835b6020831061020b5780518252601f1990920191602091820191016101ec565b6001836020036101000a03801982511681845116808217855250505050505090500191505060405180910390207f3281fd4f5e152dd3385df49104a3f633706e21c9e80672e88d3bcddf33101f008484604051808060200180602001838103835285818151815260200191508051906020019080838360005b8381101561029c578181015183820152602001610284565b50505050905090810190601f1680156102c95780820380516001836020036101000a031916815260200191505b50838103825284518152845160209182019186019080838360005b838110156102fc5781810151838201526020016102e4565b50505050905090810190601f1680156103295780820380516001836020036101000a031916815260200191505b5094505050505060405180910390a35050565b60408051828152905183151591859173ffffffffffffffffffffffffffffffffffffffff8816917f1f097de4289df643bd9c11011cc61367aa12983405c021056e706eb5ba1250c8919081900360200190a450505050565b6040805167ffffffffffffffff19831680825291517fcdc4c1b1aed5524ffb4198d7a5839a34712baef5fa06884fac7559f4a5854e0a9181900360200190a250565b8063ffffffff168260010b847f3ca7f3a77e5e6e15e781850bc82e32adfa378a2a609370db24b4d0fae10da2c960405160405180910390a45050505600a165627a7a72305820468b5843bf653145bd924b323c64ef035d3dd922c170644b44d61aa666ea6eee0029`}, - 
[]string{`[{"constant":false,"inputs":[{"name":"str","type":"string"},{"name":"blob","type":"bytes"}],"name":"raiseDynamicEvent","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"addr","type":"address"},{"name":"id","type":"bytes32"},{"name":"flag","type":"bool"},{"name":"value","type":"uint256"}],"name":"raiseSimpleEvent","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"blob","type":"bytes24"}],"name":"raiseFixedBytesEvent","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"number","type":"uint256"},{"name":"short","type":"int16"},{"name":"long","type":"uint32"}],"name":"raiseNodataEvent","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},{"anonymous":false,"inputs":[{"indexed":true,"name":"Addr","type":"address"},{"indexed":true,"name":"Id","type":"bytes32"},{"indexed":true,"name":"Flag","type":"bool"},{"indexed":false,"name":"Value","type":"uint256"}],"name":"SimpleEvent","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"name":"Number","type":"uint256"},{"indexed":true,"name":"Short","type":"int16"},{"indexed":true,"name":"Long","type":"uint32"}],"name":"NodataEvent","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"name":"IndexedString","type":"string"},{"indexed":true,"name":"IndexedBytes","type":"bytes"},{"indexed":false,"name":"NonIndexedString","type":"string"},{"indexed":false,"name":"NonIndexedBytes","type":"bytes"}],"name":"DynamicEvent","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"name":"IndexedBytes","type":"bytes24"},{"indexed":false,"name":"NonIndexedBytes","type":"bytes24"}],"name":"FixedBytesEvent","type":"event"}]`}, - ` - "math/big" - "time" - - "github.com/ethereum/go-ethereum/accounts/abi/bind" - "github.com/ethereum/go-ethereum/accounts/abi/bind/backends" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/crypto" - `, - ` - // Generate a new random account and a funded simulator - key, _ := crypto.GenerateKey() - auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) - - sim := backends.NewSimulatedBackend(types.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000) - defer sim.Close() - - // Deploy an eventer contract - _, _, eventer, err := DeployEventer(auth, sim) - if err != nil { - t.Fatalf("Failed to deploy eventer contract: %v", err) - } - sim.Commit() - - // Inject a few events into the contract, gradually more in each block - for i := 1; i <= 3; i++ { - for j := 1; j <= i; j++ { - if _, err := eventer.RaiseSimpleEvent(auth, common.Address{byte(j)}, [32]byte{byte(j)}, true, big.NewInt(int64(10*i+j))); err != nil { - t.Fatalf("block %d, event %d: raise failed: %v", i, j, err) - } - } - sim.Commit() - } - // Test filtering for certain events and ensure they can be found - sit, err := eventer.FilterSimpleEvent(nil, []common.Address{common.Address{1}, common.Address{3}}, [][32]byte{{byte(1)}, {byte(2)}, {byte(3)}}, []bool{true}) - if err != nil { - t.Fatalf("failed to filter for simple events: %v", err) - } - defer sit.Close() - - sit.Next() - if sit.Event.Value.Uint64() != 11 || !sit.Event.Flag { - t.Errorf("simple log content mismatch: have %v, want {11, true}", sit.Event) - } - sit.Next() - if sit.Event.Value.Uint64() != 21 || !sit.Event.Flag { - t.Errorf("simple log content mismatch: have 
%v, want {21, true}", sit.Event) - } - sit.Next() - if sit.Event.Value.Uint64() != 31 || !sit.Event.Flag { - t.Errorf("simple log content mismatch: have %v, want {31, true}", sit.Event) - } - sit.Next() - if sit.Event.Value.Uint64() != 33 || !sit.Event.Flag { - t.Errorf("simple log content mismatch: have %v, want {33, true}", sit.Event) - } - - if sit.Next() { - t.Errorf("unexpected simple event found: %+v", sit.Event) - } - if err = sit.Error(); err != nil { - t.Fatalf("simple event iteration failed: %v", err) - } - // Test raising and filtering for an event with no data component - if _, err := eventer.RaiseNodataEvent(auth, big.NewInt(314), 141, 271); err != nil { - t.Fatalf("failed to raise nodata event: %v", err) - } - sim.Commit() - - nit, err := eventer.FilterNodataEvent(nil, []*big.Int{big.NewInt(314)}, []int16{140, 141, 142}, []uint32{271}) - if err != nil { - t.Fatalf("failed to filter for nodata events: %v", err) - } - defer nit.Close() - - if !nit.Next() { - t.Fatalf("nodata log not found: %v", nit.Error()) - } - if nit.Event.Number.Uint64() != 314 { - t.Errorf("nodata log content mismatch: have %v, want 314", nit.Event.Number) - } - if nit.Next() { - t.Errorf("unexpected nodata event found: %+v", nit.Event) - } - if err = nit.Error(); err != nil { - t.Fatalf("nodata event iteration failed: %v", err) - } - // Test raising and filtering for events with dynamic indexed components - if _, err := eventer.RaiseDynamicEvent(auth, "Hello", []byte("World")); err != nil { - t.Fatalf("failed to raise dynamic event: %v", err) - } - sim.Commit() - - dit, err := eventer.FilterDynamicEvent(nil, []string{"Hi", "Hello", "Bye"}, [][]byte{[]byte("World")}) - if err != nil { - t.Fatalf("failed to filter for dynamic events: %v", err) - } - defer dit.Close() - - if !dit.Next() { - t.Fatalf("dynamic log not found: %v", dit.Error()) - } - if dit.Event.NonIndexedString != "Hello" || string(dit.Event.NonIndexedBytes) != "World" || dit.Event.IndexedString != common.HexToHash("0x06b3dfaec148fb1bb2b066f10ec285e7c9bf402ab32aa78a5d38e34566810cd2") || dit.Event.IndexedBytes != common.HexToHash("0xf2208c967df089f60420785795c0a9ba8896b0f6f1867fa7f1f12ad6f79c1a18") { - t.Errorf("dynamic log content mismatch: have %v, want {'0x06b3dfaec148fb1bb2b066f10ec285e7c9bf402ab32aa78a5d38e34566810cd2, '0xf2208c967df089f60420785795c0a9ba8896b0f6f1867fa7f1f12ad6f79c1a18', 'Hello', 'World'}", dit.Event) - } - if dit.Next() { - t.Errorf("unexpected dynamic event found: %+v", dit.Event) - } - if err = dit.Error(); err != nil { - t.Fatalf("dynamic event iteration failed: %v", err) - } - // Test raising and filtering for events with fixed bytes components - var fblob [24]byte - copy(fblob[:], []byte("Fixed Bytes")) - - if _, err := eventer.RaiseFixedBytesEvent(auth, fblob); err != nil { - t.Fatalf("failed to raise fixed bytes event: %v", err) - } - sim.Commit() - - fit, err := eventer.FilterFixedBytesEvent(nil, [][24]byte{fblob}) - if err != nil { - t.Fatalf("failed to filter for fixed bytes events: %v", err) - } - defer fit.Close() - - if !fit.Next() { - t.Fatalf("fixed bytes log not found: %v", fit.Error()) - } - if fit.Event.NonIndexedBytes != fblob || fit.Event.IndexedBytes != fblob { - t.Errorf("fixed bytes log content mismatch: have %v, want {'%x', '%x'}", fit.Event, fblob, fblob) - } - if fit.Next() { - t.Errorf("unexpected fixed bytes event found: %+v", fit.Event) - } - if err = fit.Error(); err != nil { - t.Fatalf("fixed bytes event iteration failed: %v", err) - } - // Test subscribing to an event and raising it 
afterwards - ch := make(chan *EventerSimpleEvent, 16) - sub, err := eventer.WatchSimpleEvent(nil, ch, nil, nil, nil) - if err != nil { - t.Fatalf("failed to subscribe to simple events: %v", err) - } - if _, err := eventer.RaiseSimpleEvent(auth, common.Address{255}, [32]byte{255}, true, big.NewInt(255)); err != nil { - t.Fatalf("failed to raise subscribed simple event: %v", err) - } - sim.Commit() - - select { - case event := <-ch: - if event.Value.Uint64() != 255 { - t.Errorf("simple log content mismatch: have %v, want 255", event) - } - case <-time.After(250 * time.Millisecond): - t.Fatalf("subscribed simple event didn't arrive") - } - // Unsubscribe from the event and make sure we're not delivered more - sub.Unsubscribe() - - if _, err := eventer.RaiseSimpleEvent(auth, common.Address{254}, [32]byte{254}, true, big.NewInt(254)); err != nil { - t.Fatalf("failed to raise subscribed simple event: %v", err) - } - sim.Commit() - - select { - case event := <-ch: - t.Fatalf("unsubscribed simple event arrived: %v", event) - case <-time.After(250 * time.Millisecond): - } - `, - nil, - nil, - nil, - nil, - }, - { - `DeeplyNestedArray`, - ` - contract DeeplyNestedArray { - uint64[3][4][5] public deepUint64Array; - function storeDeepUintArray(uint64[3][4][5] arr) public { - deepUint64Array = arr; - } - function retrieveDeepArray() public view returns (uint64[3][4][5]) { - return deepUint64Array; - } - } - `, - []string{`6060604052341561000f57600080fd5b6106438061001e6000396000f300606060405260043610610057576000357c0100000000000000000000000000000000000000000000000000000000900463ffffffff168063344248551461005c5780638ed4573a1461011457806398ed1856146101ab575b600080fd5b341561006757600080fd5b610112600480806107800190600580602002604051908101604052809291906000905b828210156101055783826101800201600480602002604051908101604052809291906000905b828210156100f25783826060020160038060200260405190810160405280929190826003602002808284378201915050505050815260200190600101906100b0565b505050508152602001906001019061008a565b5050505091905050610208565b005b341561011f57600080fd5b61012761021d565b604051808260056000925b8184101561019b578284602002015160046000925b8184101561018d5782846020020151600360200280838360005b8381101561017c578082015181840152602081019050610161565b505050509050019260010192610147565b925050509260010192610132565b9250505091505060405180910390f35b34156101b657600080fd5b6101de6004808035906020019091908035906020019091908035906020019091905050610309565b604051808267ffffffffffffffff1667ffffffffffffffff16815260200191505060405180910390f35b80600090600561021992919061035f565b5050565b6102256103b0565b6000600580602002604051908101604052809291906000905b8282101561030057838260040201600480602002604051908101604052809291906000905b828210156102ed578382016003806020026040519081016040528092919082600380156102d9576020028201916000905b82829054906101000a900467ffffffffffffffff1667ffffffffffffffff16815260200190600801906020826007010492830192600103820291508084116102945790505b505050505081526020019060010190610263565b505050508152602001906001019061023e565b50505050905090565b60008360058110151561031857fe5b600402018260048110151561032957fe5b018160038110151561033757fe5b6004918282040191900660080292509250509054906101000a900467ffffffffffffffff1681565b826005600402810192821561039f579160200282015b8281111561039e5782518290600461038e9291906103df565b5091602001919060040190610375565b5b5090506103ac919061042d565b5090565b610780604051908101604052806005905b6103c9610459565b8152602001906001900390816103c15790505090565b826004810192821561041c579160200282015b8281111561041b5782518290600361040b929190
610488565b50916020019190600101906103f2565b5b5090506104299190610536565b5090565b61045691905b8082111561045257600081816104499190610562565b50600401610433565b5090565b90565b610180604051908101604052806004905b6104726105a7565b81526020019060019003908161046a5790505090565b82600380016004900481019282156105255791602002820160005b838211156104ef57835183826101000a81548167ffffffffffffffff021916908367ffffffffffffffff16021790555092602001926008016020816007010492830192600103026104a3565b80156105235782816101000a81549067ffffffffffffffff02191690556008016020816007010492830192600103026104ef565b505b50905061053291906105d9565b5090565b61055f91905b8082111561055b57600081816105529190610610565b5060010161053c565b5090565b90565b50600081816105719190610610565b50600101600081816105839190610610565b50600101600081816105959190610610565b5060010160006105a59190610610565b565b6060604051908101604052806003905b600067ffffffffffffffff168152602001906001900390816105b75790505090565b61060d91905b8082111561060957600081816101000a81549067ffffffffffffffff0219169055506001016105df565b5090565b90565b50600090555600a165627a7a7230582087e5a43f6965ab6ef7a4ff056ab80ed78fd8c15cff57715a1bf34ec76a93661c0029`}, - []string{`[{"constant":false,"inputs":[{"name":"arr","type":"uint64[3][4][5]"}],"name":"storeDeepUintArray","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":true,"inputs":[],"name":"retrieveDeepArray","outputs":[{"name":"","type":"uint64[3][4][5]"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[{"name":"","type":"uint256"},{"name":"","type":"uint256"},{"name":"","type":"uint256"}],"name":"deepUint64Array","outputs":[{"name":"","type":"uint64"}],"payable":false,"stateMutability":"view","type":"function"}]`}, - ` - "math/big" - - "github.com/ethereum/go-ethereum/accounts/abi/bind" - "github.com/ethereum/go-ethereum/accounts/abi/bind/backends" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/crypto" - `, - ` - // Generate a new random account and a funded simulator - key, _ := crypto.GenerateKey() - auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) - - sim := backends.NewSimulatedBackend(types.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000) - defer sim.Close() - - //deploy the test contract - _, _, testContract, err := DeployDeeplyNestedArray(auth, sim) - if err != nil { - t.Fatalf("Failed to deploy test contract: %v", err) - } - - // Finish deploy. - sim.Commit() - - //Create coordinate-filled array, for testing purposes. - testArr := [5][4][3]uint64{} - for i := 0; i < 5; i++ { - testArr[i] = [4][3]uint64{} - for j := 0; j < 4; j++ { - testArr[i][j] = [3]uint64{} - for k := 0; k < 3; k++ { - //pack the coordinates, each array value will be unique, and can be validated easily. - testArr[i][j][k] = uint64(i) << 16 | uint64(j) << 8 | uint64(k) - } - } - } - - if _, err := testContract.StoreDeepUintArray(&bind.TransactOpts{ - From: auth.From, - Signer: auth.Signer, - }, testArr); err != nil { - t.Fatalf("Failed to store nested array in test contract: %v", err) - } - - sim.Commit() - - retrievedArr, err := testContract.RetrieveDeepArray(&bind.CallOpts{ - From: auth.From, - Pending: false, - }) - if err != nil { - t.Fatalf("Failed to retrieve nested array from test contract: %v", err) - } - - //quick check to see if contents were copied - // (See accounts/abi/unpack_test.go for more extensive testing) - if retrievedArr[4][3][2] != testArr[4][3][2] { - t.Fatalf("Retrieved value does not match expected value! 
got: %d, expected: %d. %v", retrievedArr[4][3][2], testArr[4][3][2], err) - } - `, - nil, - nil, - nil, - nil, - }, - { - `CallbackParam`, - ` - contract FunctionPointerTest { - function test(function(uint256) external callback) external { - callback(1); - } - } - `, - []string{`608060405234801561001057600080fd5b5061015e806100206000396000f3fe60806040526004361061003b576000357c010000000000000000000000000000000000000000000000000000000090048063d7a5aba214610040575b600080fd5b34801561004c57600080fd5b506100be6004803603602081101561006357600080fd5b810190808035806c0100000000000000000000000090049068010000000000000000900463ffffffff1677ffffffffffffffffffffffffffffffffffffffffffffffff169091602001919093929190939291905050506100c0565b005b818160016040518263ffffffff167c010000000000000000000000000000000000000000000000000000000002815260040180828152602001915050600060405180830381600087803b15801561011657600080fd5b505af115801561012a573d6000803e3d6000fd5b50505050505056fea165627a7a7230582062f87455ff84be90896dbb0c4e4ddb505c600d23089f8e80a512548440d7e2580029`}, - []string{`[ - { - "constant": false, - "inputs": [ - { - "name": "callback", - "type": "function" - } - ], - "name": "test", - "outputs": [], - "payable": false, - "stateMutability": "nonpayable", - "type": "function" - } - ]`}, ` - "strings" - `, - ` - if strings.Compare("test(function)", CallbackParamFuncSigs["d7a5aba2"]) != 0 { - t.Fatalf("") - } - `, - []map[string]string{ - { - "test(function)": "d7a5aba2", - }, - }, - nil, - nil, - nil, - }, { - `Tuple`, - ` - pragma solidity >=0.4.19 <0.6.0; - pragma experimental ABIEncoderV2; - - contract Tuple { - struct S { uint a; uint[] b; T[] c; } - struct T { uint x; uint y; } - struct P { uint8 x; uint8 y; } - struct Q { uint16 x; uint16 y; } - event TupleEvent(S a, T[2][] b, T[][2] c, S[] d, uint[] e); - event TupleEvent2(P[]); - - function func1(S memory a, T[2][] memory b, T[][2] memory c, S[] memory d, uint[] memory e) public pure returns (S memory, T[2][] memory, T[][2] memory, S[] memory, uint[] memory) { - return (a, b, c, d, e); - } - function func2(S memory a, T[2][] memory b, T[][2] memory c, S[] memory d, uint[] memory e) public { - emit TupleEvent(a, b, c, d, e); - } - function func3(Q[] memory) public pure {} // call function, nothing to return - } - `, - 
[]string{`60806040523480156100115760006000fd5b50610017565b6110b2806100266000396000f3fe60806040523480156100115760006000fd5b50600436106100465760003560e01c8063443c79b41461004c578063d0062cdd14610080578063e4d9a43b1461009c57610046565b60006000fd5b610066600480360361006191908101906107b8565b6100b8565b604051610077959493929190610ccb565b60405180910390f35b61009a600480360361009591908101906107b8565b6100ef565b005b6100b660048036036100b19190810190610775565b610136565b005b6100c061013a565b60606100ca61015e565b606060608989898989945094509450945094506100e2565b9550955095509550959050565b7f18d6e66efa53739ca6d13626f35ebc700b31cced3eddb50c70bbe9c082c6cd008585858585604051610126959493929190610ccb565b60405180910390a15b5050505050565b5b50565b60405180606001604052806000815260200160608152602001606081526020015090565b60405180604001604052806002905b606081526020019060019003908161016d57905050905661106e565b600082601f830112151561019d5760006000fd5b81356101b06101ab82610d6f565b610d41565b915081818352602084019350602081019050838560808402820111156101d65760006000fd5b60005b8381101561020757816101ec888261037a565b8452602084019350608083019250505b6001810190506101d9565b5050505092915050565b600082601f83011215156102255760006000fd5b600261023861023382610d98565b610d41565b9150818360005b83811015610270578135860161025588826103f3565b8452602084019350602083019250505b60018101905061023f565b5050505092915050565b600082601f830112151561028e5760006000fd5b81356102a161029c82610dbb565b610d41565b915081818352602084019350602081019050838560408402820111156102c75760006000fd5b60005b838110156102f857816102dd888261058b565b8452602084019350604083019250505b6001810190506102ca565b5050505092915050565b600082601f83011215156103165760006000fd5b813561032961032482610de4565b610d41565b9150818183526020840193506020810190508360005b83811015610370578135860161035588826105d8565b8452602084019350602083019250505b60018101905061033f565b5050505092915050565b600082601f830112151561038e5760006000fd5b60026103a161039c82610e0d565b610d41565b915081838560408402820111156103b85760006000fd5b60005b838110156103e957816103ce88826106fe565b8452602084019350604083019250505b6001810190506103bb565b5050505092915050565b600082601f83011215156104075760006000fd5b813561041a61041582610e30565b610d41565b915081818352602084019350602081019050838560408402820111156104405760006000fd5b60005b83811015610471578161045688826106fe565b8452602084019350604083019250505b600181019050610443565b5050505092915050565b600082601f830112151561048f5760006000fd5b81356104a261049d82610e59565b610d41565b915081818352602084019350602081019050838560208402820111156104c85760006000fd5b60005b838110156104f957816104de8882610760565b8452602084019350602083019250505b6001810190506104cb565b5050505092915050565b600082601f83011215156105175760006000fd5b813561052a61052582610e82565b610d41565b915081818352602084019350602081019050838560208402820111156105505760006000fd5b60005b8381101561058157816105668882610760565b8452602084019350602083019250505b600181019050610553565b5050505092915050565b60006040828403121561059e5760006000fd5b6105a86040610d41565b905060006105b88482850161074b565b60008301525060206105cc8482850161074b565b60208301525092915050565b6000606082840312156105eb5760006000fd5b6105f56060610d41565b9050600061060584828501610760565b600083015250602082013567ffffffffffffffff8111156106265760006000fd5b6106328482850161047b565b602083015250604082013567ffffffffffffffff8111156106535760006000fd5b61065f848285016103f3565b60408301525092915050565b60006060828403121561067e5760006000fd5b6106886060610d41565b9050600061069884828501610760565b600083015250602082013567ffffffffffffffff8111156106b95760006000fd5b6106c58482850161047b565b6
02083015250604082013567ffffffffffffffff8111156106e65760006000fd5b6106f2848285016103f3565b60408301525092915050565b6000604082840312156107115760006000fd5b61071b6040610d41565b9050600061072b84828501610760565b600083015250602061073f84828501610760565b60208301525092915050565b60008135905061075a8161103a565b92915050565b60008135905061076f81611054565b92915050565b6000602082840312156107885760006000fd5b600082013567ffffffffffffffff8111156107a35760006000fd5b6107af8482850161027a565b91505092915050565b6000600060006000600060a086880312156107d35760006000fd5b600086013567ffffffffffffffff8111156107ee5760006000fd5b6107fa8882890161066b565b955050602086013567ffffffffffffffff8111156108185760006000fd5b61082488828901610189565b945050604086013567ffffffffffffffff8111156108425760006000fd5b61084e88828901610211565b935050606086013567ffffffffffffffff81111561086c5760006000fd5b61087888828901610302565b925050608086013567ffffffffffffffff8111156108965760006000fd5b6108a288828901610503565b9150509295509295909350565b60006108bb8383610a6a565b60808301905092915050565b60006108d38383610ac2565b905092915050565b60006108e78383610c36565b905092915050565b60006108fb8383610c8d565b60408301905092915050565b60006109138383610cbc565b60208301905092915050565b600061092a82610f0f565b6109348185610fb7565b935061093f83610eab565b8060005b8381101561097157815161095788826108af565b975061096283610f5c565b9250505b600181019050610943565b5085935050505092915050565b600061098982610f1a565b6109938185610fc8565b9350836020820285016109a585610ebb565b8060005b858110156109e257848403895281516109c285826108c7565b94506109cd83610f69565b925060208a019950505b6001810190506109a9565b50829750879550505050505092915050565b60006109ff82610f25565b610a098185610fd3565b935083602082028501610a1b85610ec5565b8060005b85811015610a585784840389528151610a3885826108db565b9450610a4383610f76565b925060208a019950505b600181019050610a1f565b50829750879550505050505092915050565b610a7381610f30565b610a7d8184610fe4565b9250610a8882610ed5565b8060005b83811015610aba578151610aa087826108ef565b9650610aab83610f83565b9250505b600181019050610a8c565b505050505050565b6000610acd82610f3b565b610ad78185610fef565b9350610ae283610edf565b8060005b83811015610b14578151610afa88826108ef565b9750610b0583610f90565b9250505b600181019050610ae6565b5085935050505092915050565b6000610b2c82610f51565b610b368185611011565b9350610b4183610eff565b8060005b83811015610b73578151610b598882610907565b9750610b6483610faa565b9250505b600181019050610b45565b5085935050505092915050565b6000610b8b82610f46565b610b958185611000565b9350610ba083610eef565b8060005b83811015610bd2578151610bb88882610907565b9750610bc383610f9d565b9250505b600181019050610ba4565b5085935050505092915050565b6000606083016000830151610bf76000860182610cbc565b5060208301518482036020860152610c0f8282610b80565b91505060408301518482036040860152610c298282610ac2565b9150508091505092915050565b6000606083016000830151610c4e6000860182610cbc565b5060208301518482036020860152610c668282610b80565b91505060408301518482036040860152610c808282610ac2565b9150508091505092915050565b604082016000820151610ca36000850182610cbc565b506020820151610cb66020850182610cbc565b50505050565b610cc581611030565b82525050565b600060a0820190508181036000830152610ce58188610bdf565b90508181036020830152610cf9818761091f565b90508181036040830152610d0d818661097e565b90508181036060830152610d2181856109f4565b90508181036080830152610d358184610b21565b90509695505050505050565b6000604051905081810181811067ffffffffffffffff82111715610d655760006000fd5b8060405250919050565b600067ffffffffffffffff821115610d875760006000fd5b602082029050602081019050919050565b600067ffffffffffffffff821115610db05760006000fd5b60208202905091
9050565b600067ffffffffffffffff821115610dd35760006000fd5b602082029050602081019050919050565b600067ffffffffffffffff821115610dfc5760006000fd5b602082029050602081019050919050565b600067ffffffffffffffff821115610e255760006000fd5b602082029050919050565b600067ffffffffffffffff821115610e485760006000fd5b602082029050602081019050919050565b600067ffffffffffffffff821115610e715760006000fd5b602082029050602081019050919050565b600067ffffffffffffffff821115610e9a5760006000fd5b602082029050602081019050919050565b6000819050602082019050919050565b6000819050919050565b6000819050602082019050919050565b6000819050919050565b6000819050602082019050919050565b6000819050602082019050919050565b6000819050602082019050919050565b600081519050919050565b600060029050919050565b600081519050919050565b600060029050919050565b600081519050919050565b600081519050919050565b600081519050919050565b6000602082019050919050565b6000602082019050919050565b6000602082019050919050565b6000602082019050919050565b6000602082019050919050565b6000602082019050919050565b6000602082019050919050565b600082825260208201905092915050565b600081905092915050565b600082825260208201905092915050565b600081905092915050565b600082825260208201905092915050565b600082825260208201905092915050565b600082825260208201905092915050565b600061ffff82169050919050565b6000819050919050565b61104381611022565b811415156110515760006000fd5b50565b61105d81611030565b8114151561106b5760006000fd5b50565bfea365627a7a72315820d78c6ba7ee332581e6c4d9daa5fc07941841230f7ce49edf6e05b1b63853e8746c6578706572696d656e74616cf564736f6c634300050c0040`}, - []string{` -[{"anonymous":false,"inputs":[{"components":[{"internalType":"uint256","name":"a","type":"uint256"},{"internalType":"uint256[]","name":"b","type":"uint256[]"},{"components":[{"internalType":"uint256","name":"x","type":"uint256"},{"internalType":"uint256","name":"y","type":"uint256"}],"internalType":"struct Tuple.T[]","name":"c","type":"tuple[]"}],"indexed":false,"internalType":"struct Tuple.S","name":"a","type":"tuple"},{"components":[{"internalType":"uint256","name":"x","type":"uint256"},{"internalType":"uint256","name":"y","type":"uint256"}],"indexed":false,"internalType":"struct Tuple.T[2][]","name":"b","type":"tuple[2][]"},{"components":[{"internalType":"uint256","name":"x","type":"uint256"},{"internalType":"uint256","name":"y","type":"uint256"}],"indexed":false,"internalType":"struct Tuple.T[][2]","name":"c","type":"tuple[][2]"},{"components":[{"internalType":"uint256","name":"a","type":"uint256"},{"internalType":"uint256[]","name":"b","type":"uint256[]"},{"components":[{"internalType":"uint256","name":"x","type":"uint256"},{"internalType":"uint256","name":"y","type":"uint256"}],"internalType":"struct Tuple.T[]","name":"c","type":"tuple[]"}],"indexed":false,"internalType":"struct Tuple.S[]","name":"d","type":"tuple[]"},{"indexed":false,"internalType":"uint256[]","name":"e","type":"uint256[]"}],"name":"TupleEvent","type":"event"},{"anonymous":false,"inputs":[{"components":[{"internalType":"uint8","name":"x","type":"uint8"},{"internalType":"uint8","name":"y","type":"uint8"}],"indexed":false,"internalType":"struct Tuple.P[]","name":"","type":"tuple[]"}],"name":"TupleEvent2","type":"event"},{"constant":true,"inputs":[{"components":[{"internalType":"uint256","name":"a","type":"uint256"},{"internalType":"uint256[]","name":"b","type":"uint256[]"},{"components":[{"internalType":"uint256","name":"x","type":"uint256"},{"internalType":"uint256","name":"y","type":"uint256"}],"internalType":"struct Tuple.T[]","name":"c","type":"tuple[]"}],"internalType":"struct 
Tuple.S","name":"a","type":"tuple"},{"components":[{"internalType":"uint256","name":"x","type":"uint256"},{"internalType":"uint256","name":"y","type":"uint256"}],"internalType":"struct Tuple.T[2][]","name":"b","type":"tuple[2][]"},{"components":[{"internalType":"uint256","name":"x","type":"uint256"},{"internalType":"uint256","name":"y","type":"uint256"}],"internalType":"struct Tuple.T[][2]","name":"c","type":"tuple[][2]"},{"components":[{"internalType":"uint256","name":"a","type":"uint256"},{"internalType":"uint256[]","name":"b","type":"uint256[]"},{"components":[{"internalType":"uint256","name":"x","type":"uint256"},{"internalType":"uint256","name":"y","type":"uint256"}],"internalType":"struct Tuple.T[]","name":"c","type":"tuple[]"}],"internalType":"struct Tuple.S[]","name":"d","type":"tuple[]"},{"internalType":"uint256[]","name":"e","type":"uint256[]"}],"name":"func1","outputs":[{"components":[{"internalType":"uint256","name":"a","type":"uint256"},{"internalType":"uint256[]","name":"b","type":"uint256[]"},{"components":[{"internalType":"uint256","name":"x","type":"uint256"},{"internalType":"uint256","name":"y","type":"uint256"}],"internalType":"struct Tuple.T[]","name":"c","type":"tuple[]"}],"internalType":"struct Tuple.S","name":"","type":"tuple"},{"components":[{"internalType":"uint256","name":"x","type":"uint256"},{"internalType":"uint256","name":"y","type":"uint256"}],"internalType":"struct Tuple.T[2][]","name":"","type":"tuple[2][]"},{"components":[{"internalType":"uint256","name":"x","type":"uint256"},{"internalType":"uint256","name":"y","type":"uint256"}],"internalType":"struct Tuple.T[][2]","name":"","type":"tuple[][2]"},{"components":[{"internalType":"uint256","name":"a","type":"uint256"},{"internalType":"uint256[]","name":"b","type":"uint256[]"},{"components":[{"internalType":"uint256","name":"x","type":"uint256"},{"internalType":"uint256","name":"y","type":"uint256"}],"internalType":"struct Tuple.T[]","name":"c","type":"tuple[]"}],"internalType":"struct Tuple.S[]","name":"","type":"tuple[]"},{"internalType":"uint256[]","name":"","type":"uint256[]"}],"payable":false,"stateMutability":"pure","type":"function"},{"constant":false,"inputs":[{"components":[{"internalType":"uint256","name":"a","type":"uint256"},{"internalType":"uint256[]","name":"b","type":"uint256[]"},{"components":[{"internalType":"uint256","name":"x","type":"uint256"},{"internalType":"uint256","name":"y","type":"uint256"}],"internalType":"struct Tuple.T[]","name":"c","type":"tuple[]"}],"internalType":"struct Tuple.S","name":"a","type":"tuple"},{"components":[{"internalType":"uint256","name":"x","type":"uint256"},{"internalType":"uint256","name":"y","type":"uint256"}],"internalType":"struct Tuple.T[2][]","name":"b","type":"tuple[2][]"},{"components":[{"internalType":"uint256","name":"x","type":"uint256"},{"internalType":"uint256","name":"y","type":"uint256"}],"internalType":"struct Tuple.T[][2]","name":"c","type":"tuple[][2]"},{"components":[{"internalType":"uint256","name":"a","type":"uint256"},{"internalType":"uint256[]","name":"b","type":"uint256[]"},{"components":[{"internalType":"uint256","name":"x","type":"uint256"},{"internalType":"uint256","name":"y","type":"uint256"}],"internalType":"struct Tuple.T[]","name":"c","type":"tuple[]"}],"internalType":"struct 
Tuple.S[]","name":"d","type":"tuple[]"},{"internalType":"uint256[]","name":"e","type":"uint256[]"}],"name":"func2","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":true,"inputs":[{"components":[{"internalType":"uint16","name":"x","type":"uint16"},{"internalType":"uint16","name":"y","type":"uint16"}],"internalType":"struct Tuple.Q[]","name":"","type":"tuple[]"}],"name":"func3","outputs":[],"payable":false,"stateMutability":"pure","type":"function"}] - `}, - ` - "math/big" - "reflect" - - "github.com/ethereum/go-ethereum/accounts/abi/bind" - "github.com/ethereum/go-ethereum/accounts/abi/bind/backends" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/crypto" - `, - - ` - key, _ := crypto.GenerateKey() - auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) - - sim := backends.NewSimulatedBackend(types.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000) - defer sim.Close() - - _, _, contract, err := DeployTuple(auth, sim) - if err != nil { - t.Fatalf("deploy contract failed %v", err) - } - sim.Commit() - - check := func(a, b interface{}, errMsg string) { - if !reflect.DeepEqual(a, b) { - t.Fatal(errMsg) - } - } - - a := TupleS{ - A: big.NewInt(1), - B: []*big.Int{big.NewInt(2), big.NewInt(3)}, - C: []TupleT{ - { - X: big.NewInt(4), - Y: big.NewInt(5), - }, - { - X: big.NewInt(6), - Y: big.NewInt(7), - }, - }, - } - - b := [][2]TupleT{ - { - { - X: big.NewInt(8), - Y: big.NewInt(9), - }, - { - X: big.NewInt(10), - Y: big.NewInt(11), - }, - }, - } - - c := [2][]TupleT{ - { - { - X: big.NewInt(12), - Y: big.NewInt(13), - }, - { - X: big.NewInt(14), - Y: big.NewInt(15), - }, - }, - { - { - X: big.NewInt(16), - Y: big.NewInt(17), - }, - }, - } - - d := []TupleS{a} - - e := []*big.Int{big.NewInt(18), big.NewInt(19)} - ret1, ret2, ret3, ret4, ret5, err := contract.Func1(nil, a, b, c, d, e) - if err != nil { - t.Fatalf("invoke contract failed, err %v", err) - } - check(ret1, a, "ret1 mismatch") - check(ret2, b, "ret2 mismatch") - check(ret3, c, "ret3 mismatch") - check(ret4, d, "ret4 mismatch") - check(ret5, e, "ret5 mismatch") - - _, err = contract.Func2(auth, a, b, c, d, e) - if err != nil { - t.Fatalf("invoke contract failed, err %v", err) - } - sim.Commit() - - iter, err := contract.FilterTupleEvent(nil) - if err != nil { - t.Fatalf("failed to create event filter, err %v", err) - } - defer iter.Close() - - iter.Next() - check(iter.Event.A, a, "field1 mismatch") - check(iter.Event.B, b, "field2 mismatch") - check(iter.Event.C, c, "field3 mismatch") - check(iter.Event.D, d, "field4 mismatch") - check(iter.Event.E, e, "field5 mismatch") - - err = contract.Func3(nil, nil) - if err != nil { - t.Fatalf("failed to call function which has no return, err %v", err) - } - `, - nil, - nil, - nil, - nil, - }, - { - `UseLibrary`, - ` - library Math { - function add(uint a, uint b) public view returns(uint) { - return a + b; - } - } - - contract UseLibrary { - function add (uint c, uint d) public view returns(uint) { - return Math.add(c,d); - } - } - `, - []string{ - // Bytecode for the UseLibrary contract - 
`608060405234801561001057600080fd5b5061011d806100206000396000f3fe6080604052348015600f57600080fd5b506004361060285760003560e01c8063771602f714602d575b600080fd5b604d60048036036040811015604157600080fd5b5080359060200135605f565b60408051918252519081900360200190f35b600073__$b98c933f0a6ececcd167bd4f9d3299b1a0$__63771602f784846040518363ffffffff1660e01b8152600401808381526020018281526020019250505060206040518083038186803b15801560b757600080fd5b505af415801560ca573d6000803e3d6000fd5b505050506040513d602081101560df57600080fd5b5051939250505056fea265627a7a72305820eb5c38f42445604cfa43d85e3aa5ecc48b0a646456c902dd48420ae7241d06f664736f6c63430005090032`, - // Bytecode for the Math contract - `60a3610024600b82828239805160001a607314601757fe5b30600052607381538281f3fe730000000000000000000000000000000000000000301460806040526004361060335760003560e01c8063771602f7146038575b600080fd5b605860048036036040811015604c57600080fd5b5080359060200135606a565b60408051918252519081900360200190f35b019056fea265627a7a723058206fc6c05f3078327f9c763edffdb5ab5f8bd212e293a1306c7d0ad05af3ad35f464736f6c63430005090032`, - }, - []string{ - `[{"constant":true,"inputs":[{"name":"c","type":"uint256"},{"name":"d","type":"uint256"}],"name":"add","outputs":[{"name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"}]`, - `[{"constant":true,"inputs":[{"name":"a","type":"uint256"},{"name":"b","type":"uint256"}],"name":"add","outputs":[{"name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"}]`, - }, - ` - "math/big" - - "github.com/ethereum/go-ethereum/accounts/abi/bind" - "github.com/ethereum/go-ethereum/accounts/abi/bind/backends" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/crypto" - `, - ` - // Generate a new random account and a funded simulator - key, _ := crypto.GenerateKey() - auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) - - sim := backends.NewSimulatedBackend(types.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000) - defer sim.Close() - - //deploy the test contract - _, _, testContract, err := DeployUseLibrary(auth, sim) - if err != nil { - t.Fatalf("Failed to deploy test contract: %v", err) - } - - // Finish deploy. - sim.Commit() - - // Check that the library contract has been deployed - // by calling the contract's add function. 
- res, err := testContract.Add(&bind.CallOpts{ - From: auth.From, - Pending: false, - }, big.NewInt(1), big.NewInt(2)) - if err != nil { - t.Fatalf("Failed to call linked contract: %v", err) - } - if res.Cmp(big.NewInt(3)) != 0 { - t.Fatalf("Add did not return the correct result: %d != %d", res, 3) - } - `, - nil, - map[string]string{ - "b98c933f0a6ececcd167bd4f9d3299b1a0": "Math", - }, - nil, - []string{"UseLibrary", "Math"}, - }, { - "Overload", - ` - pragma solidity ^0.5.10; - - contract overload { - mapping(address => uint256) balances; - - event bar(uint256 i); - event bar(uint256 i, uint256 j); - - function foo(uint256 i) public { - emit bar(i); - } - function foo(uint256 i, uint256 j) public { - emit bar(i, j); - } - } - `, - []string{`608060405234801561001057600080fd5b50610153806100206000396000f3fe608060405234801561001057600080fd5b50600436106100365760003560e01c806304bc52f81461003b5780632fbebd3814610073575b600080fd5b6100716004803603604081101561005157600080fd5b8101908080359060200190929190803590602001909291905050506100a1565b005b61009f6004803603602081101561008957600080fd5b81019080803590602001909291905050506100e4565b005b7fae42e9514233792a47a1e4554624e83fe852228e1503f63cd383e8a431f4f46d8282604051808381526020018281526020019250505060405180910390a15050565b7f0423a1321222a0a8716c22b92fac42d85a45a612b696a461784d9fa537c81e5c816040518082815260200191505060405180910390a15056fea265627a7a72305820e22b049858b33291cbe67eeaece0c5f64333e439d27032ea8337d08b1de18fe864736f6c634300050a0032`}, - []string{`[{"constant":false,"inputs":[{"name":"i","type":"uint256"},{"name":"j","type":"uint256"}],"name":"foo","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"i","type":"uint256"}],"name":"foo","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},{"anonymous":false,"inputs":[{"indexed":false,"name":"i","type":"uint256"}],"name":"bar","type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"name":"i","type":"uint256"},{"indexed":false,"name":"j","type":"uint256"}],"name":"bar","type":"event"}]`}, - ` - "math/big" - "time" - - "github.com/ethereum/go-ethereum/accounts/abi/bind" - "github.com/ethereum/go-ethereum/accounts/abi/bind/backends" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/crypto" - `, - ` - // Initialize test accounts - key, _ := crypto.GenerateKey() - auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) - sim := backends.NewSimulatedBackend(types.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000) - defer sim.Close() - - // deploy the test contract - _, _, contract, err := DeployOverload(auth, sim) - if err != nil { - t.Fatalf("Failed to deploy contract: %v", err) - } - // Finish deploy. 
- sim.Commit() - - resCh, stopCh := make(chan uint64), make(chan struct{}) - - go func() { - barSink := make(chan *OverloadBar) - sub, _ := contract.WatchBar(nil, barSink) - defer sub.Unsubscribe() - - bar0Sink := make(chan *OverloadBar0) - sub0, _ := contract.WatchBar0(nil, bar0Sink) - defer sub0.Unsubscribe() - - for { - select { - case ev := <-barSink: - resCh <- ev.I.Uint64() - case ev := <-bar0Sink: - resCh <- ev.I.Uint64() + ev.J.Uint64() - case <-stopCh: - return - } - } - }() - contract.Foo(auth, big.NewInt(1), big.NewInt(2)) - sim.Commit() - select { - case n := <-resCh: - if n != 3 { - t.Fatalf("Invalid bar0 event") - } - case <-time.NewTimer(3 * time.Second).C: - t.Fatalf("Wait bar0 event timeout") - } - - contract.Foo0(auth, big.NewInt(1)) - sim.Commit() - select { - case n := <-resCh: - if n != 1 { - t.Fatalf("Invalid bar event") - } - case <-time.NewTimer(3 * time.Second).C: - t.Fatalf("Wait bar event timeout") - } - close(stopCh) - `, - nil, - nil, - nil, - nil, - }, - { - "IdentifierCollision", - ` - pragma solidity >=0.4.19 <0.6.0; - - contract IdentifierCollision { - uint public _myVar; - - function MyVar() public view returns (uint) { - return _myVar; - } - } - `, - []string{"60806040523480156100115760006000fd5b50610017565b60c3806100256000396000f3fe608060405234801560105760006000fd5b506004361060365760003560e01c806301ad4d8714603c5780634ef1f0ad146058576036565b60006000fd5b60426074565b6040518082815260200191505060405180910390f35b605e607d565b6040518082815260200191505060405180910390f35b60006000505481565b60006000600050549050608b565b9056fea265627a7a7231582067c8d84688b01c4754ba40a2a871cede94ea1f28b5981593ab2a45b46ac43af664736f6c634300050c0032"}, - []string{`[{"constant":true,"inputs":[],"name":"MyVar","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"_myVar","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"}]`}, - ` - "math/big" - - "github.com/ethereum/go-ethereum/accounts/abi/bind" - "github.com/ethereum/go-ethereum/accounts/abi/bind/backends" - "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/core/types" - `, - ` - // Initialize test accounts - key, _ := crypto.GenerateKey() - addr := crypto.PubkeyToAddress(key.PublicKey) - - // Deploy registrar contract - sim := backends.NewSimulatedBackend(types.GenesisAlloc{addr: {Balance: big.NewInt(10000000000000000)}}, 10000000) - defer sim.Close() - - transactOpts, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) - _, _, _, err := DeployIdentifierCollision(transactOpts, sim) - if err != nil { - t.Fatalf("failed to deploy contract: %v", err) - } - `, - nil, - nil, - map[string]string{"_myVar": "pubVar"}, // alias MyVar to PubVar - nil, - }, - { - "MultiContracts", - ` - pragma solidity ^0.5.11; - pragma experimental ABIEncoderV2; - - library ExternalLib { - struct SharedStruct{ - uint256 f1; - bytes32 f2; - } - } - - contract ContractOne { - function foo(ExternalLib.SharedStruct memory s) pure public { - // Do stuff - } - } - - contract ContractTwo { - function bar(ExternalLib.SharedStruct memory s) pure public { - // Do stuff - } - } - `, - []string{ - 
`60806040523480156100115760006000fd5b50610017565b6101b5806100266000396000f3fe60806040523480156100115760006000fd5b50600436106100305760003560e01c80639d8a8ba81461003657610030565b60006000fd5b610050600480360361004b91908101906100d1565b610052565b005b5b5056610171565b6000813590506100698161013d565b92915050565b6000604082840312156100825760006000fd5b61008c60406100fb565b9050600061009c848285016100bc565b60008301525060206100b08482850161005a565b60208301525092915050565b6000813590506100cb81610157565b92915050565b6000604082840312156100e45760006000fd5b60006100f28482850161006f565b91505092915050565b6000604051905081810181811067ffffffffffffffff8211171561011f5760006000fd5b8060405250919050565b6000819050919050565b6000819050919050565b61014681610129565b811415156101545760006000fd5b50565b61016081610133565b8114151561016e5760006000fd5b50565bfea365627a7a72315820749274eb7f6c01010d5322af4e1668b0a154409eb7968bd6cae5524c7ed669bb6c6578706572696d656e74616cf564736f6c634300050c0040`, - `60806040523480156100115760006000fd5b50610017565b6101b5806100266000396000f3fe60806040523480156100115760006000fd5b50600436106100305760003560e01c8063db8ba08c1461003657610030565b60006000fd5b610050600480360361004b91908101906100d1565b610052565b005b5b5056610171565b6000813590506100698161013d565b92915050565b6000604082840312156100825760006000fd5b61008c60406100fb565b9050600061009c848285016100bc565b60008301525060206100b08482850161005a565b60208301525092915050565b6000813590506100cb81610157565b92915050565b6000604082840312156100e45760006000fd5b60006100f28482850161006f565b91505092915050565b6000604051905081810181811067ffffffffffffffff8211171561011f5760006000fd5b8060405250919050565b6000819050919050565b6000819050919050565b61014681610129565b811415156101545760006000fd5b50565b61016081610133565b8114151561016e5760006000fd5b50565bfea365627a7a723158209bc28ee7ea97c131a13330d77ec73b4493b5c59c648352da81dd288b021192596c6578706572696d656e74616cf564736f6c634300050c0040`, - `606c6026600b82828239805160001a6073141515601857fe5b30600052607381538281f350fe73000000000000000000000000000000000000000030146080604052600436106023575b60006000fdfea365627a7a72315820518f0110144f5b3de95697d05e456a064656890d08e6f9cff47f3be710cc46a36c6578706572696d656e74616cf564736f6c634300050c0040`, - }, - []string{ - `[{"constant":true,"inputs":[{"components":[{"internalType":"uint256","name":"f1","type":"uint256"},{"internalType":"bytes32","name":"f2","type":"bytes32"}],"internalType":"struct ExternalLib.SharedStruct","name":"s","type":"tuple"}],"name":"foo","outputs":[],"payable":false,"stateMutability":"pure","type":"function"}]`, - `[{"constant":true,"inputs":[{"components":[{"internalType":"uint256","name":"f1","type":"uint256"},{"internalType":"bytes32","name":"f2","type":"bytes32"}],"internalType":"struct ExternalLib.SharedStruct","name":"s","type":"tuple"}],"name":"bar","outputs":[],"payable":false,"stateMutability":"pure","type":"function"}]`, - `[]`, - }, - ` - "math/big" - - "github.com/ethereum/go-ethereum/accounts/abi/bind" - "github.com/ethereum/go-ethereum/accounts/abi/bind/backends" - "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/core/types" - `, - ` - key, _ := crypto.GenerateKey() - addr := crypto.PubkeyToAddress(key.PublicKey) - - // Deploy registrar contract - sim := backends.NewSimulatedBackend(types.GenesisAlloc{addr: {Balance: big.NewInt(10000000000000000)}}, 10000000) - defer sim.Close() - - transactOpts, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) - _, _, c1, err := DeployContractOne(transactOpts, sim) - if err != nil { - t.Fatal("Failed to 
deploy contract") - } - sim.Commit() - err = c1.Foo(nil, ExternalLibSharedStruct{ - F1: big.NewInt(100), - F2: [32]byte{0x01, 0x02, 0x03}, - }) - if err != nil { - t.Fatal("Failed to invoke function") - } - _, _, c2, err := DeployContractTwo(transactOpts, sim) - if err != nil { - t.Fatal("Failed to deploy contract") - } - sim.Commit() - err = c2.Bar(nil, ExternalLibSharedStruct{ - F1: big.NewInt(100), - F2: [32]byte{0x01, 0x02, 0x03}, - }) - if err != nil { - t.Fatal("Failed to invoke function") - } - `, - nil, - nil, - nil, - []string{"ContractOne", "ContractTwo", "ExternalLib"}, - }, - // Test the existence of the free retrieval calls - { - `PureAndView`, - `pragma solidity >=0.6.0; - contract PureAndView { - function PureFunc() public pure returns (uint) { - return 42; - } - function ViewFunc() public view returns (uint) { - return block.number; - } - } - `, - []string{`608060405234801561001057600080fd5b5060b68061001f6000396000f3fe6080604052348015600f57600080fd5b506004361060325760003560e01c806376b5686a146037578063bb38c66c146053575b600080fd5b603d606f565b6040518082815260200191505060405180910390f35b60596077565b6040518082815260200191505060405180910390f35b600043905090565b6000602a90509056fea2646970667358221220d158c2ab7fdfce366a7998ec79ab84edd43b9815630bbaede2c760ea77f29f7f64736f6c63430006000033`}, - []string{`[{"inputs": [],"name": "PureFunc","outputs": [{"internalType": "uint256","name": "","type": "uint256"}],"stateMutability": "pure","type": "function"},{"inputs": [],"name": "ViewFunc","outputs": [{"internalType": "uint256","name": "","type": "uint256"}],"stateMutability": "view","type": "function"}]`}, - ` - "math/big" - - "github.com/ethereum/go-ethereum/accounts/abi/bind" - "github.com/ethereum/go-ethereum/accounts/abi/bind/backends" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/crypto" - `, - ` - // Generate a new random account and a funded simulator - key, _ := crypto.GenerateKey() - auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) - - sim := backends.NewSimulatedBackend(types.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000000000)}}, 10000000) - defer sim.Close() - - // Deploy a tester contract and execute a structured call on it - _, _, pav, err := DeployPureAndView(auth, sim) - if err != nil { - t.Fatalf("Failed to deploy PureAndView contract: %v", err) - } - sim.Commit() - - // This test the existence of the free retriever call for view and pure functions - if num, err := pav.PureFunc(nil); err != nil { - t.Fatalf("Failed to call anonymous field retriever: %v", err) - } else if num.Cmp(big.NewInt(42)) != 0 { - t.Fatalf("Retrieved value mismatch: have %v, want %v", num, 42) - } - if num, err := pav.ViewFunc(nil); err != nil { - t.Fatalf("Failed to call anonymous field retriever: %v", err) - } else if num.Cmp(big.NewInt(1)) != 0 { - t.Fatalf("Retrieved value mismatch: have %v, want %v", num, 1) - } - `, - nil, - nil, - nil, - nil, - }, - // Test fallback separation introduced in v0.6.0 - { - `NewFallbacks`, - ` - pragma solidity >=0.6.0 <0.7.0; - - contract NewFallbacks { - event Fallback(bytes data); - fallback() external { - emit Fallback(msg.data); - } - - event Received(address addr, uint value); - receive() external payable { - emit Received(msg.sender, msg.value); - } - } - `, - 
[]string{"6080604052348015600f57600080fd5b506101078061001f6000396000f3fe608060405236605f577f88a5966d370b9919b20f3e2c13ff65706f196a4e32cc2c12bf57088f885258743334604051808373ffffffffffffffffffffffffffffffffffffffff1681526020018281526020019250505060405180910390a1005b348015606a57600080fd5b507f9043988963722edecc2099c75b0af0ff76af14ffca42ed6bce059a20a2a9f98660003660405180806020018281038252848482818152602001925080828437600081840152601f19601f820116905080830192505050935050505060405180910390a100fea26469706673582212201f994dcfbc53bf610b19176f9a361eafa77b447fd9c796fa2c615dfd0aaf3b8b64736f6c634300060c0033"}, - []string{`[{"anonymous":false,"inputs":[{"indexed":false,"internalType":"bytes","name":"data","type":"bytes"}],"name":"Fallback","type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"internalType":"address","name":"addr","type":"address"},{"indexed":false,"internalType":"uint256","name":"value","type":"uint256"}],"name":"Received","type":"event"},{"stateMutability":"nonpayable","type":"fallback"},{"stateMutability":"payable","type":"receive"}]`}, - ` - "bytes" - "math/big" - - "github.com/ethereum/go-ethereum/accounts/abi/bind" - "github.com/ethereum/go-ethereum/accounts/abi/bind/backends" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/crypto" - `, - ` - key, _ := crypto.GenerateKey() - addr := crypto.PubkeyToAddress(key.PublicKey) - - sim := backends.NewSimulatedBackend(types.GenesisAlloc{addr: {Balance: big.NewInt(10000000000000000)}}, 1000000) - defer sim.Close() - - opts, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) - _, _, c, err := DeployNewFallbacks(opts, sim) - if err != nil { - t.Fatalf("Failed to deploy contract: %v", err) - } - sim.Commit() - - // Test receive function - opts.Value = big.NewInt(100) - c.Receive(opts) - sim.Commit() - - var gotEvent bool - iter, _ := c.FilterReceived(nil) - defer iter.Close() - for iter.Next() { - if iter.Event.Addr != addr { - t.Fatal("Msg.sender mismatch") - } - if iter.Event.Value.Uint64() != 100 { - t.Fatal("Msg.value mismatch") - } - gotEvent = true - break - } - if !gotEvent { - t.Fatal("Expect to receive event emitted by receive") - } - - // Test fallback function - gotEvent = false - opts.Value = nil - calldata := []byte{0x01, 0x02, 0x03} - c.Fallback(opts, calldata) - sim.Commit() - - iter2, _ := c.FilterFallback(nil) - defer iter2.Close() - for iter2.Next() { - if !bytes.Equal(iter2.Event.Data, calldata) { - t.Fatal("calldata mismatch") - } - gotEvent = true - break - } - if !gotEvent { - t.Fatal("Expect to receive event emitted by fallback") - } - `, - nil, - nil, - nil, - nil, - }, - // Test resolving single struct argument - { - `NewSingleStructArgument`, - ` - pragma solidity ^0.8.0; - - contract NewSingleStructArgument { - struct MyStruct{ - uint256 a; - uint256 b; - } - event StructEvent(MyStruct s); - function TestEvent() public { - emit StructEvent(MyStruct({a: 1, b: 2})); - } - } - `, - 
[]string{"608060405234801561001057600080fd5b50610113806100206000396000f3fe6080604052348015600f57600080fd5b506004361060285760003560e01c806324ec1d3f14602d575b600080fd5b60336035565b005b7fb4b2ff75e30cb4317eaae16dd8a187dd89978df17565104caa6c2797caae27d460405180604001604052806001815260200160028152506040516078919060ba565b60405180910390a1565b6040820160008201516096600085018260ad565b50602082015160a7602085018260ad565b50505050565b60b48160d3565b82525050565b600060408201905060cd60008301846082565b92915050565b600081905091905056fea26469706673582212208823628796125bf9941ce4eda18da1be3cf2931b231708ab848e1bd7151c0c9a64736f6c63430008070033"}, - []string{`[{"anonymous":false,"inputs":[{"components":[{"internalType":"uint256","name":"a","type":"uint256"},{"internalType":"uint256","name":"b","type":"uint256"}],"indexed":false,"internalType":"struct Test.MyStruct","name":"s","type":"tuple"}],"name":"StructEvent","type":"event"},{"inputs":[],"name":"TestEvent","outputs":[],"stateMutability":"nonpayable","type":"function"}]`}, - ` - "math/big" - - "github.com/ethereum/go-ethereum/accounts/abi/bind" - "github.com/ethereum/go-ethereum/accounts/abi/bind/backends" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/eth/ethconfig" - `, - ` - var ( - key, _ = crypto.GenerateKey() - user, _ = bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) - sim = backends.NewSimulatedBackend(types.GenesisAlloc{user.From: {Balance: big.NewInt(1000000000000000000)}}, ethconfig.Defaults.Miner.GasCeil) - ) - defer sim.Close() - - _, _, d, err := DeployNewSingleStructArgument(user, sim) - if err != nil { - t.Fatalf("Failed to deploy contract %v", err) - } - sim.Commit() - - _, err = d.TestEvent(user) - if err != nil { - t.Fatalf("Failed to call contract %v", err) - } - sim.Commit() - - it, err := d.FilterStructEvent(nil) - if err != nil { - t.Fatalf("Failed to filter contract event %v", err) - } - var count int - for it.Next() { - if it.Event.S.A.Cmp(big.NewInt(1)) != 0 { - t.Fatal("Unexpected contract event") - } - if it.Event.S.B.Cmp(big.NewInt(2)) != 0 { - t.Fatal("Unexpected contract event") - } - count += 1 - } - if count != 1 { - t.Fatal("Unexpected contract event number") - } - `, - nil, - nil, - nil, - nil, - }, - // Test errors introduced in v0.8.4 - { - `NewErrors`, - ` - pragma solidity >0.8.4; - - contract NewErrors { - error MyError(uint256); - error MyError1(uint256); - error MyError2(uint256, uint256); - error MyError3(uint256 a, uint256 b, uint256 c); - function Error() public pure { - revert MyError3(1,2,3); - } - } - `, - []string{"0x6080604052348015600f57600080fd5b5060998061001e6000396000f3fe6080604052348015600f57600080fd5b506004361060285760003560e01c8063726c638214602d575b600080fd5b60336035565b005b60405163024876cd60e61b815260016004820152600260248201526003604482015260640160405180910390fdfea264697066735822122093f786a1bc60216540cd999fbb4a6109e0fef20abcff6e9107fb2817ca968f3c64736f6c63430008070033"}, - 
[]string{`[{"inputs":[{"internalType":"uint256","name":"","type":"uint256"}],"name":"MyError","type":"error"},{"inputs":[{"internalType":"uint256","name":"","type":"uint256"}],"name":"MyError1","type":"error"},{"inputs":[{"internalType":"uint256","name":"","type":"uint256"},{"internalType":"uint256","name":"","type":"uint256"}],"name":"MyError2","type":"error"},{"inputs":[{"internalType":"uint256","name":"a","type":"uint256"},{"internalType":"uint256","name":"b","type":"uint256"},{"internalType":"uint256","name":"c","type":"uint256"}],"name":"MyError3","type":"error"},{"inputs":[],"name":"Error","outputs":[],"stateMutability":"pure","type":"function"}]`}, - ` - "context" - "math/big" - - "github.com/ethereum/go-ethereum/accounts/abi/bind" - "github.com/ethereum/go-ethereum/accounts/abi/bind/backends" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/eth/ethconfig" - `, - ` - var ( - key, _ = crypto.GenerateKey() - user, _ = bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) - sim = backends.NewSimulatedBackend(types.GenesisAlloc{user.From: {Balance: big.NewInt(1000000000000000000)}}, ethconfig.Defaults.Miner.GasCeil) - ) - defer sim.Close() - - _, tx, contract, err := DeployNewErrors(user, sim) - if err != nil { - t.Fatal(err) - } - sim.Commit() - _, err = bind.WaitDeployed(context.Background(), sim, tx) - if err != nil { - t.Error(err) - } - if err := contract.Error(new(bind.CallOpts)); err == nil { - t.Fatalf("expected contract to throw error") - } - // TODO (MariusVanDerWijden unpack error using abigen - // once that is implemented - `, - nil, - nil, - nil, - nil, - }, - { - name: `ConstructorWithStructParam`, - contract: ` - pragma solidity >=0.8.0 <0.9.0; - - contract ConstructorWithStructParam { - struct StructType { - uint256 field; - } - - constructor(StructType memory st) {} - } - `, - bytecode: []string{`0x608060405234801561001057600080fd5b506040516101c43803806101c48339818101604052810190610032919061014a565b50610177565b6000604051905090565b600080fd5b600080fd5b6000601f19601f8301169050919050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b6100958261004c565b810181811067ffffffffffffffff821117156100b4576100b361005d565b5b80604052505050565b60006100c7610038565b90506100d3828261008c565b919050565b6000819050919050565b6100eb816100d8565b81146100f657600080fd5b50565b600081519050610108816100e2565b92915050565b60006020828403121561012457610123610047565b5b61012e60206100bd565b9050600061013e848285016100f9565b60008301525092915050565b6000602082840312156101605761015f610042565b5b600061016e8482850161010e565b91505092915050565b603f806101856000396000f3fe6080604052600080fdfea2646970667358221220cdffa667affecefac5561f65f4a4ba914204a8d4eb859d8cd426fb306e5c12a364736f6c634300080a0033`}, - abi: []string{`[{"inputs":[{"components":[{"internalType":"uint256","name":"field","type":"uint256"}],"internalType":"struct ConstructorWithStructParam.StructType","name":"st","type":"tuple"}],"stateMutability":"nonpayable","type":"constructor"}]`}, - imports: ` - "context" - "math/big" - - "github.com/ethereum/go-ethereum/accounts/abi/bind" - "github.com/ethereum/go-ethereum/accounts/abi/bind/backends" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/eth/ethconfig" - `, - tester: ` - var ( - key, _ = crypto.GenerateKey() - user, _ = bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) - sim = 
backends.NewSimulatedBackend(types.GenesisAlloc{user.From: {Balance: big.NewInt(1000000000000000000)}}, ethconfig.Defaults.Miner.GasCeil) - ) - defer sim.Close() - - _, tx, _, err := DeployConstructorWithStructParam(user, sim, ConstructorWithStructParamStructType{Field: big.NewInt(42)}) - if err != nil { - t.Fatalf("DeployConstructorWithStructParam() got err %v; want nil err", err) - } - sim.Commit() - - if _, err = bind.WaitDeployed(context.Background(), sim, tx); err != nil { - t.Logf("Deployment tx: %+v", tx) - t.Errorf("bind.WaitDeployed(nil, %T, ) got err %v; want nil err", sim, err) - } - `, - }, - { - name: `NameConflict`, - contract: ` - // SPDX-License-Identifier: GPL-3.0 - pragma solidity >=0.4.22 <0.9.0; - contract oracle { - struct request { - bytes data; - bytes _data; - } - event log (int msg, int _msg); - function addRequest(request memory req) public pure {} - function getRequest() pure public returns (request memory) { - return request("", ""); - } - } - `, - bytecode: []string{"0x608060405234801561001057600080fd5b5061042b806100206000396000f3fe608060405234801561001057600080fd5b50600436106100365760003560e01c8063c2bb515f1461003b578063cce7b04814610059575b600080fd5b610043610075565b60405161005091906101af565b60405180910390f35b610073600480360381019061006e91906103ac565b6100b5565b005b61007d6100b8565b604051806040016040528060405180602001604052806000815250815260200160405180602001604052806000815250815250905090565b50565b604051806040016040528060608152602001606081525090565b600081519050919050565b600082825260208201905092915050565b60005b8381101561010c5780820151818401526020810190506100f1565b8381111561011b576000848401525b50505050565b6000601f19601f8301169050919050565b600061013d826100d2565b61014781856100dd565b93506101578185602086016100ee565b61016081610121565b840191505092915050565b600060408301600083015184820360008601526101888282610132565b915050602083015184820360208601526101a28282610132565b9150508091505092915050565b600060208201905081810360008301526101c9818461016b565b905092915050565b6000604051905090565b600080fd5b600080fd5b600080fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b61022282610121565b810181811067ffffffffffffffff82111715610241576102406101ea565b5b80604052505050565b60006102546101d1565b90506102608282610219565b919050565b600080fd5b600080fd5b600080fd5b600067ffffffffffffffff82111561028f5761028e6101ea565b5b61029882610121565b9050602081019050919050565b82818337600083830152505050565b60006102c76102c284610274565b61024a565b9050828152602081018484840111156102e3576102e261026f565b5b6102ee8482856102a5565b509392505050565b600082601f83011261030b5761030a61026a565b5b813561031b8482602086016102b4565b91505092915050565b60006040828403121561033a576103396101e5565b5b610344604061024a565b9050600082013567ffffffffffffffff81111561036457610363610265565b5b610370848285016102f6565b600083015250602082013567ffffffffffffffff81111561039457610393610265565b5b6103a0848285016102f6565b60208301525092915050565b6000602082840312156103c2576103c16101db565b5b600082013567ffffffffffffffff8111156103e0576103df6101e0565b5b6103ec84828501610324565b9150509291505056fea264697066735822122033bca1606af9b6aeba1673f98c52003cec19338539fb44b86690ce82c51483b564736f6c634300080e0033"}, - abi: []string{`[ { "anonymous": false, "inputs": [ { "indexed": false, "internalType": "int256", "name": "msg", "type": "int256" }, { "indexed": false, "internalType": "int256", "name": "_msg", "type": "int256" } ], "name": "log", "type": "event" }, { "inputs": [ { "components": [ { "internalType": "bytes", "name": "data", "type": 
"bytes" }, { "internalType": "bytes", "name": "_data", "type": "bytes" } ], "internalType": "struct oracle.request", "name": "req", "type": "tuple" } ], "name": "addRequest", "outputs": [], "stateMutability": "pure", "type": "function" }, { "inputs": [], "name": "getRequest", "outputs": [ { "components": [ { "internalType": "bytes", "name": "data", "type": "bytes" }, { "internalType": "bytes", "name": "_data", "type": "bytes" } ], "internalType": "struct oracle.request", "name": "", "type": "tuple" } ], "stateMutability": "pure", "type": "function" } ]`}, - imports: ` - "context" - "math/big" - - "github.com/ethereum/go-ethereum/accounts/abi/bind" - "github.com/ethereum/go-ethereum/accounts/abi/bind/backends" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/eth/ethconfig" - `, - tester: ` - var ( - key, _ = crypto.GenerateKey() - user, _ = bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) - sim = backends.NewSimulatedBackend(types.GenesisAlloc{user.From: {Balance: big.NewInt(1000000000000000000)}}, ethconfig.Defaults.Miner.GasCeil) - ) - defer sim.Close() - - _, tx, _, err := DeployNameConflict(user, sim) - if err != nil { - t.Fatalf("DeployNameConflict() got err %v; want nil err", err) - } - sim.Commit() - - if _, err = bind.WaitDeployed(context.Background(), sim, tx); err != nil { - t.Logf("Deployment tx: %+v", tx) - t.Errorf("bind.WaitDeployed(nil, %T, ) got err %v; want nil err", sim, err) - } - `, - }, - { - name: "RangeKeyword", - contract: ` - // SPDX-License-Identifier: GPL-3.0 - pragma solidity >=0.4.22 <0.9.0; - contract keywordcontract { - function functionWithKeywordParameter(range uint256) public pure {} - } - `, - bytecode: []string{"0x608060405234801561001057600080fd5b5060dc8061001f6000396000f3fe6080604052348015600f57600080fd5b506004361060285760003560e01c8063527a119f14602d575b600080fd5b60436004803603810190603f9190605b565b6045565b005b50565b6000813590506055816092565b92915050565b600060208284031215606e57606d608d565b5b6000607a848285016048565b91505092915050565b6000819050919050565b600080fd5b6099816083565b811460a357600080fd5b5056fea2646970667358221220d4f4525e2615516394055d369fb17df41c359e5e962734f27fd683ea81fd9db164736f6c63430008070033"}, - abi: []string{`[{"inputs":[{"internalType":"uint256","name":"range","type":"uint256"}],"name":"functionWithKeywordParameter","outputs":[],"stateMutability":"pure","type":"function"}]`}, - imports: ` - "context" - "math/big" - - "github.com/ethereum/go-ethereum/accounts/abi/bind" - "github.com/ethereum/go-ethereum/accounts/abi/bind/backends" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/eth/ethconfig" - `, - tester: ` - var ( - key, _ = crypto.GenerateKey() - user, _ = bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) - sim = backends.NewSimulatedBackend(types.GenesisAlloc{user.From: {Balance: big.NewInt(1000000000000000000)}}, ethconfig.Defaults.Miner.GasCeil) - ) - _, tx, _, err := DeployRangeKeyword(user, sim) - if err != nil { - t.Fatalf("error deploying contract: %v", err) - } - sim.Commit() - - if _, err = bind.WaitDeployed(context.Background(), sim, tx); err != nil { - t.Errorf("error deploying the contract: %v", err) - } - `, - }, { - name: "NumericMethodName", - contract: ` - // SPDX-License-Identifier: GPL-3.0 - pragma solidity >=0.4.22 <0.9.0; - - contract NumericMethodName { - event _1TestEvent(address _param); - function _1test() public pure {} - function __1test() 
public pure {} - function __2test() public pure {} - } - `, - bytecode: []string{"0x6080604052348015600f57600080fd5b5060958061001e6000396000f3fe6080604052348015600f57600080fd5b5060043610603c5760003560e01c80639d993132146041578063d02767c7146049578063ffa02795146051575b600080fd5b60476059565b005b604f605b565b005b6057605d565b005b565b565b56fea26469706673582212200382ca602dff96a7e2ba54657985e2b4ac423a56abe4a1f0667bc635c4d4371f64736f6c63430008110033"}, - abi: []string{`[{"anonymous":false,"inputs":[{"indexed":false,"internalType":"address","name":"_param","type":"address"}],"name":"_1TestEvent","type":"event"},{"inputs":[],"name":"_1test","outputs":[],"stateMutability":"pure","type":"function"},{"inputs":[],"name":"__1test","outputs":[],"stateMutability":"pure","type":"function"},{"inputs":[],"name":"__2test","outputs":[],"stateMutability":"pure","type":"function"}]`}, - imports: ` - "github.com/ethereum/go-ethereum/common" - `, - tester: ` - if b, err := NewNumericMethodName(common.Address{}, nil); b == nil || err != nil { - t.Fatalf("combined binding (%v) nil or error (%v) not nil", b, nil) - } -`, - }, -} - -// Tests that packages generated by the binder can be successfully compiled and -// the requested tester run against it. -func TestGolangBindings(t *testing.T) { - t.Parallel() - // Skip the test if no Go command can be found - gocmd := runtime.GOROOT() + "/bin/go" - if !common.FileExist(gocmd) { - t.Skip("go sdk not found for testing") - } - // Create a temporary workspace for the test suite - ws := t.TempDir() - - pkg := filepath.Join(ws, "bindtest") - if err := os.MkdirAll(pkg, 0700); err != nil { - t.Fatalf("failed to create package: %v", err) - } - // Generate the test suite for all the contracts - for i, tt := range bindTests { - t.Run(tt.name, func(t *testing.T) { - var types []string - if tt.types != nil { - types = tt.types - } else { - types = []string{tt.name} - } - // Generate the binding and create a Go source file in the workspace - bind, err := Bind(types, tt.abi, tt.bytecode, tt.fsigs, "bindtest", LangGo, tt.libs, tt.aliases) - if err != nil { - t.Fatalf("test %d: failed to generate binding: %v", i, err) - } - if err = os.WriteFile(filepath.Join(pkg, strings.ToLower(tt.name)+".go"), []byte(bind), 0600); err != nil { - t.Fatalf("test %d: failed to write binding: %v", i, err) - } - // Generate the test file with the injected test code - code := fmt.Sprintf(` - package bindtest - - import ( - "testing" - %s - ) - - func Test%s(t *testing.T) { - %s - } - `, tt.imports, tt.name, tt.tester) - if err := os.WriteFile(filepath.Join(pkg, strings.ToLower(tt.name)+"_test.go"), []byte(code), 0600); err != nil { - t.Fatalf("test %d: failed to write tests: %v", i, err) - } - }) - } - // Convert the package to go modules and use the current source for go-ethereum - moder := exec.Command(gocmd, "mod", "init", "bindtest") - moder.Dir = pkg - if out, err := moder.CombinedOutput(); err != nil { - t.Fatalf("failed to convert binding test to modules: %v\n%s", err, out) - } - pwd, _ := os.Getwd() - replacer := exec.Command(gocmd, "mod", "edit", "-x", "-require", "github.com/ethereum/go-ethereum@v0.0.0", "-replace", "github.com/ethereum/go-ethereum="+filepath.Join(pwd, "..", "..", "..")) // Repo root - replacer.Dir = pkg - if out, err := replacer.CombinedOutput(); err != nil { - t.Fatalf("failed to replace binding test dependency to current source tree: %v\n%s", err, out) - } - - replacer = exec.Command(gocmd, "mod", "edit", "-x", "-require", "github.com/tendermint/tendermint@v0.0.0", "-replace", 
"github.com/tendermint/tendermint=github.com/bnb-chain/tendermint@v0.31.16") // Repo root - replacer.Dir = pkg - if out, err := replacer.CombinedOutput(); err != nil { - t.Fatalf("failed to replace tendermint dependency to bnb-chain source: %v\n%s", err, out) - } - - replacer = exec.Command(gocmd, "mod", "edit", "-x", "-require", "github.com/cometbft/cometbft@v0.0.0", "-replace", "github.com/cometbft/cometbft=github.com/bnb-chain/greenfield-tendermint@v0.0.0-20230417032003-4cda1f296fb2") // Repo root - replacer.Dir = pkg - if out, err := replacer.CombinedOutput(); err != nil { - t.Fatalf("failed to replace cometbft dependency to bnb-chain source: %v\n%s", err, out) - } - - replacer = exec.Command(gocmd, "mod", "edit", "-x", "-require", "github.com/syndtr/goleveldb@v1.0.1", "-replace", "github.com/syndtr/goleveldb=github.com/syndtr/goleveldb@v1.0.1-0.20210819022825-2ae1ddf74ef7") - replacer.Dir = pkg - if out, err := replacer.CombinedOutput(); err != nil { - t.Fatalf("failed to replace cometbft dependency to bnb-chain source: %v\n%s", err, out) - } - - tidier := exec.Command(gocmd, "mod", "tidy", "-compat=1.21") - tidier.Dir = pkg - if out, err := tidier.CombinedOutput(); err != nil { - t.Fatalf("failed to tidy Go module file: %v\n%s", err, out) - } - // Test the entire package and report any failures - cmd := exec.Command(gocmd, "test", "-v", "-count", "1") - cmd.Dir = pkg - if out, err := cmd.CombinedOutput(); err != nil { - t.Fatalf("failed to run binding test: %v\n%s", err, out) - } -} diff --git a/accounts/abi/bind/util_test.go b/accounts/abi/bind/util_test.go deleted file mode 100644 index 592465f2ac..0000000000 --- a/accounts/abi/bind/util_test.go +++ /dev/null @@ -1,139 +0,0 @@ -// Copyright 2016 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . 
- -package bind_test - -import ( - "context" - "errors" - "math/big" - "testing" - "time" - - "github.com/ethereum/go-ethereum/accounts/abi/bind" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/ethclient/simulated" - "github.com/ethereum/go-ethereum/params" -) - -var testKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") - -var waitDeployedTests = map[string]struct { - code string - gas uint64 - wantAddress common.Address - wantErr error -}{ - "successful deploy": { - code: `6060604052600a8060106000396000f360606040526008565b00`, - gas: 3000000, - wantAddress: common.HexToAddress("0x3a220f351252089d385b29beca14e27f204c296a"), - }, - "empty code": { - code: ``, - gas: 300000, - wantErr: bind.ErrNoCodeAfterDeploy, - wantAddress: common.HexToAddress("0x3a220f351252089d385b29beca14e27f204c296a"), - }, -} - -func TestWaitDeployed(t *testing.T) { - t.Parallel() - for name, test := range waitDeployedTests { - backend := simulated.NewBackend( - types.GenesisAlloc{ - crypto.PubkeyToAddress(testKey.PublicKey): {Balance: big.NewInt(10000000000000000)}, - }, - ) - defer backend.Close() - - // Create the transaction - head, _ := backend.Client().HeaderByNumber(context.Background(), nil) // Should be child's, good enough - gasPrice := new(big.Int).Add(head.BaseFee, big.NewInt(params.GWei)) - - tx := types.NewContractCreation(0, big.NewInt(0), test.gas, gasPrice, common.FromHex(test.code)) - tx, _ = types.SignTx(tx, types.LatestSignerForChainID(big.NewInt(1337)), testKey) - - // Wait for it to get mined in the background. - var ( - err error - address common.Address - mined = make(chan struct{}) - ctx = context.Background() - ) - go func() { - address, err = bind.WaitDeployed(ctx, backend.Client(), tx) - close(mined) - }() - - // Send and mine the transaction. - backend.Client().SendTransaction(ctx, tx) - backend.Commit() - - select { - case <-mined: - if err != test.wantErr { - t.Errorf("test %q: error mismatch: want %q, got %q", name, test.wantErr, err) - } - if address != test.wantAddress { - t.Errorf("test %q: unexpected contract address %s", name, address.Hex()) - } - case <-time.After(2 * time.Second): - t.Errorf("test %q: timeout", name) - } - } -} - -func TestWaitDeployedCornerCases(t *testing.T) { - backend := simulated.NewBackend( - types.GenesisAlloc{ - crypto.PubkeyToAddress(testKey.PublicKey): {Balance: big.NewInt(10000000000000000)}, - }, - ) - defer backend.Close() - - head, _ := backend.Client().HeaderByNumber(context.Background(), nil) // Should be child's, good enough - gasPrice := new(big.Int).Add(head.BaseFee, big.NewInt(1)) - - // Create a transaction to an account. - code := "6060604052600a8060106000396000f360606040526008565b00" - tx := types.NewTransaction(0, common.HexToAddress("0x01"), big.NewInt(0), 3000000, gasPrice, common.FromHex(code)) - tx, _ = types.SignTx(tx, types.LatestSigner(params.AllDevChainProtocolChanges), testKey) - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - backend.Client().SendTransaction(ctx, tx) - backend.Commit() - notContractCreation := errors.New("tx is not contract creation") - if _, err := bind.WaitDeployed(ctx, backend.Client(), tx); err.Error() != notContractCreation.Error() { - t.Errorf("error mismatch: want %q, got %q, ", notContractCreation, err) - } - - // Create a transaction that is not mined. 
- tx = types.NewContractCreation(1, big.NewInt(0), 3000000, gasPrice, common.FromHex(code)) - tx, _ = types.SignTx(tx, types.LatestSigner(params.AllDevChainProtocolChanges), testKey) - - go func() { - contextCanceled := errors.New("context canceled") - if _, err := bind.WaitDeployed(ctx, backend.Client(), tx); err.Error() != contextCanceled.Error() { - t.Errorf("error mismatch: want %q, got %q, ", contextCanceled, err) - } - }() - - backend.Client().SendTransaction(ctx, tx) - cancel() -} diff --git a/cmd/clef/consolecmd_test.go b/cmd/clef/consolecmd_test.go deleted file mode 100644 index c8b37f5b92..0000000000 --- a/cmd/clef/consolecmd_test.go +++ /dev/null @@ -1,124 +0,0 @@ -// Copyright 2022 The go-ethereum Authors -// This file is part of go-ethereum. -// -// go-ethereum is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// go-ethereum is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License -// along with go-ethereum. If not, see . - -package main - -import ( - "fmt" - "os" - "path/filepath" - "strings" - "testing" -) - -// TestImportRaw tests clef --importraw -func TestImportRaw(t *testing.T) { - t.Parallel() - keyPath := filepath.Join(os.TempDir(), fmt.Sprintf("%v-tempkey.test", t.Name())) - os.WriteFile(keyPath, []byte("0102030405060708090a0102030405060708090a0102030405060708090a0102"), 0777) - t.Cleanup(func() { os.Remove(keyPath) }) - - t.Run("happy-path", func(t *testing.T) { - t.Parallel() - // Run clef importraw - clef := runClef(t, "--suppress-bootwarn", "--lightkdf", "importraw", keyPath) - clef.input("myverylongpassword").input("myverylongpassword") - if out := string(clef.Output()); !strings.Contains(out, - "Key imported:\n Address 0x9160DC9105f7De5dC5E7f3d97ef11DA47269BdA6") { - t.Logf("Output\n%v", out) - t.Error("Failure") - } - }) - // tests clef --importraw with mismatched passwords. - t.Run("pw-mismatch", func(t *testing.T) { - t.Parallel() - // Run clef importraw - clef := runClef(t, "--suppress-bootwarn", "--lightkdf", "importraw", keyPath) - clef.input("myverylongpassword1").input("myverylongpassword2").WaitExit() - if have, want := clef.StderrText(), "Passwords do not match\n"; have != want { - t.Errorf("have %q, want %q", have, want) - } - }) - // tests clef --importraw with a too short password. 
- t.Run("short-pw", func(t *testing.T) { - t.Parallel() - // Run clef importraw - clef := runClef(t, "--suppress-bootwarn", "--lightkdf", "importraw", keyPath) - clef.input("shorty").input("shorty").WaitExit() - if have, want := clef.StderrText(), - "password requirements not met: password too short (<10 characters)\n"; have != want { - t.Errorf("have %q, want %q", have, want) - } - }) -} - -// TestListAccounts tests clef --list-accounts -func TestListAccounts(t *testing.T) { - t.Parallel() - keyPath := filepath.Join(os.TempDir(), fmt.Sprintf("%v-tempkey.test", t.Name())) - os.WriteFile(keyPath, []byte("0102030405060708090a0102030405060708090a0102030405060708090a0102"), 0777) - t.Cleanup(func() { os.Remove(keyPath) }) - - t.Run("no-accounts", func(t *testing.T) { - t.Parallel() - clef := runClef(t, "--suppress-bootwarn", "--lightkdf", "list-accounts") - if out := string(clef.Output()); !strings.Contains(out, "The keystore is empty.") { - t.Logf("Output\n%v", out) - t.Error("Failure") - } - }) - t.Run("one-account", func(t *testing.T) { - t.Parallel() - // First, we need to import - clef := runClef(t, "--suppress-bootwarn", "--lightkdf", "importraw", keyPath) - clef.input("myverylongpassword").input("myverylongpassword").WaitExit() - // Secondly, do a listing, using the same datadir - clef = runWithKeystore(t, clef.Datadir, "--suppress-bootwarn", "--lightkdf", "list-accounts") - if out := string(clef.Output()); !strings.Contains(out, "0x9160DC9105f7De5dC5E7f3d97ef11DA47269BdA6 (keystore:") { - t.Logf("Output\n%v", out) - t.Error("Failure") - } - }) -} - -// TestListWallets tests clef --list-wallets -func TestListWallets(t *testing.T) { - t.Parallel() - keyPath := filepath.Join(os.TempDir(), fmt.Sprintf("%v-tempkey.test", t.Name())) - os.WriteFile(keyPath, []byte("0102030405060708090a0102030405060708090a0102030405060708090a0102"), 0777) - t.Cleanup(func() { os.Remove(keyPath) }) - - t.Run("no-accounts", func(t *testing.T) { - t.Parallel() - clef := runClef(t, "--suppress-bootwarn", "--lightkdf", "list-wallets") - if out := string(clef.Output()); !strings.Contains(out, "There are no wallets.") { - t.Logf("Output\n%v", out) - t.Error("Failure") - } - }) - t.Run("one-account", func(t *testing.T) { - t.Parallel() - // First, we need to import - clef := runClef(t, "--suppress-bootwarn", "--lightkdf", "importraw", keyPath) - clef.input("myverylongpassword").input("myverylongpassword").WaitExit() - // Secondly, do a listing, using the same datadir - clef = runWithKeystore(t, clef.Datadir, "--suppress-bootwarn", "--lightkdf", "list-wallets") - if out := string(clef.Output()); !strings.Contains(out, "Account 0: 0x9160DC9105f7De5dC5E7f3d97ef11DA47269BdA6") { - t.Logf("Output\n%v", out) - t.Error("Failure") - } - }) -} diff --git a/cmd/clef/run_test.go b/cmd/clef/run_test.go deleted file mode 100644 index 5fa6e02e14..0000000000 --- a/cmd/clef/run_test.go +++ /dev/null @@ -1,109 +0,0 @@ -// Copyright 2022 The go-ethereum Authors -// This file is part of go-ethereum. -// -// go-ethereum is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// go-ethereum is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. 
-// -// You should have received a copy of the GNU General Public License -// along with go-ethereum. If not, see . - -package main - -import ( - "fmt" - "os" - "testing" - - "github.com/ethereum/go-ethereum/internal/cmdtest" - "github.com/ethereum/go-ethereum/internal/reexec" -) - -const registeredName = "clef-test" - -type testproc struct { - *cmdtest.TestCmd - - // template variables for expect - Datadir string - Etherbase string -} - -func init() { - reexec.Register(registeredName, func() { - if err := app.Run(os.Args); err != nil { - fmt.Fprintln(os.Stderr, err) - os.Exit(1) - } - os.Exit(0) - }) -} - -func TestMain(m *testing.M) { - // check if we have been reexec'd - if reexec.Init() { - return - } - os.Exit(m.Run()) -} - -// runClef spawns clef with the given command line args and adds keystore arg. -// This method creates a temporary keystore folder which will be removed after -// the test exits. -func runClef(t *testing.T, args ...string) *testproc { - ddir, err := os.MkdirTemp("", "cleftest-*") - if err != nil { - return nil - } - t.Cleanup(func() { - os.RemoveAll(ddir) - }) - return runWithKeystore(t, ddir, args...) -} - -// runWithKeystore spawns clef with the given command line args and adds keystore arg. -// This method does _not_ create the keystore folder, but it _does_ add the arg -// to the args. -func runWithKeystore(t *testing.T, keystore string, args ...string) *testproc { - args = append([]string{"--keystore", keystore}, args...) - tt := &testproc{Datadir: keystore} - tt.TestCmd = cmdtest.NewTestCmd(t, tt) - // Boot "clef". This actually runs the test binary but the TestMain - // function will prevent any tests from running. - tt.Run(registeredName, args...) - return tt -} - -func (proc *testproc) input(text string) *testproc { - proc.TestCmd.InputLine(text) - return proc -} - -/* -// waitForEndpoint waits for the rpc endpoint to appear, or -// aborts after 3 seconds. -func (proc *testproc) waitForEndpoint(t *testing.T) *testproc { - t.Helper() - timeout := 3 * time.Second - ipc := filepath.Join(proc.Datadir, "clef.ipc") - - start := time.Now() - for time.Since(start) < timeout { - if _, err := os.Stat(ipc); !errors.Is(err, os.ErrNotExist) { - t.Logf("endpoint %v opened", ipc) - return proc - } - time.Sleep(200 * time.Millisecond) - } - t.Logf("stderr: \n%v", proc.StderrText()) - t.Logf("stdout: \n%v", proc.Output()) - t.Fatal("endpoint", ipc, "did not open within", timeout) - return proc -} -*/ diff --git a/cmd/devp2p/internal/ethtest/suite_test.go b/cmd/devp2p/internal/ethtest/suite_test.go deleted file mode 100644 index c8479fd3aa..0000000000 --- a/cmd/devp2p/internal/ethtest/suite_test.go +++ /dev/null @@ -1,155 +0,0 @@ -// Copyright 2021 The go-ethereum Authors -// This file is part of go-ethereum. -// -// go-ethereum is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// go-ethereum is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License -// along with go-ethereum. If not, see . 
- -package ethtest // TOFIX - -import ( - crand "crypto/rand" - "fmt" - "os" - "path" - "testing" - "time" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/common/hexutil" - "github.com/ethereum/go-ethereum/eth" - "github.com/ethereum/go-ethereum/eth/catalyst" - "github.com/ethereum/go-ethereum/eth/ethconfig" - "github.com/ethereum/go-ethereum/internal/utesting" - "github.com/ethereum/go-ethereum/node" - "github.com/ethereum/go-ethereum/p2p" -) - -func makeJWTSecret() (string, [32]byte, error) { - var secret [32]byte - if _, err := crand.Read(secret[:]); err != nil { - return "", secret, fmt.Errorf("failed to create jwt secret: %v", err) - } - jwtPath := path.Join(os.TempDir(), "jwt_secret") - if err := os.WriteFile(jwtPath, []byte(hexutil.Encode(secret[:])), 0600); err != nil { - return "", secret, fmt.Errorf("failed to prepare jwt secret file: %v", err) - } - return jwtPath, secret, nil -} - -func TestEthSuite(t *testing.T) { - jwtPath, secret, err := makeJWTSecret() - if err != nil { - t.Fatalf("could not make jwt secret: %v", err) - } - geth, err := runGeth("./testdata", jwtPath) - if err != nil { - t.Fatalf("could not run geth: %v", err) - } - defer geth.Close() - - suite, err := NewSuite(geth.Server().Self(), "./testdata", geth.HTTPAuthEndpoint(), common.Bytes2Hex(secret[:])) - if err != nil { - t.Fatalf("could not create new test suite: %v", err) - } - for _, test := range suite.EthTests() { - t.Run(test.Name, func(t *testing.T) { - if test.Slow && testing.Short() { - t.Skipf("%s: skipping in -short mode", test.Name) - } - result := utesting.RunTests([]utesting.Test{{Name: test.Name, Fn: test.Fn}}, os.Stdout) - if result[0].Failed { - t.Fatal() - } - }) - } -} - -func TestSnapSuite(t *testing.T) { - jwtPath, secret, err := makeJWTSecret() - if err != nil { - t.Fatalf("could not make jwt secret: %v", err) - } - geth, err := runGeth("./testdata", jwtPath) - if err != nil { - t.Fatalf("could not run geth: %v", err) - } - defer geth.Close() - - suite, err := NewSuite(geth.Server().Self(), "./testdata", geth.HTTPAuthEndpoint(), common.Bytes2Hex(secret[:])) - if err != nil { - t.Fatalf("could not create new test suite: %v", err) - } - for _, test := range suite.SnapTests() { - t.Run(test.Name, func(t *testing.T) { - result := utesting.RunTests([]utesting.Test{{Name: test.Name, Fn: test.Fn}}, os.Stdout) - if result[0].Failed { - t.Fatal() - } - }) - } -} - -// runGeth creates and starts a geth node -func runGeth(dir string, jwtPath string) (*node.Node, error) { - stack, err := node.New(&node.Config{ - AuthAddr: "127.0.0.1", - AuthPort: 0, - P2P: p2p.Config{ - ListenAddr: "127.0.0.1:0", - NoDiscovery: true, - MaxPeers: 10, // in case a test requires multiple connections, can be changed in the future - MaxPeersPerIP: 10, - NoDial: true, - }, - JWTSecret: jwtPath, - }) - if err != nil { - return nil, err - } - - err = setupGeth(stack, dir) - if err != nil { - stack.Close() - return nil, err - } - if err = stack.Start(); err != nil { - stack.Close() - return nil, err - } - return stack, nil -} - -func setupGeth(stack *node.Node, dir string) error { - chain, err := NewChain(dir) - if err != nil { - return err - } - backend, err := eth.New(stack, ðconfig.Config{ - Genesis: &chain.genesis, - NetworkId: chain.genesis.Config.ChainID.Uint64(), // 19763 - DatabaseCache: 10, - TrieCleanCache: 10, - TrieDirtyCache: 16, - TrieTimeout: 60 * time.Minute, - SnapshotCache: 10, - TriesInMemory: 128, - }) - if err != nil { - return err - } - if err := catalyst.Register(stack, 
backend); err != nil { - return fmt.Errorf("failed to register catalyst service: %v", err) - } - _, err = backend.BlockChain().InsertChain(chain.blocks[1:]) - return err -} diff --git a/cmd/ethkey/message_test.go b/cmd/ethkey/message_test.go deleted file mode 100644 index 389bb8c8ea..0000000000 --- a/cmd/ethkey/message_test.go +++ /dev/null @@ -1,65 +0,0 @@ -// Copyright 2018 The go-ethereum Authors -// This file is part of go-ethereum. -// -// go-ethereum is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// go-ethereum is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License -// along with go-ethereum. If not, see . - -package main - -import ( - "path/filepath" - "testing" -) - -func TestMessageSignVerify(t *testing.T) { - t.Parallel() - tmpdir := t.TempDir() - - keyfile := filepath.Join(tmpdir, "the-keyfile") - message := "test message" - - // Create the key. - generate := runEthkey(t, "generate", "--lightkdf", keyfile) - generate.Expect(` -!! Unsupported terminal, password will be echoed. -Password: {{.InputLine "foobar"}} -Repeat password: {{.InputLine "foobar"}} -`) - _, matches := generate.ExpectRegexp(`Address: (0x[0-9a-fA-F]{40})\n`) - address := matches[1] - generate.ExpectExit() - - // Sign a message. - sign := runEthkey(t, "signmessage", keyfile, message) - sign.Expect(` -!! Unsupported terminal, password will be echoed. -Password: {{.InputLine "foobar"}} -`) - _, matches = sign.ExpectRegexp(`Signature: ([0-9a-f]+)\n`) - signature := matches[1] - sign.ExpectExit() - - // Verify the message. - verify := runEthkey(t, "verifymessage", address, signature, message) - _, matches = verify.ExpectRegexp(` -Signature verification successful! -Recovered public key: [0-9a-f]+ -Recovered address: (0x[0-9a-fA-F]{40}) -`) - recovered := matches[1] - verify.ExpectExit() - - if recovered != address { - t.Error("recovered address doesn't match generated key") - } -} diff --git a/cmd/ethkey/run_test.go b/cmd/ethkey/run_test.go deleted file mode 100644 index 73506e5da1..0000000000 --- a/cmd/ethkey/run_test.go +++ /dev/null @@ -1,54 +0,0 @@ -// Copyright 2018 The go-ethereum Authors -// This file is part of go-ethereum. -// -// go-ethereum is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// go-ethereum is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License -// along with go-ethereum. If not, see . - -package main - -import ( - "fmt" - "os" - "testing" - - "github.com/ethereum/go-ethereum/internal/cmdtest" - "github.com/ethereum/go-ethereum/internal/reexec" -) - -type testEthkey struct { - *cmdtest.TestCmd -} - -// spawns ethkey with the given command line args. 
-func runEthkey(t *testing.T, args ...string) *testEthkey { - tt := new(testEthkey) - tt.TestCmd = cmdtest.NewTestCmd(t, tt) - tt.Run("ethkey-test", args...) - return tt -} - -func TestMain(m *testing.M) { - // Run the app if we've been exec'd as "ethkey-test" in runEthkey. - reexec.Register("ethkey-test", func() { - if err := app.Run(os.Args); err != nil { - fmt.Fprintln(os.Stderr, err) - os.Exit(1) - } - os.Exit(0) - }) - // check if we have been reexec'd - if reexec.Init() { - return - } - os.Exit(m.Run()) -} diff --git a/cmd/evm/t8n_test.go b/cmd/evm/t8n_test.go deleted file mode 100644 index 363b48c3ff..0000000000 --- a/cmd/evm/t8n_test.go +++ /dev/null @@ -1,576 +0,0 @@ -// Copyright 2021 The go-ethereum Authors -// This file is part of go-ethereum. -// -// go-ethereum is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// go-ethereum is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License -// along with go-ethereum. If not, see . - -package main - -import ( - "encoding/json" - "fmt" - "os" - "reflect" - "strings" - "testing" - - "github.com/ethereum/go-ethereum/cmd/evm/internal/t8ntool" - "github.com/ethereum/go-ethereum/internal/cmdtest" - "github.com/ethereum/go-ethereum/internal/reexec" -) - -func TestMain(m *testing.M) { - // Run the app if we've been exec'd as "ethkey-test" in runEthkey. - reexec.Register("evm-test", func() { - if err := app.Run(os.Args); err != nil { - fmt.Fprintln(os.Stderr, err) - os.Exit(1) - } - os.Exit(0) - }) - // check if we have been reexec'd - if reexec.Init() { - return - } - os.Exit(m.Run()) -} - -type testT8n struct { - *cmdtest.TestCmd -} - -type t8nInput struct { - inAlloc string - inTxs string - inEnv string - stFork string - stReward string -} - -func (args *t8nInput) get(base string) []string { - var out []string - if opt := args.inAlloc; opt != "" { - out = append(out, "--input.alloc") - out = append(out, fmt.Sprintf("%v/%v", base, opt)) - } - if opt := args.inTxs; opt != "" { - out = append(out, "--input.txs") - out = append(out, fmt.Sprintf("%v/%v", base, opt)) - } - if opt := args.inEnv; opt != "" { - out = append(out, "--input.env") - out = append(out, fmt.Sprintf("%v/%v", base, opt)) - } - if opt := args.stFork; opt != "" { - out = append(out, "--state.fork", opt) - } - if opt := args.stReward; opt != "" { - out = append(out, "--state.reward", opt) - } - return out -} - -type t8nOutput struct { - alloc bool - result bool - body bool -} - -func (args *t8nOutput) get() (out []string) { - if args.body { - out = append(out, "--output.body", "stdout") - } else { - out = append(out, "--output.body", "") // empty means ignore - } - if args.result { - out = append(out, "--output.result", "stdout") - } else { - out = append(out, "--output.result", "") - } - if args.alloc { - out = append(out, "--output.alloc", "stdout") - } else { - out = append(out, "--output.alloc", "") - } - return out -} - -func TestT8n(t *testing.T) { - t.Parallel() - tt := new(testT8n) - tt.TestCmd = cmdtest.NewTestCmd(t, tt) - for i, tc := range []struct { - base string - input t8nInput - output t8nOutput - expExitCode int - expOut string 
- }{ - { // Test exit (3) on bad config - base: "./testdata/1", - input: t8nInput{ - "alloc.json", "txs.json", "env.json", "Frontier+1346", "", - }, - output: t8nOutput{alloc: true, result: true}, - expExitCode: 3, - }, - { - base: "./testdata/1", - input: t8nInput{ - "alloc.json", "txs.json", "env.json", "Byzantium", "", - }, - output: t8nOutput{alloc: true, result: true}, - expOut: "exp.json", - }, - { // blockhash test - base: "./testdata/3", - input: t8nInput{ - "alloc.json", "txs.json", "env.json", "Berlin", "", - }, - output: t8nOutput{alloc: true, result: true}, - expOut: "exp.json", - }, - { // missing blockhash test - base: "./testdata/4", - input: t8nInput{ - "alloc.json", "txs.json", "env.json", "Berlin", "", - }, - output: t8nOutput{alloc: true, result: true}, - expExitCode: 4, - }, - { // Uncle test - base: "./testdata/5", - input: t8nInput{ - "alloc.json", "txs.json", "env.json", "Byzantium", "0x80", - }, - output: t8nOutput{alloc: true, result: true}, - expOut: "exp.json", - }, - { // Sign json transactions - base: "./testdata/13", - input: t8nInput{ - "alloc.json", "txs.json", "env.json", "London", "", - }, - output: t8nOutput{body: true}, - expOut: "exp.json", - }, - { // Already signed transactions - base: "./testdata/13", - input: t8nInput{ - "alloc.json", "signed_txs.rlp", "env.json", "London", "", - }, - output: t8nOutput{result: true}, - expOut: "exp2.json", - }, - { // Difficulty calculation - no uncles - base: "./testdata/14", - input: t8nInput{ - "alloc.json", "txs.json", "env.json", "London", "", - }, - output: t8nOutput{result: true}, - expOut: "exp.json", - }, - { // Difficulty calculation - with uncles - base: "./testdata/14", - input: t8nInput{ - "alloc.json", "txs.json", "env.uncles.json", "London", "", - }, - output: t8nOutput{result: true}, - expOut: "exp2.json", - }, - { // Difficulty calculation - with ommers + Berlin - base: "./testdata/14", - input: t8nInput{ - "alloc.json", "txs.json", "env.uncles.json", "Berlin", "", - }, - output: t8nOutput{result: true}, - expOut: "exp_berlin.json", - }, - { // Difficulty calculation on arrow glacier - base: "./testdata/19", - input: t8nInput{ - "alloc.json", "txs.json", "env.json", "London", "", - }, - output: t8nOutput{result: true}, - expOut: "exp_london.json", - }, - { // Difficulty calculation on arrow glacier - base: "./testdata/19", - input: t8nInput{ - "alloc.json", "txs.json", "env.json", "ArrowGlacier", "", - }, - output: t8nOutput{result: true}, - expOut: "exp_arrowglacier.json", - }, - { // Difficulty calculation on gray glacier - base: "./testdata/19", - input: t8nInput{ - "alloc.json", "txs.json", "env.json", "GrayGlacier", "", - }, - output: t8nOutput{result: true}, - expOut: "exp_grayglacier.json", - }, - { // Sign unprotected (pre-EIP155) transaction - base: "./testdata/23", - input: t8nInput{ - "alloc.json", "txs.json", "env.json", "Berlin", "", - }, - output: t8nOutput{result: true}, - expOut: "exp.json", - }, - { // Test post-merge transition - base: "./testdata/24", - input: t8nInput{ - "alloc.json", "txs.json", "env.json", "Merge", "", - }, - output: t8nOutput{alloc: true, result: true}, - expOut: "exp.json", - }, - { // Test post-merge transition where input is missing random - base: "./testdata/24", - input: t8nInput{ - "alloc.json", "txs.json", "env-missingrandom.json", "Merge", "", - }, - output: t8nOutput{alloc: false, result: false}, - expExitCode: 3, - }, - // base fee logic is different with go-ethereum - /* - { // Test base fee calculation - base: "./testdata/25", - input: t8nInput{ - 
"alloc.json", "txs.json", "env.json", "Merge", "", - }, - output: t8nOutput{alloc: true, result: true}, - expOut: "exp.json", - }, - */ - { // Test withdrawals transition - base: "./testdata/26", - input: t8nInput{ - "alloc.json", "txs.json", "env.json", "Shanghai", "", - }, - output: t8nOutput{alloc: true, result: true}, - expOut: "exp.json", - }, - { // Cancun tests - base: "./testdata/28", - input: t8nInput{ - "alloc.json", "txs.rlp", "env.json", "Cancun", "", - }, - output: t8nOutput{alloc: true, result: true}, - expOut: "exp.json", - }, - { // More cancun tests - base: "./testdata/29", - input: t8nInput{ - "alloc.json", "txs.json", "env.json", "Cancun", "", - }, - output: t8nOutput{alloc: true, result: true}, - expOut: "exp.json", - }, - { // More cancun test, plus example of rlp-transaction that cannot be decoded properly - base: "./testdata/30", - input: t8nInput{ - "alloc.json", "txs_more.rlp", "env.json", "Cancun", "", - }, - output: t8nOutput{alloc: true, result: true}, - expOut: "exp.json", - }, - } { - args := []string{"t8n"} - args = append(args, tc.output.get()...) - args = append(args, tc.input.get(tc.base)...) - var qArgs []string // quoted args for debugging purposes - for _, arg := range args { - if len(arg) == 0 { - qArgs = append(qArgs, `""`) - } else { - qArgs = append(qArgs, arg) - } - } - tt.Logf("args: %v\n", strings.Join(qArgs, " ")) - tt.Run("evm-test", args...) - // Compare the expected output, if provided - if tc.expOut != "" { - file := fmt.Sprintf("%v/%v", tc.base, tc.expOut) - want, err := os.ReadFile(file) - if err != nil { - t.Fatalf("test %d: could not read expected output: %v", i, err) - } - have := tt.Output() - ok, err := cmpJson(have, want) - switch { - case err != nil: - t.Fatalf("test %d, file %v: json parsing failed: %v", i, file, err) - case !ok: - t.Fatalf("test %d, file %v: output wrong, have \n%v\nwant\n%v\n", i, file, string(have), string(want)) - } - } - tt.WaitExit() - if have, want := tt.ExitStatus(), tc.expExitCode; have != want { - t.Fatalf("test %d: wrong exit code, have %d, want %d", i, have, want) - } - } -} - -type t9nInput struct { - inTxs string - stFork string -} - -func (args *t9nInput) get(base string) []string { - var out []string - if opt := args.inTxs; opt != "" { - out = append(out, "--input.txs") - out = append(out, fmt.Sprintf("%v/%v", base, opt)) - } - if opt := args.stFork; opt != "" { - out = append(out, "--state.fork", opt) - } - return out -} - -func TestT9n(t *testing.T) { - t.Parallel() - tt := new(testT8n) - tt.TestCmd = cmdtest.NewTestCmd(t, tt) - for i, tc := range []struct { - base string - input t9nInput - expExitCode int - expOut string - }{ - { // London txs on homestead - base: "./testdata/15", - input: t9nInput{ - inTxs: "signed_txs.rlp", - stFork: "Homestead", - }, - expOut: "exp.json", - }, - { // London txs on London - base: "./testdata/15", - input: t9nInput{ - inTxs: "signed_txs.rlp", - stFork: "London", - }, - expOut: "exp2.json", - }, - { // An RLP list (a blockheader really) - base: "./testdata/15", - input: t9nInput{ - inTxs: "blockheader.rlp", - stFork: "London", - }, - expOut: "exp3.json", - }, - { // Transactions with too low gas - base: "./testdata/16", - input: t9nInput{ - inTxs: "signed_txs.rlp", - stFork: "London", - }, - expOut: "exp.json", - }, - { // Transactions with value exceeding 256 bits - base: "./testdata/17", - input: t9nInput{ - inTxs: "signed_txs.rlp", - stFork: "London", - }, - expOut: "exp.json", - }, - { // Invalid RLP - base: "./testdata/18", - input: t9nInput{ - inTxs: 
"invalid.rlp", - stFork: "London", - }, - expExitCode: t8ntool.ErrorIO, - }, - } { - args := []string{"t9n"} - args = append(args, tc.input.get(tc.base)...) - - tt.Run("evm-test", args...) - tt.Logf("args:\n go run . %v\n", strings.Join(args, " ")) - // Compare the expected output, if provided - if tc.expOut != "" { - want, err := os.ReadFile(fmt.Sprintf("%v/%v", tc.base, tc.expOut)) - if err != nil { - t.Fatalf("test %d: could not read expected output: %v", i, err) - } - have := tt.Output() - ok, err := cmpJson(have, want) - switch { - case err != nil: - t.Logf(string(have)) - t.Fatalf("test %d, json parsing failed: %v", i, err) - case !ok: - t.Fatalf("test %d: output wrong, have \n%v\nwant\n%v\n", i, string(have), string(want)) - } - } - tt.WaitExit() - if have, want := tt.ExitStatus(), tc.expExitCode; have != want { - t.Fatalf("test %d: wrong exit code, have %d, want %d", i, have, want) - } - } -} - -type b11rInput struct { - inEnv string - inOmmersRlp string - inWithdrawals string - inTxsRlp string - inClique string - ethash bool - ethashMode string - ethashDir string -} - -func (args *b11rInput) get(base string) []string { - var out []string - if opt := args.inEnv; opt != "" { - out = append(out, "--input.header") - out = append(out, fmt.Sprintf("%v/%v", base, opt)) - } - if opt := args.inOmmersRlp; opt != "" { - out = append(out, "--input.ommers") - out = append(out, fmt.Sprintf("%v/%v", base, opt)) - } - if opt := args.inWithdrawals; opt != "" { - out = append(out, "--input.withdrawals") - out = append(out, fmt.Sprintf("%v/%v", base, opt)) - } - if opt := args.inTxsRlp; opt != "" { - out = append(out, "--input.txs") - out = append(out, fmt.Sprintf("%v/%v", base, opt)) - } - if opt := args.inClique; opt != "" { - out = append(out, "--seal.clique") - out = append(out, fmt.Sprintf("%v/%v", base, opt)) - } - if args.ethash { - out = append(out, "--seal.ethash") - } - if opt := args.ethashMode; opt != "" { - out = append(out, "--seal.ethash.mode") - out = append(out, fmt.Sprintf("%v/%v", base, opt)) - } - if opt := args.ethashDir; opt != "" { - out = append(out, "--seal.ethash.dir") - out = append(out, fmt.Sprintf("%v/%v", base, opt)) - } - out = append(out, "--output.block") - out = append(out, "stdout") - return out -} - -func TestB11r(t *testing.T) { - t.Parallel() - tt := new(testT8n) - tt.TestCmd = cmdtest.NewTestCmd(t, tt) - for i, tc := range []struct { - base string - input b11rInput - expExitCode int - expOut string - }{ - { // unsealed block - base: "./testdata/20", - input: b11rInput{ - inEnv: "header.json", - inOmmersRlp: "ommers.json", - inTxsRlp: "txs.rlp", - }, - expOut: "exp.json", - }, - { // ethash test seal - base: "./testdata/21", - input: b11rInput{ - inEnv: "header.json", - inOmmersRlp: "ommers.json", - inTxsRlp: "txs.rlp", - }, - expOut: "exp.json", - }, - { // clique test seal - base: "./testdata/21", - input: b11rInput{ - inEnv: "header.json", - inOmmersRlp: "ommers.json", - inTxsRlp: "txs.rlp", - inClique: "clique.json", - }, - expOut: "exp-clique.json", - }, - { // block with ommers - base: "./testdata/22", - input: b11rInput{ - inEnv: "header.json", - inOmmersRlp: "ommers.json", - inTxsRlp: "txs.rlp", - }, - expOut: "exp.json", - }, - { // block with withdrawals - base: "./testdata/27", - input: b11rInput{ - inEnv: "header.json", - inOmmersRlp: "ommers.json", - inWithdrawals: "withdrawals.json", - inTxsRlp: "txs.rlp", - }, - expOut: "exp.json", - }, - } { - args := []string{"b11r"} - args = append(args, tc.input.get(tc.base)...) - - tt.Run("evm-test", args...) 
- tt.Logf("args:\n go run . %v\n", strings.Join(args, " ")) - // Compare the expected output, if provided - if tc.expOut != "" { - want, err := os.ReadFile(fmt.Sprintf("%v/%v", tc.base, tc.expOut)) - if err != nil { - t.Fatalf("test %d: could not read expected output: %v", i, err) - } - have := tt.Output() - ok, err := cmpJson(have, want) - switch { - case err != nil: - t.Logf(string(have)) - t.Fatalf("test %d, json parsing failed: %v", i, err) - case !ok: - t.Fatalf("test %d: output wrong, have \n%v\nwant\n%v\n", i, string(have), string(want)) - } - } - tt.WaitExit() - if have, want := tt.ExitStatus(), tc.expExitCode; have != want { - t.Fatalf("test %d: wrong exit code, have %d, want %d", i, have, want) - } - } -} - -// cmpJson compares the JSON in two byte slices. -func cmpJson(a, b []byte) (bool, error) { - var j, j2 interface{} - if err := json.Unmarshal(a, &j); err != nil { - return false, err - } - if err := json.Unmarshal(b, &j2); err != nil { - return false, err - } - return reflect.DeepEqual(j2, j), nil -} diff --git a/cmd/geth/accountcmd_test.go b/cmd/geth/accountcmd_test.go deleted file mode 100644 index ea3a7c3b64..0000000000 --- a/cmd/geth/accountcmd_test.go +++ /dev/null @@ -1,378 +0,0 @@ -// Copyright 2016 The go-ethereum Authors -// This file is part of go-ethereum. -// -// go-ethereum is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// go-ethereum is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License -// along with go-ethereum. If not, see . - -package main - -import ( - "os" - "path/filepath" - "runtime" - "strings" - "testing" - - "github.com/cespare/cp" -) - -// These tests are 'smoke tests' for the account related -// subcommands and flags. -// -// For most tests, the test files from package accounts -// are copied into a temporary keystore directory. 
- -func tmpDatadirWithKeystore(t *testing.T) string { - datadir := t.TempDir() - keystore := filepath.Join(datadir, "keystore") - source := filepath.Join("..", "..", "accounts", "keystore", "testdata", "keystore") - if err := cp.CopyAll(keystore, source); err != nil { - t.Fatal(err) - } - return datadir -} - -func TestAccountListEmpty(t *testing.T) { - t.Parallel() - geth := runGeth(t, "account", "list") - geth.ExpectExit() -} - -func TestAccountList(t *testing.T) { - t.Parallel() - datadir := tmpDatadirWithKeystore(t) - var want = ` -Account #0: {7ef5a6135f1fd6a02593eedc869c6d41d934aef8} keystore://{{.Datadir}}/keystore/UTC--2016-03-22T12-57-55.920751759Z--7ef5a6135f1fd6a02593eedc869c6d41d934aef8 -Account #1: {f466859ead1932d743d622cb74fc058882e8648a} keystore://{{.Datadir}}/keystore/aaa -Account #2: {289d485d9771714cce91d3393d764e1311907acc} keystore://{{.Datadir}}/keystore/zzz -` - if runtime.GOOS == "windows" { - want = ` -Account #0: {7ef5a6135f1fd6a02593eedc869c6d41d934aef8} keystore://{{.Datadir}}\keystore\UTC--2016-03-22T12-57-55.920751759Z--7ef5a6135f1fd6a02593eedc869c6d41d934aef8 -Account #1: {f466859ead1932d743d622cb74fc058882e8648a} keystore://{{.Datadir}}\keystore\aaa -Account #2: {289d485d9771714cce91d3393d764e1311907acc} keystore://{{.Datadir}}\keystore\zzz -` - } - { - geth := runGeth(t, "account", "list", "--datadir", datadir) - geth.Expect(want) - geth.ExpectExit() - } - { - geth := runGeth(t, "--datadir", datadir, "account", "list") - geth.Expect(want) - geth.ExpectExit() - } -} - -func TestAccountNew(t *testing.T) { - t.Parallel() - geth := runGeth(t, "account", "new", "--lightkdf") - defer geth.ExpectExit() - geth.Expect(` -Your new account is locked with a password. Please give a password. Do not forget this password. -!! Unsupported terminal, password will be echoed. -Password: {{.InputLine "foobar"}} -Repeat password: {{.InputLine "foobar"}} - -Your new key was generated -`) - geth.ExpectRegexp(` -Public address of the key: 0x[0-9a-fA-F]{40} -Path of the secret key file: .*UTC--.+--[0-9a-f]{40} - -- You can share your public address with anyone. Others need it to interact with you. -- You must NEVER share the secret key with anyone! The key controls access to your funds! -- You must BACKUP your key file! Without the key, it's impossible to access account funds! -- You must REMEMBER your password! Without the password, it's impossible to decrypt the key! 
-`) -} - -func TestAccountImport(t *testing.T) { - t.Parallel() - tests := []struct{ name, key, output string }{ - { - name: "correct account", - key: "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef", - output: "Address: {fcad0b19bb29d4674531d6f115237e16afce377c}\n", - }, - { - name: "invalid character", - key: "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef1", - output: "Fatal: Failed to load the private key: invalid character '1' at end of key file\n", - }, - } - for _, test := range tests { - test := test - t.Run(test.name, func(t *testing.T) { - t.Parallel() - importAccountWithExpect(t, test.key, test.output) - }) - } -} - -func TestAccountHelp(t *testing.T) { - t.Parallel() - geth := runGeth(t, "account", "-h") - geth.WaitExit() - if have, want := geth.ExitStatus(), 0; have != want { - t.Errorf("exit error, have %d want %d", have, want) - } - - geth = runGeth(t, "account", "import", "-h") - geth.WaitExit() - if have, want := geth.ExitStatus(), 0; have != want { - t.Errorf("exit error, have %d want %d", have, want) - } -} - -func importAccountWithExpect(t *testing.T, key string, expected string) { - dir := t.TempDir() - keyfile := filepath.Join(dir, "key.prv") - if err := os.WriteFile(keyfile, []byte(key), 0600); err != nil { - t.Error(err) - } - passwordFile := filepath.Join(dir, "password.txt") - if err := os.WriteFile(passwordFile, []byte("foobar"), 0600); err != nil { - t.Error(err) - } - geth := runGeth(t, "--lightkdf", "account", "import", "-password", passwordFile, keyfile) - defer geth.ExpectExit() - geth.Expect(expected) -} - -func TestAccountNewBadRepeat(t *testing.T) { - t.Parallel() - geth := runGeth(t, "account", "new", "--lightkdf") - defer geth.ExpectExit() - geth.Expect(` -Your new account is locked with a password. Please give a password. Do not forget this password. -!! Unsupported terminal, password will be echoed. -Password: {{.InputLine "something"}} -Repeat password: {{.InputLine "something else"}} -Fatal: Passwords do not match -`) -} - -func TestAccountUpdate(t *testing.T) { - t.Parallel() - datadir := tmpDatadirWithKeystore(t) - geth := runGeth(t, "account", "update", - "--datadir", datadir, "--lightkdf", - "f466859ead1932d743d622cb74fc058882e8648a") - defer geth.ExpectExit() - geth.Expect(` -Unlocking account f466859ead1932d743d622cb74fc058882e8648a | Attempt 1/3 -!! Unsupported terminal, password will be echoed. -Password: {{.InputLine "foobar"}} -Please give a new password. Do not forget this password. -Password: {{.InputLine "foobar2"}} -Repeat password: {{.InputLine "foobar2"}} -`) -} - -func TestWalletImport(t *testing.T) { - t.Parallel() - geth := runGeth(t, "wallet", "import", "--lightkdf", "testdata/guswallet.json") - defer geth.ExpectExit() - geth.Expect(` -!! Unsupported terminal, password will be echoed. -Password: {{.InputLine "foo"}} -Address: {d4584b5f6229b7be90727b0fc8c6b91bb427821f} -`) - - files, err := os.ReadDir(filepath.Join(geth.Datadir, "keystore")) - if len(files) != 1 { - t.Errorf("expected one key file in keystore directory, found %d files (error: %v)", len(files), err) - } -} - -func TestWalletImportBadPassword(t *testing.T) { - t.Parallel() - geth := runGeth(t, "wallet", "import", "--lightkdf", "testdata/guswallet.json") - defer geth.ExpectExit() - geth.Expect(` -!! Unsupported terminal, password will be echoed. 
-Password: {{.InputLine "wrong"}} -Fatal: could not decrypt key with given password -`) -} - -func TestUnlockFlag(t *testing.T) { - t.Parallel() - geth := runMinimalGeth(t, "--port", "0", "--ipcdisable", "--datadir", tmpDatadirWithKeystore(t), - "--unlock", "f466859ead1932d743d622cb74fc058882e8648a", "console", "--exec", "loadScript('testdata/empty.js')") - geth.Expect(` -Unlocking account f466859ead1932d743d622cb74fc058882e8648a | Attempt 1/3 -!! Unsupported terminal, password will be echoed. -Password: {{.InputLine "foobar"}} -undefined -`) - geth.ExpectExit() - - wantMessages := []string{ - "Unlocked account", - "=0xf466859eAD1932D743d622CB74FC058882E8648A", - } - for _, m := range wantMessages { - if !strings.Contains(geth.StderrText(), m) { - t.Errorf("stderr text does not contain %q", m) - } - } -} - -func TestUnlockFlagWrongPassword(t *testing.T) { - t.Parallel() - geth := runMinimalGeth(t, "--port", "0", "--ipcdisable", "--datadir", tmpDatadirWithKeystore(t), - "--unlock", "f466859ead1932d743d622cb74fc058882e8648a", "console", "--exec", "loadScript('testdata/empty.js')") - - defer geth.ExpectExit() - geth.Expect(` -Unlocking account f466859ead1932d743d622cb74fc058882e8648a | Attempt 1/3 -!! Unsupported terminal, password will be echoed. -Password: {{.InputLine "wrong1"}} -Unlocking account f466859ead1932d743d622cb74fc058882e8648a | Attempt 2/3 -Password: {{.InputLine "wrong2"}} -Unlocking account f466859ead1932d743d622cb74fc058882e8648a | Attempt 3/3 -Password: {{.InputLine "wrong3"}} -Fatal: Failed to unlock account f466859ead1932d743d622cb74fc058882e8648a (could not decrypt key with given password) -`) -} - -// https://github.com/ethereum/go-ethereum/issues/1785 -func TestUnlockFlagMultiIndex(t *testing.T) { - t.Parallel() - geth := runMinimalGeth(t, "--port", "0", "--ipcdisable", "--datadir", tmpDatadirWithKeystore(t), - "--unlock", "f466859ead1932d743d622cb74fc058882e8648a", "--unlock", "0,2", "console", "--exec", "loadScript('testdata/empty.js')") - - geth.Expect(` -Unlocking account 0 | Attempt 1/3 -!! Unsupported terminal, password will be echoed. 
-Password: {{.InputLine "foobar"}} -Unlocking account 2 | Attempt 1/3 -Password: {{.InputLine "foobar"}} -undefined -`) - geth.ExpectExit() - - wantMessages := []string{ - "Unlocked account", - "=0x7EF5A6135f1FD6a02593eEdC869c6D41D934aef8", - "=0x289d485D9771714CCe91D3393D764E1311907ACc", - } - for _, m := range wantMessages { - if !strings.Contains(geth.StderrText(), m) { - t.Errorf("stderr text does not contain %q", m) - } - } -} - -func TestUnlockFlagPasswordFile(t *testing.T) { - t.Parallel() - geth := runMinimalGeth(t, "--port", "0", "--ipcdisable", "--datadir", tmpDatadirWithKeystore(t), - "--unlock", "f466859ead1932d743d622cb74fc058882e8648a", "--password", "testdata/passwords.txt", "--unlock", "0,2", "console", "--exec", "loadScript('testdata/empty.js')") - - geth.Expect(` -undefined -`) - geth.ExpectExit() - - wantMessages := []string{ - "Unlocked account", - "=0x7EF5A6135f1FD6a02593eEdC869c6D41D934aef8", - "=0x289d485D9771714CCe91D3393D764E1311907ACc", - } - for _, m := range wantMessages { - if !strings.Contains(geth.StderrText(), m) { - t.Errorf("stderr text does not contain %q", m) - } - } -} - -func TestUnlockFlagPasswordFileWrongPassword(t *testing.T) { - t.Parallel() - geth := runMinimalGeth(t, "--port", "0", "--ipcdisable", "--datadir", tmpDatadirWithKeystore(t), - "--unlock", "f466859ead1932d743d622cb74fc058882e8648a", "--password", - "testdata/wrong-passwords.txt", "--unlock", "0,2") - defer geth.ExpectExit() - geth.Expect(` -Fatal: Failed to unlock account 0 (could not decrypt key with given password) -`) -} - -func TestUnlockFlagAmbiguous(t *testing.T) { - t.Parallel() - store := filepath.Join("..", "..", "accounts", "keystore", "testdata", "dupes") - geth := runMinimalGeth(t, "--port", "0", "--ipcdisable", "--datadir", tmpDatadirWithKeystore(t), - "--unlock", "f466859ead1932d743d622cb74fc058882e8648a", "--keystore", - store, "--unlock", "f466859ead1932d743d622cb74fc058882e8648a", - "console", "--exec", "loadScript('testdata/empty.js')") - defer geth.ExpectExit() - - // Helper for the expect template, returns absolute keystore path. - geth.SetTemplateFunc("keypath", func(file string) string { - abs, _ := filepath.Abs(filepath.Join(store, file)) - return abs - }) - geth.Expect(` -Unlocking account f466859ead1932d743d622cb74fc058882e8648a | Attempt 1/3 -!! Unsupported terminal, password will be echoed. -Password: {{.InputLine "foobar"}} -Multiple key files exist for address f466859ead1932d743d622cb74fc058882e8648a: - keystore://{{keypath "1"}} - keystore://{{keypath "2"}} -Testing your password against all of them... -Your password unlocked keystore://{{keypath "1"}} -In order to avoid this warning, you need to remove the following duplicate key files: - keystore://{{keypath "2"}} -undefined -`) - geth.ExpectExit() - - wantMessages := []string{ - "Unlocked account", - "=0xf466859eAD1932D743d622CB74FC058882E8648A", - } - for _, m := range wantMessages { - if !strings.Contains(geth.StderrText(), m) { - t.Errorf("stderr text does not contain %q", m) - } - } -} - -func TestUnlockFlagAmbiguousWrongPassword(t *testing.T) { - t.Parallel() - store := filepath.Join("..", "..", "accounts", "keystore", "testdata", "dupes") - geth := runMinimalGeth(t, "--port", "0", "--ipcdisable", "--datadir", tmpDatadirWithKeystore(t), - "--unlock", "f466859ead1932d743d622cb74fc058882e8648a", "--keystore", - store, "--unlock", "f466859ead1932d743d622cb74fc058882e8648a") - - defer geth.ExpectExit() - - // Helper for the expect template, returns absolute keystore path. 
- geth.SetTemplateFunc("keypath", func(file string) string { - abs, _ := filepath.Abs(filepath.Join(store, file)) - return abs - }) - geth.Expect(` -Unlocking account f466859ead1932d743d622cb74fc058882e8648a | Attempt 1/3 -!! Unsupported terminal, password will be echoed. -Password: {{.InputLine "wrong"}} -Multiple key files exist for address f466859ead1932d743d622cb74fc058882e8648a: - keystore://{{keypath "1"}} - keystore://{{keypath "2"}} -Testing your password against all of them... -Fatal: None of the listed files could be unlocked. -`) - geth.ExpectExit() -} diff --git a/cmd/geth/attach_test.go b/cmd/geth/attach_test.go deleted file mode 100644 index ceae3a122e..0000000000 --- a/cmd/geth/attach_test.go +++ /dev/null @@ -1,83 +0,0 @@ -// Copyright 2022 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package main - -import ( - "fmt" - "net" - "net/http" - "sync/atomic" - "testing" -) - -type testHandler struct { - body func(http.ResponseWriter, *http.Request) -} - -func (t *testHandler) ServeHTTP(out http.ResponseWriter, in *http.Request) { - t.body(out, in) -} - -// TestAttachWithHeaders tests that 'geth attach' with custom headers works, i.e -// that custom headers are forwarded to the target. -func TestAttachWithHeaders(t *testing.T) { - t.Parallel() - ln, err := net.Listen("tcp", "localhost:0") - if err != nil { - t.Fatal(err) - } - port := ln.Addr().(*net.TCPAddr).Port - testReceiveHeaders(t, ln, "attach", "-H", "first: one", "-H", "second: two", fmt.Sprintf("http://localhost:%d", port)) - // This way to do it fails due to flag ordering: - // - // testReceiveHeaders(t, ln, "-H", "first: one", "-H", "second: two", "attach", fmt.Sprintf("http://localhost:%d", port)) - // This is fixed in a follow-up PR. -} - -// TestRemoteDbWithHeaders tests that 'geth db --remotedb' with custom headers works, i.e -// that custom headers are forwarded to the target. 
-func TestRemoteDbWithHeaders(t *testing.T) { - t.Parallel() - ln, err := net.Listen("tcp", "localhost:0") - if err != nil { - t.Fatal(err) - } - port := ln.Addr().(*net.TCPAddr).Port - testReceiveHeaders(t, ln, "db", "metadata", "--remotedb", fmt.Sprintf("http://localhost:%d", port), "-H", "first: one", "-H", "second: two") -} - -func testReceiveHeaders(t *testing.T, ln net.Listener, gethArgs ...string) { - var ok atomic.Uint32 - server := &http.Server{ - Addr: "localhost:0", - Handler: &testHandler{func(w http.ResponseWriter, r *http.Request) { - // We expect two headers - if have, want := r.Header.Get("first"), "one"; have != want { - t.Fatalf("missing header, have %v want %v", have, want) - } - if have, want := r.Header.Get("second"), "two"; have != want { - t.Fatalf("missing header, have %v want %v", have, want) - } - ok.Store(1) - }}} - go server.Serve(ln) - defer server.Close() - runGeth(t, gethArgs...).WaitExit() - if ok.Load() != 1 { - t.Fatal("Test fail, expected invocation to succeed") - } -} diff --git a/cmd/geth/consolecmd_test.go b/cmd/geth/consolecmd_test.go deleted file mode 100644 index 59c0e0015e..0000000000 --- a/cmd/geth/consolecmd_test.go +++ /dev/null @@ -1,162 +0,0 @@ -// Copyright 2016 The go-ethereum Authors -// This file is part of go-ethereum. -// -// go-ethereum is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// go-ethereum is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License -// along with go-ethereum. If not, see . - -package main - -import ( - "crypto/rand" - "math/big" - "path/filepath" - "runtime" - "strconv" - "strings" - "testing" - "time" - - "github.com/ethereum/go-ethereum/params" -) - -const ( - ipcAPIs = "admin:1.0 debug:1.0 eth:1.0 mev:1.0 miner:1.0 net:1.0 parlia:1.0 rpc:1.0 txpool:1.0 web3:1.0" - httpAPIs = "eth:1.0 net:1.0 rpc:1.0 web3:1.0" -) - -// spawns geth with the given command line args, using a set of flags to minimise -// memory and disk IO. If the args don't set --datadir, the -// child g gets a temporary data directory. -func runMinimalGeth(t *testing.T, args ...string) *testgeth { - // --networkid=1337 to avoid cache bump - // --syncmode=full to avoid allocating fast sync bloom - allArgs := []string{"--networkid", "1337", "--syncmode=full", "--port", "0", - "--nat", "none", "--nodiscover", "--maxpeers", "0", "--cache", "64", - "--datadir.minfreedisk", "0"} - return runGeth(t, append(allArgs, args...)...) -} - -// Tests that a node embedded within a console can be started up properly and -// then terminated by closing the input stream. 
-func TestConsoleWelcome(t *testing.T) { - t.Parallel() - coinbase := "0x8605cdbbdb6d264aa742e77020dcbc58fcdce182" - - // Start a geth console, make sure it's cleaned up and terminate the console - geth := runMinimalGeth(t, "--miner.etherbase", coinbase, "console") - - // Gather all the infos the welcome message needs to contain - geth.SetTemplateFunc("goos", func() string { return runtime.GOOS }) - geth.SetTemplateFunc("goarch", func() string { return runtime.GOARCH }) - geth.SetTemplateFunc("gover", runtime.Version) - geth.SetTemplateFunc("gethver", func() string { return params.VersionWithCommit("", "") }) - geth.SetTemplateFunc("niltime", func() string { - return time.Unix(0x5e9da7ce, 0).Format("Mon Jan 02 2006 15:04:05 GMT-0700 (MST)") - }) - geth.SetTemplateFunc("apis", func() string { return ipcAPIs }) - - // Verify the actual welcome message to the required template - geth.Expect(` -Welcome to the Geth JavaScript console! - -instance: Geth/v{{gethver}}/{{goos}}-{{goarch}}/{{gover}} -coinbase: {{.Etherbase}} -at block: 0 ({{niltime}}) - datadir: {{.Datadir}} - modules: {{apis}} - -To exit, press ctrl-d or type exit -> {{.InputLine "exit"}} -`) - geth.ExpectExit() -} - -// Tests that a console can be attached to a running node via various means. -func TestAttachWelcome(t *testing.T) { - var ( - ipc string - httpPort string - wsPort string - ) - // Configure the instance for IPC attachment - if runtime.GOOS == "windows" { - ipc = `\\.\pipe\geth` + strconv.Itoa(trulyRandInt(100000, 999999)) - } else { - ipc = filepath.Join(t.TempDir(), "geth.ipc") - } - // And HTTP + WS attachment - p := trulyRandInt(1024, 65533) // Yeah, sometimes this will fail, sorry :P - httpPort = strconv.Itoa(p) - wsPort = strconv.Itoa(p + 1) - geth := runMinimalGeth(t, "--miner.etherbase", "0x8605cdbbdb6d264aa742e77020dcbc58fcdce182", - "--ipcpath", ipc, - "--http", "--http.port", httpPort, - "--ws", "--ws.port", wsPort) - t.Run("ipc", func(t *testing.T) { - waitForEndpoint(t, ipc, 3*time.Second) - testAttachWelcome(t, geth, "ipc:"+ipc, ipcAPIs) - }) - t.Run("http", func(t *testing.T) { - endpoint := "http://127.0.0.1:" + httpPort - waitForEndpoint(t, endpoint, 3*time.Second) - testAttachWelcome(t, geth, endpoint, httpAPIs) - }) - t.Run("ws", func(t *testing.T) { - endpoint := "ws://127.0.0.1:" + wsPort - waitForEndpoint(t, endpoint, 3*time.Second) - testAttachWelcome(t, geth, endpoint, httpAPIs) - }) - geth.Kill() -} - -func testAttachWelcome(t *testing.T, geth *testgeth, endpoint, apis string) { - // Attach to a running geth node and terminate immediately - attach := runGeth(t, "attach", endpoint) - defer attach.ExpectExit() - attach.CloseStdin() - - // Gather all the infos the welcome message needs to contain - attach.SetTemplateFunc("goos", func() string { return runtime.GOOS }) - attach.SetTemplateFunc("goarch", func() string { return runtime.GOARCH }) - attach.SetTemplateFunc("gover", runtime.Version) - attach.SetTemplateFunc("gethver", func() string { return params.VersionWithCommit("", "") }) - attach.SetTemplateFunc("etherbase", func() string { return geth.Etherbase }) - attach.SetTemplateFunc("niltime", func() string { - return time.Unix(0x5e9da7ce, 0).Format("Mon Jan 02 2006 15:04:05 GMT-0700 (MST)") - }) - attach.SetTemplateFunc("ipc", func() bool { return strings.HasPrefix(endpoint, "ipc") }) - attach.SetTemplateFunc("datadir", func() string { return geth.Datadir }) - attach.SetTemplateFunc("apis", func() string { return apis }) - - // Verify the actual welcome message to the required template - 
attach.Expect(` -Welcome to the Geth JavaScript console! - -instance: Geth/v{{gethver}}/{{goos}}-{{goarch}}/{{gover}} -coinbase: {{etherbase}} -at block: 0 ({{niltime}}){{if ipc}} - datadir: {{datadir}}{{end}} - modules: {{apis}} - -To exit, press ctrl-d or type exit -> {{.InputLine "exit" }} -`) - attach.ExpectExit() -} - -// trulyRandInt generates a crypto random integer used by the console tests to -// not clash network ports with other tests running concurrently. -func trulyRandInt(lo, hi int) int { - num, _ := rand.Int(rand.Reader, big.NewInt(int64(hi-lo))) - return int(num.Int64()) + lo -} diff --git a/cmd/geth/exportcmd_test.go b/cmd/geth/exportcmd_test.go deleted file mode 100644 index 9570b1ffd2..0000000000 --- a/cmd/geth/exportcmd_test.go +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright 2022 The go-ethereum Authors -// This file is part of go-ethereum. -// -// go-ethereum is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// go-ethereum is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License -// along with go-ethereum. If not, see . - -package main - -import ( - "bytes" - "fmt" - "os" - "testing" - - "github.com/ethereum/go-ethereum/common" -) - -// TestExport does a basic test of "geth export", exporting the test-genesis. -func TestExport(t *testing.T) { - t.Parallel() - outfile := fmt.Sprintf("%v/testExport.out", os.TempDir()) - defer os.Remove(outfile) - geth := runGeth(t, "--datadir", initGeth(t), "export", outfile) - geth.WaitExit() - if have, want := geth.ExitStatus(), 0; have != want { - t.Errorf("exit error, have %d want %d", have, want) - } - have, err := os.ReadFile(outfile) - if err != nil { - t.Fatal(err) - } - want := common.FromHex("0xf9026bf90266a00000000000000000000000000000000000000000000000000000000000000000a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347940000000000000000000000000000000000000000a08758259b018f7bce3d2be2ddb62f325eaeea0a0c188cf96623eab468a4413e03a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000180837a12008080b875000000000000000000000000000000000000000000000000000000000000000002f0d131f1f97aef08aec6e3291b957d9efe71050000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000880000000000000000c0c0") - if !bytes.Equal(have, want) { - t.Fatalf("wrong content exported") - } -} diff --git a/cmd/geth/genesis_test.go b/cmd/geth/genesis_test.go deleted file mode 100644 index 
6a76781112..0000000000 --- a/cmd/geth/genesis_test.go +++ /dev/null @@ -1,198 +0,0 @@ -// Copyright 2016 The go-ethereum Authors -// This file is part of go-ethereum. -// -// go-ethereum is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// go-ethereum is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License -// along with go-ethereum. If not, see . - -package main - -import ( - "fmt" - "os" - "path/filepath" - "strconv" - "testing" -) - -var customGenesisTests = []struct { - genesis string - query string - result string -}{ - // Genesis file with an empty chain configuration (ensure missing fields work) - { - genesis: `{ - "alloc" : {}, - "coinbase" : "0x0000000000000000000000000000000000000000", - "difficulty" : "0x20000", - "extraData" : "", - "gasLimit" : "0x2fefd8", - "nonce" : "0x0000000000001338", - "mixhash" : "0x0000000000000000000000000000000000000000000000000000000000000000", - "parentHash" : "0x0000000000000000000000000000000000000000000000000000000000000000", - "timestamp" : "0x00", - "config" : { - "terminalTotalDifficultyPassed": true - } - }`, - query: "eth.getBlock(0).nonce", - result: "0x0000000000001338", - }, - // Genesis file with specific chain configurations - { - genesis: `{ - "alloc" : {}, - "coinbase" : "0x0000000000000000000000000000000000000000", - "difficulty" : "0x20000", - "extraData" : "", - "gasLimit" : "0x2fefd8", - "nonce" : "0x0000000000001339", - "mixhash" : "0x0000000000000000000000000000000000000000000000000000000000000000", - "parentHash" : "0x0000000000000000000000000000000000000000000000000000000000000000", - "timestamp" : "0x00", - "config" : { - "homesteadBlock" : 42, - "daoForkBlock" : 141, - "daoForkSupport" : true, - "terminalTotalDifficultyPassed" : true - } - }`, - query: "eth.getBlock(0).nonce", - result: "0x0000000000001339", - }, -} - -// Tests that initializing Geth with a custom genesis block and chain definitions -// work properly. -func TestCustomGenesis(t *testing.T) { - t.Parallel() - for i, tt := range customGenesisTests { - // Create a temporary data directory to use and inspect later - datadir := t.TempDir() - - // Initialize the data directory with the custom genesis block - json := filepath.Join(datadir, "genesis.json") - if err := os.WriteFile(json, []byte(tt.genesis), 0600); err != nil { - t.Fatalf("test %d: failed to write genesis file: %v", i, err) - } - runGeth(t, "--datadir", datadir, "init", json).WaitExit() - - // Query the custom genesis block - geth := runGeth(t, "--networkid", "1337", "--syncmode=full", "--cache", "16", - "--datadir", datadir, "--maxpeers", "0", "--port", "0", - "--nodiscover", "--nat", "none", "--ipcdisable", - "--exec", tt.query, "console") - geth.ExpectRegexp(tt.result) - geth.ExpectExit() - } -} - -// TestCustomBackend that the backend selection and detection (leveldb vs pebble) works properly. 
-func TestCustomBackend(t *testing.T) { - t.Parallel() - // Test pebble, but only on 64-bit platforms - if strconv.IntSize != 64 { - t.Skip("Custom backends are only available on 64-bit platform") - } - genesis := `{ - "alloc" : {}, - "coinbase" : "0x0000000000000000000000000000000000000000", - "difficulty" : "0x20000", - "extraData" : "", - "gasLimit" : "0x2fefd8", - "nonce" : "0x0000000000001338", - "mixhash" : "0x0000000000000000000000000000000000000000000000000000000000000000", - "parentHash" : "0x0000000000000000000000000000000000000000000000000000000000000000", - "timestamp" : "0x00", - "config" : { - "terminalTotalDifficultyPassed": true - } - }` - type backendTest struct { - initArgs []string - initExpect string - execArgs []string - execExpect string - } - testfunc := func(t *testing.T, tt backendTest) error { - // Create a temporary data directory to use and inspect later - datadir := t.TempDir() - - // Initialize the data directory with the custom genesis block - json := filepath.Join(datadir, "genesis.json") - if err := os.WriteFile(json, []byte(genesis), 0600); err != nil { - return fmt.Errorf("failed to write genesis file: %v", err) - } - { // Init - args := append(tt.initArgs, "--datadir", datadir, "init", json) - geth := runGeth(t, args...) - geth.ExpectRegexp(tt.initExpect) - geth.ExpectExit() - } - { // Exec + query - args := append(tt.execArgs, "--networkid", "1337", "--syncmode=full", "--cache", "16", - "--datadir", datadir, "--maxpeers", "0", "--port", "0", - "--nodiscover", "--nat", "none", "--ipcdisable", - "--exec", "eth.getBlock(0).nonce", "console") - geth := runGeth(t, args...) - geth.ExpectRegexp(tt.execExpect) - geth.ExpectExit() - } - return nil - } - for i, tt := range []backendTest{ - { // When not specified, it should default to pebble - execArgs: []string{"--db.engine", "pebble"}, - execExpect: "0x0000000000001338", - }, - { // Explicit leveldb - initArgs: []string{"--db.engine", "leveldb"}, - execArgs: []string{"--db.engine", "leveldb"}, - execExpect: "0x0000000000001338", - }, - { // Explicit leveldb first, then autodiscover - initArgs: []string{"--db.engine", "leveldb"}, - execExpect: "0x0000000000001338", - }, - { // Explicit pebble - initArgs: []string{"--db.engine", "pebble"}, - execArgs: []string{"--db.engine", "pebble"}, - execExpect: "0x0000000000001338", - }, - { // Explicit pebble, then auto-discover - initArgs: []string{"--db.engine", "pebble"}, - execExpect: "0x0000000000001338", - }, - { // Can't start pebble on top of leveldb - initArgs: []string{"--db.engine", "leveldb"}, - execArgs: []string{"--db.engine", "pebble"}, - execExpect: `Fatal: Failed to register the Ethereum service: db.engine choice was pebble but found pre-existing leveldb database in specified data directory`, - }, - { // Can't start leveldb on top of pebble - initArgs: []string{"--db.engine", "pebble"}, - execArgs: []string{"--db.engine", "leveldb"}, - execExpect: `Fatal: Failed to register the Ethereum service: db.engine choice was leveldb but found pre-existing pebble database in specified data directory`, - }, - { // Reject invalid backend choice - initArgs: []string{"--db.engine", "mssql"}, - initExpect: `Fatal: Invalid choice for db.engine 'mssql', allowed 'leveldb' or 'pebble'`, - // Since the init fails, this will return the (default) mainnet genesis - // block nonce - execExpect: `0x0000000000000000`, - }, - } { - if err := testfunc(t, tt); err != nil { - t.Fatalf("test %d-leveldb: %v", i, err) - } - } -} diff --git a/cmd/geth/initnetwork_test.go 
b/cmd/geth/initnetwork_test.go deleted file mode 100644 index 1473f056e4..0000000000 --- a/cmd/geth/initnetwork_test.go +++ /dev/null @@ -1,147 +0,0 @@ -package main - -import ( - "fmt" - "os" - "path/filepath" - "runtime" - "strconv" - "strings" - "testing" -) - -var size int -var basePort int -var configPath string -var genesisPath string - -func setup(t *testing.T) { - size = 4 - _, filename, _, ok := runtime.Caller(0) - if !ok { - t.Fatalf("error getting current file path") - } - currentDirectory := filepath.Dir(filename) - configPath = filepath.Join(currentDirectory, "testdata/config.toml") - genesisPath = filepath.Join(currentDirectory, "testdata/parlia.json") - basePort = 30311 -} - -func TestInitNetworkLocalhost(t *testing.T) { - setup(t) - ipStr := "" - testInitNetwork(t, size, basePort, ipStr, configPath, genesisPath) -} - -func TestInitNetworkRemoteHosts(t *testing.T) { - setup(t) - ipStr := "192.168.24.103,172.15.67.89,10.0.17.36,203.113.45.76" - testInitNetwork(t, size, basePort, ipStr, configPath, genesisPath) -} - -func testInitNetwork(t *testing.T, size, basePort int, ipStr, configPath, genesisPath string) { - dir := t.TempDir() - geth := runGeth(t, "init-network", "--init.dir", dir, "--init.size", strconv.Itoa(size), - "--init.ips", ipStr, "--init.p2p-port", strconv.Itoa(basePort), "--config", configPath, - genesisPath) - // expect the command to complete first - geth.WaitExit() - - // Read the output of the command - files, err := os.ReadDir(dir) - if err != nil { - t.Fatal(err) - } - - if len(files) != size { - t.Fatalf("expected %d node folders but found %d instead", size, len(files)) - } - - for i, file := range files { - if file.IsDir() { - expectedNodeDirName := fmt.Sprintf("node%d", i) - if file.Name() != expectedNodeDirName { - t.Fatalf("node dir name is %s but %s was expected", file.Name(), expectedNodeDirName) - } - configFilePath := filepath.Join(dir, file.Name(), "config.toml") - var config gethConfig - err := loadConfig(configFilePath, &config) - if err != nil { - t.Fatalf("failed to load config.toml : %v", err) - } - if ipStr == "" { - verifyConfigFileLocalhost(t, &config, i, basePort, size) - } else { - verifyConfigFileRemoteHosts(t, &config, ipStr, i, basePort, size) - } - } - } -} - -func verifyConfigFileRemoteHosts(t *testing.T, config *gethConfig, ipStr string, i, basePort, size int) { - // 1. check ip string - ips := strings.Split(ipStr, ",") - if len(ips) != size { - t.Fatalf("found %d ips in ipStr=%s instead of %d", len(ips), ipStr, size) - } - - // 2. check listening port - expectedListenAddr := fmt.Sprintf(":%d", basePort) - if config.Node.P2P.ListenAddr != expectedListenAddr { - t.Fatalf("expected ListenAddr to be %s but it is %s instead", expectedListenAddr, config.Node.P2P.ListenAddr) - } - - bootnodes := config.Node.P2P.BootstrapNodes - - // 3. check correctness of peers' hosts - for j := 0; j < i; j++ { - ip := bootnodes[j].IP().String() - if ip != ips[j] { - t.Fatalf("expected IP of bootnode to be %s but found %s instead", ips[j], ip) - } - } - - for j := i + 1; j < size; j++ { - ip := bootnodes[j-1].IP().String() - if ip != ips[j] { - t.Fatalf("expected IP of bootnode to be %s but found %s instead", ips[j-1], ip) - } - } - - // 4. 
check correctness of peer port numbers - for j := 0; j < size-1; j++ { - if bootnodes[j].UDP() != basePort { - t.Fatalf("expected bootnode port at position %d to be %d but got %d instead", j, basePort, bootnodes[j].UDP()) - } - } -} - -func verifyConfigFileLocalhost(t *testing.T, config *gethConfig, i int, basePort int, size int) { - // 1. check listening port - expectedListenAddr := fmt.Sprintf(":%d", basePort+i) - if config.Node.P2P.ListenAddr != expectedListenAddr { - t.Fatalf("expected ListenAddr to be %s but it is %s instead", expectedListenAddr, config.Node.P2P.ListenAddr) - } - - bootnodes := config.Node.P2P.BootstrapNodes - // 2. check correctness of peers' hosts - localhost := "127.0.0.1" - for j := 0; j < size-1; j++ { - ip := bootnodes[j].IP().String() - if ip != localhost { - t.Fatalf("expected IP of bootnode to be %s but found %s instead", localhost, ip) - } - } - - // 3. check correctness of peer port numbers - for j := 0; j < i; j++ { - if bootnodes[j].UDP() != basePort+j { - t.Fatalf("expected bootnode port at position %d to be %d but got %d instead", j, basePort+j, bootnodes[j].UDP()) - } - } - for j := i + 1; j < size; j++ { - if bootnodes[j-1].UDP() != basePort+j { - t.Fatalf("expected bootnode port at position %d to be %d but got %d instead", j-1, basePort+j, bootnodes[j-1].UDP()) - } - } -} diff --git a/cmd/geth/logging_test.go b/cmd/geth/logging_test.go deleted file mode 100644 index b5ce03f4b8..0000000000 --- a/cmd/geth/logging_test.go +++ /dev/null @@ -1,237 +0,0 @@ -//go:build integrationtests - -// Copyright 2023 The go-ethereum Authors -// This file is part of go-ethereum. -// -// go-ethereum is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// go-ethereum is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License -// along with go-ethereum. If not, see . 
- -package main - -import ( - "bufio" - "bytes" - "encoding/json" - "fmt" - "io" - "math/rand" - "os" - "os/exec" - "strings" - "testing" - - "github.com/ethereum/go-ethereum/internal/reexec" -) - -func runSelf(args ...string) ([]byte, error) { - cmd := &exec.Cmd{ - Path: reexec.Self(), - Args: append([]string{"geth-test"}, args...), - } - return cmd.CombinedOutput() -} - -func split(input io.Reader) []string { - var output []string - scanner := bufio.NewScanner(input) - scanner.Split(bufio.ScanLines) - for scanner.Scan() { - output = append(output, strings.TrimSpace(scanner.Text())) - } - return output -} - -func censor(input string, start, end int) string { - if len(input) < end { - return input - } - return input[:start] + strings.Repeat("X", end-start) + input[end:] -} - -func TestLogging(t *testing.T) { - t.Parallel() - testConsoleLogging(t, "terminal", 6, 24) - testConsoleLogging(t, "logfmt", 2, 26) -} - -func testConsoleLogging(t *testing.T, format string, tStart, tEnd int) { - haveB, err := runSelf("--log.format", format, "logtest") - if err != nil { - t.Fatal(err) - } - readFile, err := os.Open(fmt.Sprintf("testdata/logging/logtest-%v.txt", format)) - if err != nil { - t.Fatal(err) - } - wantLines := split(readFile) - haveLines := split(bytes.NewBuffer(haveB)) - for i, want := range wantLines { - if i > len(haveLines)-1 { - t.Fatalf("format %v, line %d missing, want:%v", format, i, want) - } - have := haveLines[i] - for strings.Contains(have, "Unknown config environment variable") { - // This can happen on CI runs. Drop it. - haveLines = append(haveLines[:i], haveLines[i+1:]...) - have = haveLines[i] - } - - // Black out the timestamp - have = censor(have, tStart, tEnd) - want = censor(want, tStart, tEnd) - if have != want { - t.Logf(nicediff([]byte(have), []byte(want))) - t.Fatalf("format %v, line %d\nhave %v\nwant %v", format, i, have, want) - } - } - if len(haveLines) != len(wantLines) { - t.Errorf("format %v, want %d lines, have %d", format, len(haveLines), len(wantLines)) - } -} - -func TestJsonLogging(t *testing.T) { - t.Parallel() - haveB, err := runSelf("--log.format", "json", "logtest") - if err != nil { - t.Fatal(err) - } - readFile, err := os.Open("testdata/logging/logtest-json.txt") - if err != nil { - t.Fatal(err) - } - wantLines := split(readFile) - haveLines := split(bytes.NewBuffer(haveB)) - for i, wantLine := range wantLines { - if i > len(haveLines)-1 { - t.Fatalf("format %v, line %d missing, want:%v", "json", i, wantLine) - } - haveLine := haveLines[i] - for strings.Contains(haveLine, "Unknown config environment variable") { - // This can happen on CI runs. Drop it. - haveLines = append(haveLines[:i], haveLines[i+1:]...) 
- haveLine = haveLines[i] - } - var have, want []byte - { - var h map[string]any - if err := json.Unmarshal([]byte(haveLine), &h); err != nil { - t.Fatal(err) - } - h["t"] = "xxx" - have, _ = json.Marshal(h) - } - { - var w map[string]any - if err := json.Unmarshal([]byte(wantLine), &w); err != nil { - t.Fatal(err) - } - w["t"] = "xxx" - want, _ = json.Marshal(w) - } - if !bytes.Equal(have, want) { - // show an intelligent diff - t.Logf(nicediff(have, want)) - t.Errorf("file content wrong") - } - } -} - -func TestVmodule(t *testing.T) { - t.Parallel() - checkOutput := func(level int, want, wantNot string) { - t.Helper() - output, err := runSelf("--log.format", "terminal", "--verbosity=0", "--log.vmodule", fmt.Sprintf("logtestcmd_active.go=%d", level), "logtest") - if err != nil { - t.Fatal(err) - } - if len(want) > 0 && !strings.Contains(string(output), want) { // trace should be present at 5 - t.Errorf("failed to find expected string ('%s') in output", want) - } - if len(wantNot) > 0 && strings.Contains(string(output), wantNot) { // trace should be present at 5 - t.Errorf("string ('%s') should not be present in output", wantNot) - } - } - checkOutput(5, "log at level trace", "") // trace should be present at 5 - checkOutput(4, "log at level debug", "log at level trace") // debug should be present at 4, but trace should be missing - checkOutput(3, "log at level info", "log at level debug") // info should be present at 3, but debug should be missing - checkOutput(2, "log at level warn", "log at level info") // warn should be present at 2, but info should be missing - checkOutput(1, "log at level error", "log at level warn") // error should be present at 1, but warn should be missing -} - -func nicediff(have, want []byte) string { - var i = 0 - for ; i < len(have) && i < len(want); i++ { - if want[i] != have[i] { - break - } - } - var end = i + 40 - var start = i - 50 - if start < 0 { - start = 0 - } - var h, w string - if end < len(have) { - h = string(have[start:end]) - } else { - h = string(have[start:]) - } - if end < len(want) { - w = string(want[start:end]) - } else { - w = string(want[start:]) - } - return fmt.Sprintf("have vs want:\n%q\n%q\n", h, w) -} - -func TestFileOut(t *testing.T) { - t.Parallel() - var ( - have, want []byte - err error - path = fmt.Sprintf("%s/test_file_out-%d", os.TempDir(), rand.Int63()) - ) - t.Cleanup(func() { os.Remove(path) }) - if want, err = runSelf(fmt.Sprintf("--log.file=%s", path), "logtest"); err != nil { - t.Fatal(err) - } - if have, err = os.ReadFile(path); err != nil { - t.Fatal(err) - } - if !bytes.Equal(have, want) { - // show an intelligent diff - t.Logf(nicediff(have, want)) - t.Errorf("file content wrong") - } -} - -func TestRotatingFileOut(t *testing.T) { - t.Parallel() - var ( - have, want []byte - err error - path = fmt.Sprintf("%s/test_file_out-%d", os.TempDir(), rand.Int63()) - ) - t.Cleanup(func() { os.Remove(path) }) - if want, err = runSelf(fmt.Sprintf("--log.file=%s", path), "--log.rotate", "logtest"); err != nil { - t.Fatal(err) - } - if have, err = os.ReadFile(path); err != nil { - t.Fatal(err) - } - if !bytes.Equal(have, want) { - // show an intelligent diff - t.Logf(nicediff(have, want)) - t.Errorf("file content wrong") - } -} diff --git a/cmd/geth/main.go b/cmd/geth/main.go index 9763794f7e..260244cf7f 100644 --- a/cmd/geth/main.go +++ b/cmd/geth/main.go @@ -27,6 +27,8 @@ import ( "github.com/ethereum/go-ethereum/params" + "go.uber.org/automaxprocs/maxprocs" + "github.com/ethereum/go-ethereum/accounts" 
"github.com/ethereum/go-ethereum/accounts/keystore" "github.com/ethereum/go-ethereum/cmd/utils" @@ -40,7 +42,6 @@ import ( "github.com/ethereum/go-ethereum/internal/flags" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/node" - "go.uber.org/automaxprocs/maxprocs" // Force-load the tracer engines to trigger registration _ "github.com/ethereum/go-ethereum/eth/tracers/js" @@ -181,6 +182,7 @@ var ( utils.LogDebugFlag, utils.LogBacktraceAtFlag, utils.BlobExtraReserveFlag, + utils.DownloaderDisableSyncFlag, }, utils.NetworkFlags, utils.DatabaseFlags) rpcFlags = []cli.Flag{ diff --git a/cmd/geth/pruneblock_test.go b/cmd/geth/pruneblock_test.go deleted file mode 100644 index b2a93f65aa..0000000000 --- a/cmd/geth/pruneblock_test.go +++ /dev/null @@ -1,253 +0,0 @@ -// Copyright 2016 The go-ethereum Authors -// This file is part of go-ethereum. -// -// go-ethereum is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// go-ethereum is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License -// along with go-ethereum. If not, see . - -package main - -import ( - "bytes" - "encoding/hex" - "fmt" - "math/big" - "os" - "path/filepath" - "testing" - "time" - - "github.com/ethereum/go-ethereum/cmd/utils" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/consensus" - "github.com/ethereum/go-ethereum/consensus/ethash" - "github.com/ethereum/go-ethereum/core" - "github.com/ethereum/go-ethereum/core/rawdb" - "github.com/ethereum/go-ethereum/core/state/pruner" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/core/vm" - "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/eth" - "github.com/ethereum/go-ethereum/ethdb" - "github.com/ethereum/go-ethereum/ethdb/leveldb" - "github.com/ethereum/go-ethereum/node" - "github.com/ethereum/go-ethereum/params" - "github.com/ethereum/go-ethereum/rlp" - "github.com/ethereum/go-ethereum/triedb" -) - -var ( - canonicalSeed = 1 - blockPruneBackUpBlockNumber = 128 - key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") - address = crypto.PubkeyToAddress(key.PublicKey) - balance = big.NewInt(100000000000000000) - gspec = &core.Genesis{Config: params.TestChainConfig, Alloc: types.GenesisAlloc{address: {Balance: balance}}, BaseFee: big.NewInt(params.InitialBaseFee)} - signer = types.LatestSigner(gspec.Config) - config = &core.CacheConfig{ - TrieCleanLimit: 256, - TrieDirtyLimit: 256, - TrieTimeLimit: 5 * time.Minute, - SnapshotLimit: 0, // Disable snapshot - TriesInMemory: 128, - } - engine = ethash.NewFullFaker() -) - -func TestOfflineBlockPrune(t *testing.T) { - //Corner case for 0 remain in ancinetStore. - testOfflineBlockPruneWithAmountReserved(t, 0) - //General case. 
- testOfflineBlockPruneWithAmountReserved(t, 100) -} - -func NewLevelDBDatabaseWithFreezer(file string, cache int, handles int, ancient string, namespace string, readonly, disableFreeze, isLastOffset, pruneAncientData bool) (ethdb.Database, error) { - kvdb, err := leveldb.New(file, cache, handles, namespace, readonly) - if err != nil { - return nil, err - } - frdb, err := rawdb.NewDatabaseWithFreezer(kvdb, ancient, namespace, readonly, disableFreeze, isLastOffset, pruneAncientData, false) - if err != nil { - kvdb.Close() - return nil, err - } - return frdb, nil -} - -func testOfflineBlockPruneWithAmountReserved(t *testing.T, amountReserved uint64) { - datadir := t.TempDir() - - chaindbPath := filepath.Join(datadir, "chaindata") - oldAncientPath := filepath.Join(chaindbPath, "ancient") - newAncientPath := filepath.Join(chaindbPath, "ancient_back") - - db, blocks, blockList, receiptsList, externTdList, startBlockNumber, _ := BlockchainCreator(t, chaindbPath, oldAncientPath, amountReserved) - node, _ := startEthService(t, gspec, blocks, chaindbPath) - defer node.Close() - - //Initialize a block pruner for pruning, only remain amountReserved blocks backward. - testBlockPruner := pruner.NewBlockPruner(db, node, oldAncientPath, newAncientPath, amountReserved) - if err := testBlockPruner.BlockPruneBackUp(chaindbPath, 512, utils.MakeDatabaseHandles(0), "", false, false); err != nil { - t.Fatalf("Failed to back up block: %v", err) - } - - dbBack, err := NewLevelDBDatabaseWithFreezer(chaindbPath, 0, 0, newAncientPath, "", false, true, false, false) - if err != nil { - t.Fatalf("failed to create database with ancient backend") - } - defer dbBack.Close() - - //check against if the backup data matched original one - for blockNumber := startBlockNumber; blockNumber < startBlockNumber+amountReserved; blockNumber++ { - blockHash := rawdb.ReadCanonicalHash(dbBack, blockNumber) - block := rawdb.ReadBlock(dbBack, blockHash, blockNumber) - - if block.Hash() != blockHash { - t.Fatalf("block data did not match between oldDb and backupDb") - } - if blockList[blockNumber-startBlockNumber].Hash() != blockHash { - t.Fatalf("block data did not match between oldDb and backupDb") - } - - receipts := rawdb.ReadRawReceipts(dbBack, blockHash, blockNumber) - if err := checkReceiptsRLP(receipts, receiptsList[blockNumber-startBlockNumber]); err != nil { - t.Fatalf("receipts did not match between oldDb and backupDb") - } - // // Calculate the total difficulty of the block - td := rawdb.ReadTd(dbBack, blockHash, blockNumber) - if td == nil { - t.Fatalf("Failed to ReadTd: %v", consensus.ErrUnknownAncestor) - } - if td.Cmp(externTdList[blockNumber-startBlockNumber]) != 0 { - t.Fatalf("externTd did not match between oldDb and backupDb") - } - } - - //check if ancientDb freezer replaced successfully - testBlockPruner.AncientDbReplacer() - if _, err := os.Stat(newAncientPath); err != nil { - if !os.IsNotExist(err) { - t.Fatalf("ancientDb replaced unsuccessfully") - } - } - if _, err := os.Stat(oldAncientPath); err != nil { - t.Fatalf("ancientDb replaced unsuccessfully") - } -} - -func BlockchainCreator(t *testing.T, chaindbPath, AncientPath string, blockRemain uint64) (ethdb.Database, []*types.Block, []*types.Block, []types.Receipts, []*big.Int, uint64, *core.BlockChain) { - //create a database with ancient freezer - db, err := NewLevelDBDatabaseWithFreezer(chaindbPath, 0, 0, AncientPath, "", false, false, false, false) - if err != nil { - t.Fatalf("failed to create database with ancient backend") - } - defer db.Close() - - triedb 
:= triedb.NewDatabase(db, nil) - defer triedb.Close() - - genesis := gspec.MustCommit(db, triedb) - // Initialize a fresh chain with only a genesis block - blockchain, err := core.NewBlockChain(db, config, gspec, nil, engine, vm.Config{}, nil, nil) - if err != nil { - t.Fatalf("Failed to create chain: %v", err) - } - - // Make chain starting from genesis - blocks, _ := core.GenerateChain(gspec.Config, genesis, ethash.NewFaker(), db, 500, func(i int, block *core.BlockGen) { - block.SetCoinbase(common.Address{0: byte(canonicalSeed), 19: byte(i)}) - tx, err := types.SignTx(types.NewTransaction(block.TxNonce(address), common.Address{0x00}, big.NewInt(1000), params.TxGas, big.NewInt(params.InitialBaseFee), nil), signer, key) - if err != nil { - panic(err) - } - block.AddTx(tx) - block.SetDifficulty(big.NewInt(1000000)) - }) - if _, err := blockchain.InsertChain(blocks); err != nil { - t.Fatalf("Failed to import canonical chain start: %v", err) - } - - // Force run a freeze cycle - type freezer interface { - Freeze(threshold uint64) error - Ancients() (uint64, error) - } - db.(freezer).Freeze(10) - - frozen, err := db.Ancients() - //make sure there're frozen items - if err != nil || frozen == 0 { - t.Fatalf("Failed to import canonical chain start: %v", err) - } - if frozen < blockRemain { - t.Fatalf("block amount is not enough for pruning: %v", err) - } - - oldOffSet := rawdb.ReadOffSetOfCurrentAncientFreezer(db) - // Get the actual start block number. - startBlockNumber := frozen - blockRemain + oldOffSet - // Initialize the slice to buffer the block data left. - blockList := make([]*types.Block, 0, blockPruneBackUpBlockNumber) - receiptsList := make([]types.Receipts, 0, blockPruneBackUpBlockNumber) - externTdList := make([]*big.Int, 0, blockPruneBackUpBlockNumber) - // All ancient data within the most recent 128 blocks write into memory buffer for future new ancient_back directory usage. - for blockNumber := startBlockNumber; blockNumber < frozen+oldOffSet; blockNumber++ { - blockHash := rawdb.ReadCanonicalHash(db, blockNumber) - block := rawdb.ReadBlock(db, blockHash, blockNumber) - blockList = append(blockList, block) - receipts := rawdb.ReadRawReceipts(db, blockHash, blockNumber) - receiptsList = append(receiptsList, receipts) - // Calculate the total difficulty of the block - td := rawdb.ReadTd(db, blockHash, blockNumber) - if td == nil { - t.Fatalf("Failed to ReadTd: %v", consensus.ErrUnknownAncestor) - } - externTdList = append(externTdList, td) - } - - return db, blocks, blockList, receiptsList, externTdList, startBlockNumber, blockchain -} - -func checkReceiptsRLP(have, want types.Receipts) error { - if len(have) != len(want) { - return fmt.Errorf("receipts sizes mismatch: have %d, want %d", len(have), len(want)) - } - for i := 0; i < len(want); i++ { - rlpHave, err := rlp.EncodeToBytes(have[i]) - if err != nil { - return err - } - rlpWant, err := rlp.EncodeToBytes(want[i]) - if err != nil { - return err - } - if !bytes.Equal(rlpHave, rlpWant) { - return fmt.Errorf("receipt #%d: receipt mismatch: have %s, want %s", i, hex.EncodeToString(rlpHave), hex.EncodeToString(rlpWant)) - } - } - return nil -} - -// startEthService creates a full node instance for testing. 
-func startEthService(t *testing.T, genesis *core.Genesis, blocks []*types.Block, chaindbPath string) (*node.Node, *eth.Ethereum) { - t.Helper() - n, err := node.New(&node.Config{DataDir: chaindbPath}) - if err != nil { - t.Fatal("can't create node:", err) - } - - if err := n.Start(); err != nil { - t.Fatal("can't start node:", err) - } - - return n, nil -} diff --git a/cmd/geth/run_test.go b/cmd/geth/run_test.go deleted file mode 100644 index 1d32880325..0000000000 --- a/cmd/geth/run_test.go +++ /dev/null @@ -1,120 +0,0 @@ -// Copyright 2016 The go-ethereum Authors -// This file is part of go-ethereum. -// -// go-ethereum is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// go-ethereum is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License -// along with go-ethereum. If not, see . - -package main - -import ( - "context" - "fmt" - "os" - "testing" - "time" - - "github.com/ethereum/go-ethereum/internal/cmdtest" - "github.com/ethereum/go-ethereum/internal/reexec" - "github.com/ethereum/go-ethereum/rpc" -) - -type testgeth struct { - *cmdtest.TestCmd - - // template variables for expect - Datadir string - Etherbase string -} - -func init() { - // Run the app if we've been exec'd as "geth-test" in runGeth. - reexec.Register("geth-test", func() { - if err := app.Run(os.Args); err != nil { - fmt.Fprintln(os.Stderr, err) - os.Exit(1) - } - os.Exit(0) - }) -} - -func TestMain(m *testing.M) { - // check if we have been reexec'd - if reexec.Init() { - return - } - os.Exit(m.Run()) -} - -func initGeth(t *testing.T) string { - args := []string{"--networkid=42", "init", "./testdata/clique.json"} - t.Logf("Initializing geth: %v ", args) - g := runGeth(t, args...) - datadir := g.Datadir - g.WaitExit() - return datadir -} - -// spawns geth with the given command line args. If the args don't set --datadir, the -// child g gets a temporary data directory. -func runGeth(t *testing.T, args ...string) *testgeth { - tt := &testgeth{} - tt.TestCmd = cmdtest.NewTestCmd(t, tt) - for i, arg := range args { - switch arg { - case "--datadir": - if i < len(args)-1 { - tt.Datadir = args[i+1] - } - case "--miner.etherbase": - if i < len(args)-1 { - tt.Etherbase = args[i+1] - } - } - } - if tt.Datadir == "" { - // The temporary datadir will be removed automatically if something fails below. - tt.Datadir = t.TempDir() - args = append([]string{"--datadir", tt.Datadir}, args...) - } - - // Boot "geth". This actually runs the test binary but the TestMain - // function will prevent any tests from running. - tt.Run("geth-test", args...) - - return tt -} - -// waitForEndpoint attempts to connect to an RPC endpoint until it succeeds. 
-func waitForEndpoint(t *testing.T, endpoint string, timeout time.Duration) { - probe := func() bool { - ctx, cancel := context.WithTimeout(context.Background(), timeout) - defer cancel() - c, err := rpc.DialContext(ctx, endpoint) - if c != nil { - _, err = c.SupportedModules() - c.Close() - } - return err == nil - } - - start := time.Now() - for { - if probe() { - return - } - if time.Since(start) > timeout { - t.Fatal("endpoint", endpoint, "did not open within", timeout) - } - time.Sleep(200 * time.Millisecond) - } -} diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index 8707840692..f0b063c7f5 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -351,7 +351,7 @@ var ( } SyncModeFlag = &flags.TextMarshalerFlag{ Name: "syncmode", - Usage: `Blockchain sync mode ("snap" or "full")`, + Usage: `Blockchain sync mode ("nosync")`, Value: &defaultSyncMode, Category: flags.StateCategory, } @@ -390,6 +390,11 @@ var ( Value: ethconfig.Defaults.TransactionHistory, Category: flags.StateCategory, } + // Downloader settings + DownloaderDisableSyncFlag = &cli.BoolFlag{ + Name: "downloader.nosync", + Usage: "Disable synchronization with other peers", + } // Transaction pool settings TxPoolLocalsFlag = &cli.StringFlag{ Name: "txpool.locals", diff --git a/cmd/utils/history_test.go b/cmd/utils/history_test.go deleted file mode 100644 index ba206e9823..0000000000 --- a/cmd/utils/history_test.go +++ /dev/null @@ -1,185 +0,0 @@ -// Copyright 2023 The go-ethereum Authors -// This file is part of go-ethereum. -// -// go-ethereum is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// go-ethereum is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License -// along with go-ethereum. If not, see . - -package utils - -import ( - "bytes" - "crypto/sha256" - "io" - "math/big" - "os" - "path" - "strings" - "testing" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/consensus/ethash" - "github.com/ethereum/go-ethereum/core" - "github.com/ethereum/go-ethereum/core/rawdb" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/core/vm" - "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/internal/era" - "github.com/ethereum/go-ethereum/params" - "github.com/ethereum/go-ethereum/trie" - "github.com/ethereum/go-ethereum/triedb" -) - -var ( - count uint64 = 128 - step uint64 = 16 -) - -func TestHistoryImportAndExport(t *testing.T) { - var ( - key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") - address = crypto.PubkeyToAddress(key.PublicKey) - genesis = &core.Genesis{ - Config: params.TestChainConfig, - Alloc: types.GenesisAlloc{address: {Balance: big.NewInt(1000000000000000000)}}, - } - signer = types.LatestSigner(genesis.Config) - ) - - // Generate chain. 
- db, blocks, _ := core.GenerateChainWithGenesis(genesis, ethash.NewFaker(), int(count), func(i int, g *core.BlockGen) { - if i == 0 { - return - } - tx, err := types.SignNewTx(key, signer, &types.DynamicFeeTx{ - ChainID: genesis.Config.ChainID, - Nonce: uint64(i - 1), - GasTipCap: common.Big0, - GasFeeCap: g.PrevBlock(0).BaseFee(), - Gas: 50000, - To: &common.Address{0xaa}, - Value: big.NewInt(int64(i)), - Data: nil, - AccessList: nil, - }) - if err != nil { - t.Fatalf("error creating tx: %v", err) - } - g.AddTx(tx) - }) - - // Initialize BlockChain. - chain, err := core.NewBlockChain(db, nil, genesis, nil, ethash.NewFaker(), vm.Config{}, nil, nil) - if err != nil { - t.Fatalf("unable to initialize chain: %v", err) - } - if _, err := chain.InsertChain(blocks); err != nil { - t.Fatalf("error insterting chain: %v", err) - } - - // Make temp directory for era files. - dir, err := os.MkdirTemp("", "history-export-test") - if err != nil { - t.Fatalf("error creating temp test directory: %v", err) - } - defer os.RemoveAll(dir) - - // Export history to temp directory. - if err := ExportHistory(chain, dir, 0, count, step); err != nil { - t.Fatalf("error exporting history: %v", err) - } - - // Read checksums. - b, err := os.ReadFile(path.Join(dir, "checksums.txt")) - if err != nil { - t.Fatalf("failed to read checksums: %v", err) - } - checksums := strings.Split(string(b), "\n") - - // Verify each Era. - entries, _ := era.ReadDir(dir, "mainnet") - for i, filename := range entries { - func() { - f, err := os.Open(path.Join(dir, filename)) - if err != nil { - t.Fatalf("error opening era file: %v", err) - } - var ( - h = sha256.New() - buf = bytes.NewBuffer(nil) - ) - if _, err := io.Copy(h, f); err != nil { - t.Fatalf("unable to recalculate checksum: %v", err) - } - if got, want := common.BytesToHash(h.Sum(buf.Bytes()[:])).Hex(), checksums[i]; got != want { - t.Fatalf("checksum %d does not match: got %s, want %s", i, got, want) - } - e, err := era.From(f) - if err != nil { - t.Fatalf("error opening era: %v", err) - } - defer e.Close() - it, err := era.NewIterator(e) - if err != nil { - t.Fatalf("error making era reader: %v", err) - } - for j := 0; it.Next(); j++ { - n := i*int(step) + j - if it.Error() != nil { - t.Fatalf("error reading block entry %d: %v", n, it.Error()) - } - block, receipts, err := it.BlockAndReceipts() - if err != nil { - t.Fatalf("error reading block entry %d: %v", n, err) - } - want := chain.GetBlockByNumber(uint64(n)) - if want, got := uint64(n), block.NumberU64(); want != got { - t.Fatalf("blocks out of order: want %d, got %d", want, got) - } - if want.Hash() != block.Hash() { - t.Fatalf("block hash mismatch %d: want %s, got %s", n, want.Hash().Hex(), block.Hash().Hex()) - } - if got := types.DeriveSha(block.Transactions(), trie.NewStackTrie(nil)); got != want.TxHash() { - t.Fatalf("tx hash %d mismatch: want %s, got %s", n, want.TxHash(), got) - } - if got := types.CalcUncleHash(block.Uncles()); got != want.UncleHash() { - t.Fatalf("uncle hash %d mismatch: want %s, got %s", n, want.UncleHash(), got) - } - if got := types.DeriveSha(receipts, trie.NewStackTrie(nil)); got != want.ReceiptHash() { - t.Fatalf("receipt root %d mismatch: want %s, got %s", n, want.ReceiptHash(), got) - } - } - }() - } - - // Now import Era. 
- freezer := t.TempDir() - db2, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), freezer, "", false, false, false, false, false) - if err != nil { - panic(err) - } - t.Cleanup(func() { - db2.Close() - }) - - genesis.MustCommit(db2, triedb.NewDatabase(db, triedb.HashDefaults)) - imported, err := core.NewBlockChain(db2, nil, genesis, nil, ethash.NewFaker(), vm.Config{}, nil, nil) - if err != nil { - t.Fatalf("unable to initialize chain: %v", err) - } - if err := ImportHistory(imported, db2, dir, "mainnet"); err != nil { - t.Fatalf("failed to import chain: %v", err) - } - if have, want := imported.CurrentHeader(), chain.CurrentHeader(); have.Hash() != want.Hash() { - t.Fatalf("imported chain does not match expected, have (%d, %s) want (%d, %s)", have.Number, have.Hash(), want.Number, want.Hash()) - } -} diff --git a/consensus/clique/clique_test.go b/consensus/clique/clique_test.go deleted file mode 100644 index f479ac6169..0000000000 --- a/consensus/clique/clique_test.go +++ /dev/null @@ -1,131 +0,0 @@ -// Copyright 2019 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package clique - -import ( - "fmt" - "math/big" - "testing" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core" - "github.com/ethereum/go-ethereum/core/rawdb" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/core/vm" - "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/params" -) - -// This test case is a repro of an annoying bug that took us forever to catch. -// In Clique PoA networks (Görli, etc), consecutive blocks might have -// the same state root (no block subsidy, empty block). If a node crashes, the -// chain ends up losing the recent state and needs to regenerate it from blocks -// already in the database. The bug was that processing the block *prior* to an -// empty one **also completes** the empty one, ending up in a known-block error. 
-func TestReimportMirroredState(t *testing.T) { - // Initialize a Clique chain with a single signer - var ( - db = rawdb.NewMemoryDatabase() - key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") - addr = crypto.PubkeyToAddress(key.PublicKey) - engine = New(params.AllCliqueProtocolChanges.Clique, db) - signer = new(types.HomesteadSigner) - ) - genspec := &core.Genesis{ - Config: params.AllCliqueProtocolChanges, - ExtraData: make([]byte, extraVanity+common.AddressLength+extraSeal), - Alloc: map[common.Address]types.Account{ - addr: {Balance: big.NewInt(10000000000000000)}, - }, - BaseFee: big.NewInt(params.InitialBaseFee), - } - copy(genspec.ExtraData[extraVanity:], addr[:]) - - // Generate a batch of blocks, each properly signed - chain, _ := core.NewBlockChain(rawdb.NewMemoryDatabase(), nil, genspec, nil, engine, vm.Config{}, nil, nil) - defer chain.Stop() - - _, blocks, _ := core.GenerateChainWithGenesis(genspec, engine, 3, func(i int, block *core.BlockGen) { - // The chain maker doesn't have access to a chain, so the difficulty will be - // lets unset (nil). Set it here to the correct value. - block.SetDifficulty(diffInTurn) - - // We want to simulate an empty middle block, having the same state as the - // first one. The last is needs a state change again to force a reorg. - if i != 1 { - tx, err := types.SignTx(types.NewTransaction(block.TxNonce(addr), common.Address{0x00}, new(big.Int), params.TxGas, block.BaseFee(), nil), signer, key) - if err != nil { - panic(err) - } - block.AddTxWithChain(chain, tx) - } - }) - for i, block := range blocks { - header := block.Header() - if i > 0 { - header.ParentHash = blocks[i-1].Hash() - } - header.Extra = make([]byte, extraVanity+extraSeal) - header.Difficulty = diffInTurn - - sig, _ := crypto.Sign(SealHash(header).Bytes(), key) - copy(header.Extra[len(header.Extra)-extraSeal:], sig) - blocks[i] = block.WithSeal(header) - txHash := common.Hash{} - if block.Transactions().Len() > 0 { - txHash = block.Transactions()[0].Hash() - } - fmt.Println("check", block.Number(), block.Hash(), block.Root(), fmt.Sprintf("%+v", txHash)) - } - // Insert the first two blocks and make sure the chain is valid - db = rawdb.NewMemoryDatabase() - chain, _ = core.NewBlockChain(db, nil, genspec, nil, engine, vm.Config{}, nil, nil) - defer chain.Stop() - - if _, err := chain.InsertChain(blocks[:2]); err != nil { - t.Fatalf("failed to insert initial blocks: %v", err) - } - if head := chain.CurrentBlock().Number.Uint64(); head != 2 { - t.Fatalf("chain head mismatch: have %d, want %d", head, 2) - } - - // Simulate a crash by creating a new chain on top of the database, without - // flushing the dirty states out. Insert the last block, triggering a sidechain - // reimport. 
- chain, _ = core.NewBlockChain(db, nil, genspec, nil, engine, vm.Config{}, nil, nil) - defer chain.Stop() - - if _, err := chain.InsertChain(blocks[2:]); err != nil { - t.Fatalf("failed to insert final block: %v", err) - } - if head := chain.CurrentBlock().Number.Uint64(); head != 3 { - t.Fatalf("chain head mismatch: have %d, want %d", head, 3) - } -} - -func TestSealHash(t *testing.T) { - have := SealHash(&types.Header{ - Difficulty: new(big.Int), - Number: new(big.Int), - Extra: make([]byte, 32+65), - BaseFee: new(big.Int), - }) - want := common.HexToHash("0xbd3d1fa43fbc4c5bfcc91b179ec92e2861df3654de60468beb908ff805359e8f") - if have != want { - t.Errorf("have %x, want %x", have, want) - } -} diff --git a/consensus/clique/snapshot_test.go b/consensus/clique/snapshot_test.go deleted file mode 100644 index 3d72ebe70c..0000000000 --- a/consensus/clique/snapshot_test.go +++ /dev/null @@ -1,507 +0,0 @@ -// Copyright 2017 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package clique - -import ( - "bytes" - "crypto/ecdsa" - "fmt" - "math/big" - "testing" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core" - "github.com/ethereum/go-ethereum/core/rawdb" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/core/vm" - "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/params" - "golang.org/x/exp/slices" -) - -// testerAccountPool is a pool to maintain currently active tester accounts, -// mapped from textual names used in the tests below to actual Ethereum private -// keys capable of signing transactions. -type testerAccountPool struct { - accounts map[string]*ecdsa.PrivateKey -} - -func newTesterAccountPool() *testerAccountPool { - return &testerAccountPool{ - accounts: make(map[string]*ecdsa.PrivateKey), - } -} - -// checkpoint creates a Clique checkpoint signer section from the provided list -// of authorized signers and embeds it into the provided header. -func (ap *testerAccountPool) checkpoint(header *types.Header, signers []string) { - auths := make([]common.Address, len(signers)) - for i, signer := range signers { - auths[i] = ap.address(signer) - } - slices.SortFunc(auths, common.Address.Cmp) - for i, auth := range auths { - copy(header.Extra[extraVanity+i*common.AddressLength:], auth.Bytes()) - } -} - -// address retrieves the Ethereum address of a tester account by label, creating -// a new account if no previous one exists yet. 
-func (ap *testerAccountPool) address(account string) common.Address { - // Return the zero account for non-addresses - if account == "" { - return common.Address{} - } - // Ensure we have a persistent key for the account - if ap.accounts[account] == nil { - ap.accounts[account], _ = crypto.GenerateKey() - } - // Resolve and return the Ethereum address - return crypto.PubkeyToAddress(ap.accounts[account].PublicKey) -} - -// sign calculates a Clique digital signature for the given block and embeds it -// back into the header. -func (ap *testerAccountPool) sign(header *types.Header, signer string) { - // Ensure we have a persistent key for the signer - if ap.accounts[signer] == nil { - ap.accounts[signer], _ = crypto.GenerateKey() - } - // Sign the header and embed the signature in extra data - sig, _ := crypto.Sign(SealHash(header).Bytes(), ap.accounts[signer]) - copy(header.Extra[len(header.Extra)-extraSeal:], sig) -} - -// testerVote represents a single block signed by a particular account, where -// the account may or may not have cast a Clique vote. -type testerVote struct { - signer string - voted string - auth bool - checkpoint []string - newbatch bool -} - -type cliqueTest struct { - epoch uint64 - signers []string - votes []testerVote - results []string - failure error -} - -// Tests that Clique signer voting is evaluated correctly for various simple and -// complex scenarios, as well as that a few special corner cases fail correctly. -func TestClique(t *testing.T) { - // Define the various voting scenarios to test - tests := []cliqueTest{ - { - // Single signer, no votes cast - signers: []string{"A"}, - votes: []testerVote{{signer: "A"}}, - results: []string{"A"}, - }, { - // Single signer, voting to add two others (only accept first, second needs 2 votes) - signers: []string{"A"}, - votes: []testerVote{ - {signer: "A", voted: "B", auth: true}, - {signer: "B"}, - {signer: "A", voted: "C", auth: true}, - }, - results: []string{"A", "B"}, - }, { - // Two signers, voting to add three others (only accept first two, third needs 3 votes already) - signers: []string{"A", "B"}, - votes: []testerVote{ - {signer: "A", voted: "C", auth: true}, - {signer: "B", voted: "C", auth: true}, - {signer: "A", voted: "D", auth: true}, - {signer: "B", voted: "D", auth: true}, - {signer: "C"}, - {signer: "A", voted: "E", auth: true}, - {signer: "B", voted: "E", auth: true}, - }, - results: []string{"A", "B", "C", "D"}, - }, { - // Single signer, dropping itself (weird, but one less cornercase by explicitly allowing this) - signers: []string{"A"}, - votes: []testerVote{ - {signer: "A", voted: "A", auth: false}, - }, - results: []string{}, - }, { - // Two signers, actually needing mutual consent to drop either of them (not fulfilled) - signers: []string{"A", "B"}, - votes: []testerVote{ - {signer: "A", voted: "B", auth: false}, - }, - results: []string{"A", "B"}, - }, { - // Two signers, actually needing mutual consent to drop either of them (fulfilled) - signers: []string{"A", "B"}, - votes: []testerVote{ - {signer: "A", voted: "B", auth: false}, - {signer: "B", voted: "B", auth: false}, - }, - results: []string{"A"}, - }, { - // Three signers, two of them deciding to drop the third - signers: []string{"A", "B", "C"}, - votes: []testerVote{ - {signer: "A", voted: "C", auth: false}, - {signer: "B", voted: "C", auth: false}, - }, - results: []string{"A", "B"}, - }, { - // Four signers, consensus of two not being enough to drop anyone - signers: []string{"A", "B", "C", "D"}, - votes: []testerVote{ - {signer: 
"A", voted: "C", auth: false}, - {signer: "B", voted: "C", auth: false}, - }, - results: []string{"A", "B", "C", "D"}, - }, { - // Four signers, consensus of three already being enough to drop someone - signers: []string{"A", "B", "C", "D"}, - votes: []testerVote{ - {signer: "A", voted: "D", auth: false}, - {signer: "B", voted: "D", auth: false}, - {signer: "C", voted: "D", auth: false}, - }, - results: []string{"A", "B", "C"}, - }, { - // Authorizations are counted once per signer per target - signers: []string{"A", "B"}, - votes: []testerVote{ - {signer: "A", voted: "C", auth: true}, - {signer: "B"}, - {signer: "A", voted: "C", auth: true}, - {signer: "B"}, - {signer: "A", voted: "C", auth: true}, - }, - results: []string{"A", "B"}, - }, { - // Authorizing multiple accounts concurrently is permitted - signers: []string{"A", "B"}, - votes: []testerVote{ - {signer: "A", voted: "C", auth: true}, - {signer: "B"}, - {signer: "A", voted: "D", auth: true}, - {signer: "B"}, - {signer: "A"}, - {signer: "B", voted: "D", auth: true}, - {signer: "A"}, - {signer: "B", voted: "C", auth: true}, - }, - results: []string{"A", "B", "C", "D"}, - }, { - // Deauthorizations are counted once per signer per target - signers: []string{"A", "B"}, - votes: []testerVote{ - {signer: "A", voted: "B", auth: false}, - {signer: "B"}, - {signer: "A", voted: "B", auth: false}, - {signer: "B"}, - {signer: "A", voted: "B", auth: false}, - }, - results: []string{"A", "B"}, - }, { - // Deauthorizing multiple accounts concurrently is permitted - signers: []string{"A", "B", "C", "D"}, - votes: []testerVote{ - {signer: "A", voted: "C", auth: false}, - {signer: "B"}, - {signer: "C"}, - {signer: "A", voted: "D", auth: false}, - {signer: "B"}, - {signer: "C"}, - {signer: "A"}, - {signer: "B", voted: "D", auth: false}, - {signer: "C", voted: "D", auth: false}, - {signer: "A"}, - {signer: "B", voted: "C", auth: false}, - }, - results: []string{"A", "B"}, - }, { - // Votes from deauthorized signers are discarded immediately (deauth votes) - signers: []string{"A", "B", "C"}, - votes: []testerVote{ - {signer: "C", voted: "B", auth: false}, - {signer: "A", voted: "C", auth: false}, - {signer: "B", voted: "C", auth: false}, - {signer: "A", voted: "B", auth: false}, - }, - results: []string{"A", "B"}, - }, { - // Votes from deauthorized signers are discarded immediately (auth votes) - signers: []string{"A", "B", "C"}, - votes: []testerVote{ - {signer: "C", voted: "D", auth: true}, - {signer: "A", voted: "C", auth: false}, - {signer: "B", voted: "C", auth: false}, - {signer: "A", voted: "D", auth: true}, - }, - results: []string{"A", "B"}, - }, { - // Cascading changes are not allowed, only the account being voted on may change - signers: []string{"A", "B", "C", "D"}, - votes: []testerVote{ - {signer: "A", voted: "C", auth: false}, - {signer: "B"}, - {signer: "C"}, - {signer: "A", voted: "D", auth: false}, - {signer: "B", voted: "C", auth: false}, - {signer: "C"}, - {signer: "A"}, - {signer: "B", voted: "D", auth: false}, - {signer: "C", voted: "D", auth: false}, - }, - results: []string{"A", "B", "C"}, - }, { - // Changes reaching consensus out of bounds (via a deauth) execute on touch - signers: []string{"A", "B", "C", "D"}, - votes: []testerVote{ - {signer: "A", voted: "C", auth: false}, - {signer: "B"}, - {signer: "C"}, - {signer: "A", voted: "D", auth: false}, - {signer: "B", voted: "C", auth: false}, - {signer: "C"}, - {signer: "A"}, - {signer: "B", voted: "D", auth: false}, - {signer: "C", voted: "D", auth: false}, - {signer: "A"}, 
- {signer: "C", voted: "C", auth: true}, - }, - results: []string{"A", "B"}, - }, { - // Changes reaching consensus out of bounds (via a deauth) may go out of consensus on first touch - signers: []string{"A", "B", "C", "D"}, - votes: []testerVote{ - {signer: "A", voted: "C", auth: false}, - {signer: "B"}, - {signer: "C"}, - {signer: "A", voted: "D", auth: false}, - {signer: "B", voted: "C", auth: false}, - {signer: "C"}, - {signer: "A"}, - {signer: "B", voted: "D", auth: false}, - {signer: "C", voted: "D", auth: false}, - {signer: "A"}, - {signer: "B", voted: "C", auth: true}, - }, - results: []string{"A", "B", "C"}, - }, { - // Ensure that pending votes don't survive authorization status changes. This - // corner case can only appear if a signer is quickly added, removed and then - // re-added (or the inverse), while one of the original voters dropped. If a - // past vote is left cached in the system somewhere, this will interfere with - // the final signer outcome. - signers: []string{"A", "B", "C", "D", "E"}, - votes: []testerVote{ - {signer: "A", voted: "F", auth: true}, // Authorize F, 3 votes needed - {signer: "B", voted: "F", auth: true}, - {signer: "C", voted: "F", auth: true}, - {signer: "D", voted: "F", auth: false}, // Deauthorize F, 4 votes needed (leave A's previous vote "unchanged") - {signer: "E", voted: "F", auth: false}, - {signer: "B", voted: "F", auth: false}, - {signer: "C", voted: "F", auth: false}, - {signer: "D", voted: "F", auth: true}, // Almost authorize F, 2/3 votes needed - {signer: "E", voted: "F", auth: true}, - {signer: "B", voted: "A", auth: false}, // Deauthorize A, 3 votes needed - {signer: "C", voted: "A", auth: false}, - {signer: "D", voted: "A", auth: false}, - {signer: "B", voted: "F", auth: true}, // Finish authorizing F, 3/3 votes needed - }, - results: []string{"B", "C", "D", "E", "F"}, - }, { - // Epoch transitions reset all votes to allow chain checkpointing - epoch: 3, - signers: []string{"A", "B"}, - votes: []testerVote{ - {signer: "A", voted: "C", auth: true}, - {signer: "B"}, - {signer: "A", checkpoint: []string{"A", "B"}}, - {signer: "B", voted: "C", auth: true}, - }, - results: []string{"A", "B"}, - }, { - // An unauthorized signer should not be able to sign blocks - signers: []string{"A"}, - votes: []testerVote{ - {signer: "B"}, - }, - failure: errUnauthorizedSigner, - }, { - // An authorized signer that signed recently should not be able to sign again - signers: []string{"A", "B"}, - votes: []testerVote{ - {signer: "A"}, - {signer: "A"}, - }, - failure: errRecentlySigned, - }, { - // Recent signatures should not reset on checkpoint blocks imported in a batch - epoch: 3, - signers: []string{"A", "B", "C"}, - votes: []testerVote{ - {signer: "A"}, - {signer: "B"}, - {signer: "A", checkpoint: []string{"A", "B", "C"}}, - {signer: "A"}, - }, - failure: errRecentlySigned, - }, { - // Recent signatures should not reset on checkpoint blocks imported in a new - // batch (https://github.com/ethereum/go-ethereum/issues/17593). Whilst this - // seems overly specific and weird, it was a Rinkeby consensus split. 
- epoch: 3, - signers: []string{"A", "B", "C"}, - votes: []testerVote{ - {signer: "A"}, - {signer: "B"}, - {signer: "A", checkpoint: []string{"A", "B", "C"}}, - {signer: "A", newbatch: true}, - }, - failure: errRecentlySigned, - }, - } - - // Run through the scenarios and test them - for i, tt := range tests { - t.Run(fmt.Sprint(i), tt.run) - } -} - -func (tt *cliqueTest) run(t *testing.T) { - // Create the account pool and generate the initial set of signers - accounts := newTesterAccountPool() - - signers := make([]common.Address, len(tt.signers)) - for j, signer := range tt.signers { - signers[j] = accounts.address(signer) - } - for j := 0; j < len(signers); j++ { - for k := j + 1; k < len(signers); k++ { - if bytes.Compare(signers[j][:], signers[k][:]) > 0 { - signers[j], signers[k] = signers[k], signers[j] - } - } - } - // Create the genesis block with the initial set of signers - genesis := &core.Genesis{ - ExtraData: make([]byte, extraVanity+common.AddressLength*len(signers)+extraSeal), - BaseFee: big.NewInt(params.InitialBaseFee), - } - for j, signer := range signers { - copy(genesis.ExtraData[extraVanity+j*common.AddressLength:], signer[:]) - } - - // Assemble a chain of headers from the cast votes - config := *params.TestChainConfig - config.Clique = ¶ms.CliqueConfig{ - Period: 1, - Epoch: tt.epoch, - } - genesis.Config = &config - - engine := New(config.Clique, rawdb.NewMemoryDatabase()) - engine.fakeDiff = true - - _, blocks, _ := core.GenerateChainWithGenesis(genesis, engine, len(tt.votes), func(j int, gen *core.BlockGen) { - // Cast the vote contained in this block - gen.SetCoinbase(accounts.address(tt.votes[j].voted)) - if tt.votes[j].auth { - var nonce types.BlockNonce - copy(nonce[:], nonceAuthVote) - gen.SetNonce(nonce) - } - }) - // Iterate through the blocks and seal them individually - for j, block := range blocks { - // Get the header and prepare it for signing - header := block.Header() - if j > 0 { - header.ParentHash = blocks[j-1].Hash() - } - header.Extra = make([]byte, extraVanity+extraSeal) - if auths := tt.votes[j].checkpoint; auths != nil { - header.Extra = make([]byte, extraVanity+len(auths)*common.AddressLength+extraSeal) - accounts.checkpoint(header, auths) - } - header.Difficulty = diffInTurn // Ignored, we just need a valid number - - // Generate the signature, embed it into the header and the block - accounts.sign(header, tt.votes[j].signer) - blocks[j] = block.WithSeal(header) - } - // Split the blocks up into individual import batches (cornercase testing) - batches := [][]*types.Block{nil} - for j, block := range blocks { - if tt.votes[j].newbatch { - batches = append(batches, nil) - } - batches[len(batches)-1] = append(batches[len(batches)-1], block) - } - // Pass all the headers through clique and ensure tallying succeeds - chain, err := core.NewBlockChain(rawdb.NewMemoryDatabase(), nil, genesis, nil, engine, vm.Config{}, nil, nil) - if err != nil { - t.Fatalf("failed to create test chain: %v", err) - } - defer chain.Stop() - - for j := 0; j < len(batches)-1; j++ { - if k, err := chain.InsertChain(batches[j]); err != nil { - t.Fatalf("failed to import batch %d, block %d: %v", j, k, err) - } - } - if _, err = chain.InsertChain(batches[len(batches)-1]); err != tt.failure { - t.Errorf("failure mismatch: have %v, want %v", err, tt.failure) - } - if tt.failure != nil { - return - } - - // No failure was produced or requested, generate the final voting snapshot - head := blocks[len(blocks)-1] - - snap, err := engine.snapshot(chain, head.NumberU64(), 
head.Hash(), nil) - if err != nil { - t.Fatalf("failed to retrieve voting snapshot: %v", err) - } - // Verify the final list of signers against the expected ones - signers = make([]common.Address, len(tt.results)) - for j, signer := range tt.results { - signers[j] = accounts.address(signer) - } - for j := 0; j < len(signers); j++ { - for k := j + 1; k < len(signers); k++ { - if bytes.Compare(signers[j][:], signers[k][:]) > 0 { - signers[j], signers[k] = signers[k], signers[j] - } - } - } - result := snap.signers() - if len(result) != len(signers) { - t.Fatalf("signers mismatch: have %x, want %x", result, signers) - } - for j := 0; j < len(result); j++ { - if !bytes.Equal(result[j][:], signers[j][:]) { - t.Fatalf("signer %d: signer mismatch: have %x, want %x", j, result[j], signers[j]) - } - } -} diff --git a/consensus/parlia/parlia.go b/consensus/parlia/parlia.go index 69b82d408c..ea7bfcf210 100644 --- a/consensus/parlia/parlia.go +++ b/consensus/parlia/parlia.go @@ -14,11 +14,12 @@ import ( "sync" "time" + "golang.org/x/crypto/sha3" + lru "github.com/hashicorp/golang-lru" "github.com/holiman/uint256" "github.com/prysmaticlabs/prysm/v5/crypto/bls" "github.com/willf/bitset" - "golang.org/x/crypto/sha3" "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/accounts" diff --git a/console/bridge_test.go b/console/bridge_test.go deleted file mode 100644 index e57e294fc5..0000000000 --- a/console/bridge_test.go +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright 2020 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package console - -import ( - "testing" - - "github.com/dop251/goja" - "github.com/ethereum/go-ethereum/internal/jsre" -) - -// TestUndefinedAsParam ensures that personal functions can receive -// `undefined` as a parameter. -func TestUndefinedAsParam(t *testing.T) { - b := bridge{} - call := jsre.Call{} - call.Arguments = []goja.Value{goja.Undefined()} - - b.UnlockAccount(call) - b.Sign(call) - b.Sleep(call) -} - -// TestNullAsParam ensures that personal functions can receive -// `null` as a parameter. -func TestNullAsParam(t *testing.T) { - b := bridge{} - call := jsre.Call{} - call.Arguments = []goja.Value{goja.Null()} - - b.UnlockAccount(call) - b.Sign(call) - b.Sleep(call) -} diff --git a/console/console_test.go b/console/console_test.go deleted file mode 100644 index a13be6a99d..0000000000 --- a/console/console_test.go +++ /dev/null @@ -1,322 +0,0 @@ -// Copyright 2015 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
-// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package console - -import ( - "bytes" - "errors" - "fmt" - "os" - "strings" - "testing" - "time" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/console/prompt" - "github.com/ethereum/go-ethereum/core" - "github.com/ethereum/go-ethereum/eth" - "github.com/ethereum/go-ethereum/eth/ethconfig" - "github.com/ethereum/go-ethereum/internal/jsre" - "github.com/ethereum/go-ethereum/miner" - "github.com/ethereum/go-ethereum/node" -) - -const ( - testInstance = "console-tester" - testAddress = "0x8605cdbbdb6d264aa742e77020dcbc58fcdce182" -) - -// hookedPrompter implements UserPrompter to simulate use input via channels. -type hookedPrompter struct { - scheduler chan string -} - -func (p *hookedPrompter) PromptInput(prompt string) (string, error) { - // Send the prompt to the tester - select { - case p.scheduler <- prompt: - case <-time.After(time.Second): - return "", errors.New("prompt timeout") - } - // Retrieve the response and feed to the console - select { - case input := <-p.scheduler: - return input, nil - case <-time.After(time.Second): - return "", errors.New("input timeout") - } -} - -func (p *hookedPrompter) PromptPassword(prompt string) (string, error) { - return "", errors.New("not implemented") -} -func (p *hookedPrompter) PromptConfirm(prompt string) (bool, error) { - return false, errors.New("not implemented") -} -func (p *hookedPrompter) SetHistory(history []string) {} -func (p *hookedPrompter) AppendHistory(command string) {} -func (p *hookedPrompter) ClearHistory() {} -func (p *hookedPrompter) SetWordCompleter(completer prompt.WordCompleter) {} - -// tester is a console test environment for the console tests to operate on. -type tester struct { - workspace string - stack *node.Node - ethereum *eth.Ethereum - console *Console - input *hookedPrompter - output *bytes.Buffer -} - -// newTester creates a test environment based on which the console can operate. -// Please ensure you call Close() on the returned tester to avoid leaks. 
-func newTester(t *testing.T, confOverride func(*ethconfig.Config)) *tester { - // Create a temporary storage for the node keys and initialize it - workspace := t.TempDir() - - // Create a networkless protocol stack and start an Ethereum service within - stack, err := node.New(&node.Config{DataDir: workspace, UseLightweightKDF: true, Name: testInstance}) - if err != nil { - t.Fatalf("failed to create node: %v", err) - } - ethConf := ðconfig.Config{ - Genesis: core.DeveloperGenesisBlock(11_500_000, nil), - Miner: miner.Config{ - Etherbase: common.HexToAddress(testAddress), - }, - } - if confOverride != nil { - confOverride(ethConf) - } - ethBackend, err := eth.New(stack, ethConf) - if err != nil { - t.Fatalf("failed to register Ethereum protocol: %v", err) - } - // Start the node and assemble the JavaScript console around it - if err = stack.Start(); err != nil { - t.Fatalf("failed to start test stack: %v", err) - } - client := stack.Attach() - t.Cleanup(func() { - client.Close() - }) - - prompter := &hookedPrompter{scheduler: make(chan string)} - printer := new(bytes.Buffer) - - console, err := New(Config{ - DataDir: stack.DataDir(), - DocRoot: "testdata", - Client: client, - Prompter: prompter, - Printer: printer, - Preload: []string{"preload.js"}, - }) - if err != nil { - t.Fatalf("failed to create JavaScript console: %v", err) - } - // Create the final tester and return - return &tester{ - workspace: workspace, - stack: stack, - ethereum: ethBackend, - console: console, - input: prompter, - output: printer, - } -} - -// Close cleans up any temporary data folders and held resources. -func (env *tester) Close(t *testing.T) { - if err := env.console.Stop(false); err != nil { - t.Errorf("failed to stop embedded console: %v", err) - } - if err := env.stack.Close(); err != nil { - t.Errorf("failed to tear down embedded node: %v", err) - } - os.RemoveAll(env.workspace) -} - -// Tests that the node lists the correct welcome message, notably that it contains -// the instance name, coinbase account, block number, data directory and supported -// console modules. -func TestWelcome(t *testing.T) { - tester := newTester(t, nil) - defer tester.Close(t) - - tester.console.Welcome() - - output := tester.output.String() - if want := "Welcome"; !strings.Contains(output, want) { - t.Fatalf("console output missing welcome message: have\n%s\nwant also %s", output, want) - } - if want := fmt.Sprintf("instance: %s", testInstance); !strings.Contains(output, want) { - t.Fatalf("console output missing instance: have\n%s\nwant also %s", output, want) - } - if want := fmt.Sprintf("coinbase: %s", testAddress); !strings.Contains(output, want) { - t.Fatalf("console output missing coinbase: have\n%s\nwant also %s", output, want) - } - if want := "at block: 0"; !strings.Contains(output, want) { - t.Fatalf("console output missing sync status: have\n%s\nwant also %s", output, want) - } - if want := fmt.Sprintf("datadir: %s", tester.workspace); !strings.Contains(output, want) { - t.Fatalf("console output missing coinbase: have\n%s\nwant also %s", output, want) - } -} - -// Tests that JavaScript statement evaluation works as intended. -func TestEvaluate(t *testing.T) { - tester := newTester(t, nil) - defer tester.Close(t) - - tester.console.Evaluate("2 + 2") - if output := tester.output.String(); !strings.Contains(output, "4") { - t.Fatalf("statement evaluation failed: have %s, want %s", output, "4") - } -} - -// Tests that the console can be used in interactive mode. 
-func TestInteractive(t *testing.T) { - // Create a tester and run an interactive console in the background - tester := newTester(t, nil) - defer tester.Close(t) - - go tester.console.Interactive() - - // Wait for a prompt and send a statement back - select { - case <-tester.input.scheduler: - case <-time.After(time.Second): - t.Fatalf("initial prompt timeout") - } - select { - case tester.input.scheduler <- "2+2": - case <-time.After(time.Second): - t.Fatalf("input feedback timeout") - } - // Wait for the second prompt and ensure first statement was evaluated - select { - case <-tester.input.scheduler: - case <-time.After(time.Second): - t.Fatalf("secondary prompt timeout") - } - if output := tester.output.String(); !strings.Contains(output, "4") { - t.Fatalf("statement evaluation failed: have %s, want %s", output, "4") - } -} - -// Tests that preloaded JavaScript files have been executed before user is given -// input. -func TestPreload(t *testing.T) { - tester := newTester(t, nil) - defer tester.Close(t) - - tester.console.Evaluate("preloaded") - if output := tester.output.String(); !strings.Contains(output, "some-preloaded-string") { - t.Fatalf("preloaded variable missing: have %s, want %s", output, "some-preloaded-string") - } -} - -// Tests that the JavaScript objects returned by statement executions are properly -// pretty printed instead of just displaying "[object]". -func TestPrettyPrint(t *testing.T) { - tester := newTester(t, nil) - defer tester.Close(t) - - tester.console.Evaluate("obj = {int: 1, string: 'two', list: [3, 3, 3], obj: {null: null, func: function(){}}}") - - // Define some specially formatted fields - var ( - one = jsre.NumberColor("1") - two = jsre.StringColor("\"two\"") - three = jsre.NumberColor("3") - null = jsre.SpecialColor("null") - fun = jsre.FunctionColor("function()") - ) - // Assemble the actual output we're after and verify - want := `{ - int: ` + one + `, - list: [` + three + `, ` + three + `, ` + three + `], - obj: { - null: ` + null + `, - func: ` + fun + ` - }, - string: ` + two + ` -} -` - if output := tester.output.String(); output != want { - t.Fatalf("pretty print mismatch: have %s, want %s", output, want) - } -} - -// Tests that the JavaScript exceptions are properly formatted and colored. -func TestPrettyError(t *testing.T) { - tester := newTester(t, nil) - defer tester.Close(t) - tester.console.Evaluate("throw 'hello'") - - want := jsre.ErrorColor("hello") + "\n\tat :1:1(1)\n\n" - if output := tester.output.String(); output != want { - t.Fatalf("pretty error mismatch: have %s, want %s", output, want) - } -} - -// Tests that tests if the number of indents for JS input is calculated correct. 
-func TestIndenting(t *testing.T) { - testCases := []struct { - input string - expectedIndentCount int - }{ - {`var a = 1;`, 0}, - {`"some string"`, 0}, - {`"some string with (parenthesis`, 0}, - {`"some string with newline - ("`, 0}, - {`function v(a,b) {}`, 0}, - {`function f(a,b) { var str = "asd("; };`, 0}, - {`function f(a) {`, 1}, - {`function f(a, function(b) {`, 2}, - {`function f(a, function(b) { - var str = "a)}"; - });`, 0}, - {`function f(a,b) { - var str = "a{b(" + a, ", " + b; - }`, 0}, - {`var str = "\"{"`, 0}, - {`var str = "'("`, 0}, - {`var str = "\\{"`, 0}, - {`var str = "\\\\{"`, 0}, - {`var str = 'a"{`, 0}, - {`var obj = {`, 1}, - {`var obj = { {a:1`, 2}, - {`var obj = { {a:1}`, 1}, - {`var obj = { {a:1}, b:2}`, 0}, - {`var obj = {}`, 0}, - {`var obj = { - a: 1, b: 2 - }`, 0}, - {`var test = }`, -1}, - {`var str = "a\""; var obj = {`, 1}, - } - - for i, tt := range testCases { - counted := countIndents(tt.input) - if counted != tt.expectedIndentCount { - t.Errorf("test %d: invalid indenting: have %d, want %d", i, counted, tt.expectedIndentCount) - } - } -} diff --git a/core/bench_test.go b/core/bench_test.go deleted file mode 100644 index 97713868a5..0000000000 --- a/core/bench_test.go +++ /dev/null @@ -1,331 +0,0 @@ -// Copyright 2015 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . 
- -package core - -import ( - "crypto/ecdsa" - "math/big" - "testing" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/common/math" - "github.com/ethereum/go-ethereum/consensus/ethash" - "github.com/ethereum/go-ethereum/core/rawdb" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/core/vm" - "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/ethdb" - "github.com/ethereum/go-ethereum/params" -) - -func BenchmarkInsertChain_empty_memdb(b *testing.B) { - benchInsertChain(b, false, nil) -} -func BenchmarkInsertChain_empty_diskdb(b *testing.B) { - benchInsertChain(b, true, nil) -} -func BenchmarkInsertChain_valueTx_memdb(b *testing.B) { - benchInsertChain(b, false, genValueTx(0)) -} -func BenchmarkInsertChain_valueTx_diskdb(b *testing.B) { - benchInsertChain(b, true, genValueTx(0)) -} -func BenchmarkInsertChain_valueTx_100kB_memdb(b *testing.B) { - benchInsertChain(b, false, genValueTx(100*1024)) -} -func BenchmarkInsertChain_valueTx_100kB_diskdb(b *testing.B) { - benchInsertChain(b, true, genValueTx(100*1024)) -} -func BenchmarkInsertChain_uncles_memdb(b *testing.B) { - benchInsertChain(b, false, genUncles) -} -func BenchmarkInsertChain_uncles_diskdb(b *testing.B) { - benchInsertChain(b, true, genUncles) -} -func BenchmarkInsertChain_ring200_memdb(b *testing.B) { - benchInsertChain(b, false, genTxRing(200)) -} -func BenchmarkInsertChain_ring200_diskdb(b *testing.B) { - benchInsertChain(b, true, genTxRing(200)) -} -func BenchmarkInsertChain_ring1000_memdb(b *testing.B) { - benchInsertChain(b, false, genTxRing(1000)) -} -func BenchmarkInsertChain_ring1000_diskdb(b *testing.B) { - benchInsertChain(b, true, genTxRing(1000)) -} - -var ( - // This is the content of the genesis block used by the benchmarks. - benchRootKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") - benchRootAddr = crypto.PubkeyToAddress(benchRootKey.PublicKey) - benchRootFunds = math.BigPow(2, 200) -) - -// genValueTx returns a block generator that includes a single -// value-transfer transaction with n bytes of extra data in each -// block. -func genValueTx(nbytes int) func(int, *BlockGen) { - return func(i int, gen *BlockGen) { - toaddr := common.Address{} - data := make([]byte, nbytes) - gas, _ := IntrinsicGas(data, nil, false, false, false, false) - signer := gen.Signer() - gasPrice := big.NewInt(0) - if gen.header.BaseFee != nil { - gasPrice = gen.header.BaseFee - } - tx, _ := types.SignNewTx(benchRootKey, signer, &types.LegacyTx{ - Nonce: gen.TxNonce(benchRootAddr), - To: &toaddr, - Value: big.NewInt(1), - Gas: gas, - Data: data, - GasPrice: gasPrice, - }) - gen.AddTx(tx) - } -} - -var ( - ringKeys = make([]*ecdsa.PrivateKey, 1000) - ringAddrs = make([]common.Address, len(ringKeys)) -) - -func init() { - ringKeys[0] = benchRootKey - ringAddrs[0] = benchRootAddr - for i := 1; i < len(ringKeys); i++ { - ringKeys[i], _ = crypto.GenerateKey() - ringAddrs[i] = crypto.PubkeyToAddress(ringKeys[i].PublicKey) - } -} - -// genTxRing returns a block generator that sends ether in a ring -// among n accounts. This is creates n entries in the state database -// and fills the blocks with many small transactions. 
-func genTxRing(naccounts int) func(int, *BlockGen) { - from := 0 - availableFunds := new(big.Int).Set(benchRootFunds) - return func(i int, gen *BlockGen) { - block := gen.PrevBlock(i - 1) - gas := block.GasLimit() - gasPrice := big.NewInt(0) - if gen.header.BaseFee != nil { - gasPrice = gen.header.BaseFee - } - signer := gen.Signer() - for { - gas -= params.TxGas - if gas < params.TxGas { - break - } - to := (from + 1) % naccounts - burn := new(big.Int).SetUint64(params.TxGas) - burn.Mul(burn, gen.header.BaseFee) - availableFunds.Sub(availableFunds, burn) - if availableFunds.Cmp(big.NewInt(1)) < 0 { - panic("not enough funds") - } - tx, err := types.SignNewTx(ringKeys[from], signer, - &types.LegacyTx{ - Nonce: gen.TxNonce(ringAddrs[from]), - To: &ringAddrs[to], - Value: availableFunds, - Gas: params.TxGas, - GasPrice: gasPrice, - }) - if err != nil { - panic(err) - } - gen.AddTx(tx) - from = to - } - } -} - -// genUncles generates blocks with two uncle headers. -func genUncles(i int, gen *BlockGen) { - if i >= 7 { - b2 := gen.PrevBlock(i - 6).Header() - b2.Extra = []byte("foo") - gen.AddUncle(b2) - b3 := gen.PrevBlock(i - 6).Header() - b3.Extra = []byte("bar") - gen.AddUncle(b3) - } -} - -func benchInsertChain(b *testing.B, disk bool, gen func(int, *BlockGen)) { - // Create the database in memory or in a temporary directory. - var db ethdb.Database - var err error - if !disk { - db = rawdb.NewMemoryDatabase() - } else { - dir := b.TempDir() - db, err = rawdb.NewLevelDBDatabase(dir, 128, 128, "", false) - if err != nil { - b.Fatalf("cannot create temporary database: %v", err) - } - defer db.Close() - } - - // Generate a chain of b.N blocks using the supplied block - // generator function. - gspec := &Genesis{ - Config: params.TestChainConfig, - Alloc: types.GenesisAlloc{benchRootAddr: {Balance: benchRootFunds}}, - } - _, chain, _ := GenerateChainWithGenesis(gspec, ethash.NewFaker(), b.N, gen) - - // Time the insertion of the new chain. - // State and blocks are stored in the same DB. - chainman, _ := NewBlockChain(db, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) - defer chainman.Stop() - b.ReportAllocs() - b.ResetTimer() - if i, err := chainman.InsertChain(chain); err != nil { - b.Fatalf("insert error (block %d): %v\n", i, err) - } -} - -func BenchmarkChainRead_header_10k(b *testing.B) { - benchReadChain(b, false, 10000) -} -func BenchmarkChainRead_full_10k(b *testing.B) { - benchReadChain(b, true, 10000) -} -func BenchmarkChainRead_header_100k(b *testing.B) { - benchReadChain(b, false, 100000) -} -func BenchmarkChainRead_full_100k(b *testing.B) { - benchReadChain(b, true, 100000) -} -func BenchmarkChainRead_header_500k(b *testing.B) { - benchReadChain(b, false, 500000) -} -func BenchmarkChainRead_full_500k(b *testing.B) { - benchReadChain(b, true, 500000) -} -func BenchmarkChainWrite_header_10k(b *testing.B) { - benchWriteChain(b, false, 10000) -} -func BenchmarkChainWrite_full_10k(b *testing.B) { - benchWriteChain(b, true, 10000) -} -func BenchmarkChainWrite_header_100k(b *testing.B) { - benchWriteChain(b, false, 100000) -} -func BenchmarkChainWrite_full_100k(b *testing.B) { - benchWriteChain(b, true, 100000) -} -func BenchmarkChainWrite_header_500k(b *testing.B) { - benchWriteChain(b, false, 500000) -} -func BenchmarkChainWrite_full_500k(b *testing.B) { - benchWriteChain(b, true, 500000) -} - -// makeChainForBench writes a given number of headers or empty blocks/receipts -// into a database. 
-func makeChainForBench(db ethdb.Database, genesis *Genesis, full bool, count uint64) { - var hash common.Hash - for n := uint64(0); n < count; n++ { - header := &types.Header{ - Coinbase: common.Address{}, - Number: big.NewInt(int64(n)), - ParentHash: hash, - Difficulty: big.NewInt(1), - UncleHash: types.EmptyUncleHash, - TxHash: types.EmptyTxsHash, - ReceiptHash: types.EmptyReceiptsHash, - } - if n == 0 { - header = genesis.ToBlock().Header() - } - hash = header.Hash() - - rawdb.WriteHeader(db, header) - rawdb.WriteCanonicalHash(db, hash, n) - rawdb.WriteTd(db, hash, n, big.NewInt(int64(n+1))) - - if n == 0 { - rawdb.WriteChainConfig(db, hash, genesis.Config) - } - rawdb.WriteHeadHeaderHash(db, hash) - - if full || n == 0 { - block := types.NewBlockWithHeader(header) - rawdb.WriteBody(db, hash, n, block.Body()) - rawdb.WriteReceipts(db, hash, n, nil) - rawdb.WriteHeadBlockHash(db, hash) - } - } -} - -func benchWriteChain(b *testing.B, full bool, count uint64) { - genesis := &Genesis{Config: params.AllEthashProtocolChanges} - for i := 0; i < b.N; i++ { - dir := b.TempDir() - db, err := rawdb.NewLevelDBDatabase(dir, 128, 1024, "", false) - if err != nil { - b.Fatalf("error opening database at %v: %v", dir, err) - } - makeChainForBench(db, genesis, full, count) - db.Close() - } -} - -func benchReadChain(b *testing.B, full bool, count uint64) { - dir := b.TempDir() - - db, err := rawdb.NewLevelDBDatabase(dir, 128, 1024, "", false) - if err != nil { - b.Fatalf("error opening database at %v: %v", dir, err) - } - genesis := &Genesis{Config: params.AllEthashProtocolChanges} - makeChainForBench(db, genesis, full, count) - db.Close() - cacheConfig := *defaultCacheConfig - cacheConfig.TrieDirtyDisabled = true - - b.ReportAllocs() - b.ResetTimer() - - for i := 0; i < b.N; i++ { - db, err := rawdb.NewLevelDBDatabase(dir, 128, 1024, "", false) - if err != nil { - b.Fatalf("error opening database at %v: %v", dir, err) - } - chain, err := NewBlockChain(db, &cacheConfig, genesis, nil, ethash.NewFaker(), vm.Config{}, nil, nil) - if err != nil { - b.Fatalf("error creating chain: %v", err) - } - - for n := uint64(0); n < count; n++ { - header := chain.GetHeaderByNumber(n) - if full { - hash := header.Hash() - rawdb.ReadBody(db, hash, n) - rawdb.ReadReceipts(db, hash, n, header.Time, chain.Config()) - } - } - chain.Stop() - db.Close() - } -} diff --git a/core/block_validator_test.go b/core/block_validator_test.go deleted file mode 100644 index 1ab82ea0be..0000000000 --- a/core/block_validator_test.go +++ /dev/null @@ -1,272 +0,0 @@ -// Copyright 2015 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . 
- -package core - -import ( - "math/big" - "testing" - "time" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/consensus" - "github.com/ethereum/go-ethereum/consensus/beacon" - "github.com/ethereum/go-ethereum/consensus/clique" - "github.com/ethereum/go-ethereum/consensus/ethash" - "github.com/ethereum/go-ethereum/core/rawdb" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/core/vm" - "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/params" -) - -// Tests that simple header verification works, for both good and bad blocks. -func TestHeaderVerification(t *testing.T) { - testHeaderVerification(t, rawdb.HashScheme) - testHeaderVerification(t, rawdb.PathScheme) -} - -func testHeaderVerification(t *testing.T, scheme string) { - // Create a simple chain to verify - var ( - gspec = &Genesis{Config: params.TestChainConfig} - _, blocks, _ = GenerateChainWithGenesis(gspec, ethash.NewFaker(), 8, nil) - ) - headers := make([]*types.Header, len(blocks)) - for i, block := range blocks { - headers[i] = block.Header() - } - // Run the header checker for blocks one-by-one, checking for both valid and invalid nonces - chain, _ := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) - defer chain.Stop() - - for i := 0; i < len(blocks); i++ { - for j, valid := range []bool{true, false} { - var results <-chan error - - if valid { - engine := ethash.NewFaker() - _, results = engine.VerifyHeaders(chain, []*types.Header{headers[i]}) - } else { - engine := ethash.NewFakeFailer(headers[i].Number.Uint64()) - _, results = engine.VerifyHeaders(chain, []*types.Header{headers[i]}) - } - // Wait for the verification result - select { - case result := <-results: - if (result == nil) != valid { - t.Errorf("test %d.%d: validity mismatch: have %v, want %v", i, j, result, valid) - } - case <-time.After(time.Second): - t.Fatalf("test %d.%d: verification timeout", i, j) - } - // Make sure no more data is returned - select { - case result := <-results: - t.Fatalf("test %d.%d: unexpected result returned: %v", i, j, result) - case <-time.After(25 * time.Millisecond): - } - } - chain.InsertChain(blocks[i : i+1]) - } -} - -func TestHeaderVerificationForMergingClique(t *testing.T) { testHeaderVerificationForMerging(t, true) } -func TestHeaderVerificationForMergingEthash(t *testing.T) { testHeaderVerificationForMerging(t, false) } - -// Tests the verification for eth1/2 merging, including pre-merge and post-merge -func testHeaderVerificationForMerging(t *testing.T, isClique bool) { - var ( - gspec *Genesis - preBlocks []*types.Block - postBlocks []*types.Block - engine consensus.Engine - merger = consensus.NewMerger(rawdb.NewMemoryDatabase()) - ) - if isClique { - var ( - key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") - addr = crypto.PubkeyToAddress(key.PublicKey) - config = *params.AllCliqueProtocolChanges - ) - engine = beacon.New(clique.New(params.AllCliqueProtocolChanges.Clique, rawdb.NewMemoryDatabase())) - gspec = &Genesis{ - Config: &config, - ExtraData: make([]byte, 32+common.AddressLength+crypto.SignatureLength), - Alloc: map[common.Address]types.Account{ - addr: {Balance: big.NewInt(1)}, - }, - BaseFee: big.NewInt(params.InitialBaseFee), - Difficulty: new(big.Int), - } - copy(gspec.ExtraData[32:], addr[:]) - - td := 0 - genDb, blocks, _ := GenerateChainWithGenesis(gspec, engine, 8, nil) - for i, block := range 
blocks { - header := block.Header() - if i > 0 { - header.ParentHash = blocks[i-1].Hash() - } - header.Extra = make([]byte, 32+crypto.SignatureLength) - header.Difficulty = big.NewInt(2) - - sig, _ := crypto.Sign(engine.SealHash(header).Bytes(), key) - copy(header.Extra[len(header.Extra)-crypto.SignatureLength:], sig) - blocks[i] = block.WithSeal(header) - - // calculate td - td += int(block.Difficulty().Uint64()) - } - preBlocks = blocks - gspec.Config.TerminalTotalDifficulty = big.NewInt(int64(td)) - postBlocks, _ = GenerateChain(gspec.Config, preBlocks[len(preBlocks)-1], engine, genDb, 8, nil) - } else { - config := *params.TestChainConfig - gspec = &Genesis{Config: &config} - engine = beacon.New(ethash.NewFaker()) - td := int(params.GenesisDifficulty.Uint64()) - genDb, blocks, _ := GenerateChainWithGenesis(gspec, engine, 8, nil) - for _, block := range blocks { - // calculate td - td += int(block.Difficulty().Uint64()) - } - preBlocks = blocks - gspec.Config.TerminalTotalDifficulty = big.NewInt(int64(td)) - t.Logf("Set ttd to %v\n", gspec.Config.TerminalTotalDifficulty) - postBlocks, _ = GenerateChain(gspec.Config, preBlocks[len(preBlocks)-1], engine, genDb, 8, func(i int, gen *BlockGen) { - gen.SetPoS() - }) - } - // Assemble header batch - preHeaders := make([]*types.Header, len(preBlocks)) - for i, block := range preBlocks { - preHeaders[i] = block.Header() - t.Logf("Pre-merge header: %d", block.NumberU64()) - } - postHeaders := make([]*types.Header, len(postBlocks)) - for i, block := range postBlocks { - postHeaders[i] = block.Header() - t.Logf("Post-merge header: %d", block.NumberU64()) - } - // Run the header checker for blocks one-by-one, checking for both valid and invalid nonces - chain, _ := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, engine, vm.Config{}, nil, nil) - defer chain.Stop() - - // Verify the blocks before the merging - for i := 0; i < len(preBlocks); i++ { - _, results := engine.VerifyHeaders(chain, []*types.Header{preHeaders[i]}) - // Wait for the verification result - select { - case result := <-results: - if result != nil { - t.Errorf("pre-block %d: verification failed %v", i, result) - } - case <-time.After(time.Second): - t.Fatalf("pre-block %d: verification timeout", i) - } - // Make sure no more data is returned - select { - case result := <-results: - t.Fatalf("pre-block %d: unexpected result returned: %v", i, result) - case <-time.After(25 * time.Millisecond): - } - chain.InsertChain(preBlocks[i : i+1]) - } - - // Make the transition - merger.ReachTTD() - merger.FinalizePoS() - - // Verify the blocks after the merging - for i := 0; i < len(postBlocks); i++ { - _, results := engine.VerifyHeaders(chain, []*types.Header{postHeaders[i]}) - // Wait for the verification result - select { - case result := <-results: - if result != nil { - t.Errorf("post-block %d: verification failed %v", i, result) - } - case <-time.After(time.Second): - t.Fatalf("test %d: verification timeout", i) - } - // Make sure no more data is returned - select { - case result := <-results: - t.Fatalf("post-block %d: unexpected result returned: %v", i, result) - case <-time.After(25 * time.Millisecond): - } - chain.InsertBlockWithoutSetHead(postBlocks[i]) - } - - // Verify the blocks with pre-merge blocks and post-merge blocks - var headers []*types.Header - for _, block := range preBlocks { - headers = append(headers, block.Header()) - } - for _, block := range postBlocks { - headers = append(headers, block.Header()) - } - _, results := engine.VerifyHeaders(chain, headers) - 
for i := 0; i < len(headers); i++ { - select { - case result := <-results: - if result != nil { - t.Errorf("test %d: verification failed %v", i, result) - } - case <-time.After(time.Second): - t.Fatalf("test %d: verification timeout", i) - } - } - // Make sure no more data is returned - select { - case result := <-results: - t.Fatalf("unexpected result returned: %v", result) - case <-time.After(25 * time.Millisecond): - } -} - -func TestCalcGasLimit(t *testing.T) { - for i, tc := range []struct { - pGasLimit uint64 - max uint64 - min uint64 - }{ - {20000000, 20078124, 19921876}, - {40000000, 40156249, 39843751}, - } { - // Increase - if have, want := CalcGasLimit(tc.pGasLimit, 2*tc.pGasLimit), tc.max; have != want { - t.Errorf("test %d: have %d want <%d", i, have, want) - } - // Decrease - if have, want := CalcGasLimit(tc.pGasLimit, 0), tc.min; have != want { - t.Errorf("test %d: have %d want >%d", i, have, want) - } - // Small decrease - if have, want := CalcGasLimit(tc.pGasLimit, tc.pGasLimit-1), tc.pGasLimit-1; have != want { - t.Errorf("test %d: have %d want %d", i, have, want) - } - // Small increase - if have, want := CalcGasLimit(tc.pGasLimit, tc.pGasLimit+1), tc.pGasLimit+1; have != want { - t.Errorf("test %d: have %d want %d", i, have, want) - } - // No change - if have, want := CalcGasLimit(tc.pGasLimit, tc.pGasLimit), tc.pGasLimit; have != want { - t.Errorf("test %d: have %d want %d", i, have, want) - } - } -} diff --git a/core/blockarchiver/client.go b/core/blockarchiver/client.go new file mode 100644 index 0000000000..97d9398b2e --- /dev/null +++ b/core/blockarchiver/client.go @@ -0,0 +1,229 @@ +package blockarchiver + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "os" + "strings" + "time" + + "github.com/ethereum/go-ethereum/common" + + bundlesdk "github.com/bnb-chain/greenfield-bundle-sdk/bundle" +) + +// Client is a client to interact with the block archiver service +type Client struct { + hc *http.Client + blockArchiverHost string + spHost string + bucketName string +} + +func New(blockAchieverHost, spHost, bucketName string) (*Client, error) { + transport := &http.Transport{ + DisableCompression: true, + MaxIdleConnsPerHost: 1000, + MaxConnsPerHost: 1000, + IdleConnTimeout: 90 * time.Second, + } + client := &http.Client{ + Timeout: 10 * time.Minute, + Transport: transport, + } + return &Client{hc: client, blockArchiverHost: blockAchieverHost, spHost: spHost, bucketName: bucketName}, nil +} + +func (c *Client) GetBlockByHash(ctx context.Context, hash common.Hash) (*Block, error) { + payload := preparePayload("eth_getBlockByHash", []interface{}{hash.String(), "true"}) + body, err := c.postRequest(ctx, payload) + if err != nil { + return nil, err + } + getBlockResp := GetBlockResponse{} + err = json.Unmarshal(body, &getBlockResp) + if err != nil { + return nil, err + } + return getBlockResp.Result, nil +} + +func (c *Client) GetBlockByNumber(ctx context.Context, number uint64) (*Block, error) { + payload := preparePayload("eth_getBlockByNumber", []interface{}{Int64ToHex(int64(number)), "true"}) + body, err := c.postRequest(ctx, payload) + if err != nil { + return nil, err + } + getBlockResp := GetBlockResponse{} + err = json.Unmarshal(body, &getBlockResp) + if err != nil { + return nil, err + } + return getBlockResp.Result, nil +} + +func (c *Client) GetLatestBlock(ctx context.Context) (*Block, error) { + payload := preparePayload("eth_getBlockByNumber", []interface{}{"latest", "true"}) + body, err := c.postRequest(ctx, payload) + 
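// A minimal usage sketch (illustrative only, assuming it sits alongside this client;
// the archiver endpoint, SP host and bucket below are placeholders, not real services).
// Only New, GetLatestBlock, GetBlockByNumber, GetBundleName and GetBundleBlocks are
// taken from this file. Each RPC-style method is sent by postRequest as a JSON-RPC 2.0
// POST shaped by preparePayload, e.g.
// {"jsonrpc":"2.0","method":"eth_getBlockByNumber","params":["latest","true"],"id":1}.
func sketchArchiverUsage(ctx context.Context) error {
	client, err := New("https://gnfd-archiver.example.com", "https://gnfd-sp.example.com", "example-bucket")
	if err != nil {
		return err
	}
	latest, err := client.GetLatestBlock(ctx)
	if err != nil {
		return err
	}
	// Block numbers come back hex-encoded, matching what the converter expects.
	fmt.Println("latest archived block:", latest.Number)

	// Bundles can also be resolved explicitly: GetBundleName looks up which bundle
	// holds a given block, and GetBundleBlocks downloads that bundle object from the
	// SP and unpacks the blocks it contains.
	name, err := client.GetBundleName(ctx, 1000000)
	if err != nil {
		return err
	}
	blocks, err := client.GetBundleBlocks(ctx, name)
	if err != nil {
		return err
	}
	fmt.Println("blocks in bundle:", len(blocks))
	return nil
}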
if err != nil { + return nil, err + } + getBlockResp := GetBlockResponse{} + err = json.Unmarshal(body, &getBlockResp) + if err != nil { + return nil, err + } + return getBlockResp.Result, nil +} + +// GetBundleName returns the bundle name by a specific block number +func (c *Client) GetBundleName(ctx context.Context, blockNum uint64) (string, error) { + req, err := http.NewRequestWithContext(ctx, "GET", c.blockArchiverHost+fmt.Sprintf("/bsc/v1/blocks/%d/bundle/name", blockNum), nil) + if err != nil { + return "", err + } + resp, err := c.hc.Do(req) + if err != nil { + return "", err + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + return "", errors.New("failed to get bundle name") + } + body, err := io.ReadAll(resp.Body) + if err != nil { + return "", err + } + getBundleNameResp := GetBundleNameResponse{} + err = json.Unmarshal(body, &getBundleNameResp) + if err != nil { + return "", err + } + return getBundleNameResp.Data, nil +} + +// GetBundleBlocksByBlockNum returns the bundle blocks by block number that within the range +func (c *Client) GetBundleBlocksByBlockNum(ctx context.Context, blockNum uint64) ([]*Block, error) { + payload := preparePayload("eth_getBundledBlockByNumber", []interface{}{Int64ToHex(int64(blockNum))}) + body, err := c.postRequest(ctx, payload) + if err != nil { + return nil, err + } + getBlocksResp := GetBlocksResponse{} + err = json.Unmarshal(body, &getBlocksResp) + if err != nil { + return nil, err + } + return getBlocksResp.Result, nil +} + +// GetBundleBlocks returns the bundle blocks by object name +func (c *Client) GetBundleBlocks(ctx context.Context, objectName string) ([]*Block, error) { + var urlStr string + parts := strings.Split(c.spHost, "//") + urlStr = parts[0] + "//" + c.bucketName + "." 
+ parts[1] + "/" + objectName + + req, err := http.NewRequestWithContext(ctx, http.MethodGet, urlStr, nil) + if err != nil { + return nil, err + } + resp, err := c.hc.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + body, err := io.ReadAll(resp.Body) + if err != nil { + return nil, err + } + + tempFile, err := os.CreateTemp("", "bundle") + if err != nil { + fmt.Printf("Failed to create temporary file: %v\n", err) + return nil, err + } + defer os.Remove(tempFile.Name()) + // Write the content to the temporary file + _, err = tempFile.Write(body) + if err != nil { + fmt.Printf("Failed to write downloaded bundle to file: %v\n", err) + return nil, err + } + defer tempFile.Close() + + bundleObjects, err := bundlesdk.NewBundleFromFile(tempFile.Name()) + if err != nil { + fmt.Printf("Failed to create bundle from file: %v\n", err) + return nil, err + } + var blocksInfo []*Block + for _, objMeta := range bundleObjects.GetBundleObjectsMeta() { + objFile, _, err := bundleObjects.GetObject(objMeta.Name) + if err != nil { + return nil, err + } + + var objectInfo []byte + objectInfo, err = io.ReadAll(objFile) + if err != nil { + objFile.Close() + return nil, err + } + objFile.Close() + + var blockInfo *Block + err = json.Unmarshal(objectInfo, &blockInfo) + if err != nil { + return nil, err + } + blocksInfo = append(blocksInfo, blockInfo) + } + + return blocksInfo, nil +} + +// postRequest sends a POST request to the block archiver service +func (c *Client) postRequest(ctx context.Context, payload map[string]interface{}) ([]byte, error) { + // Encode payload to JSON + payloadBytes, err := json.Marshal(payload) + if err != nil { + return nil, err + } + + // post call to block archiver + req, err := http.NewRequestWithContext(ctx, "POST", c.blockArchiverHost, bytes.NewBuffer(payloadBytes)) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", "application/json") + // Perform the HTTP request + resp, err := c.hc.Do(req) + if err != nil { + return nil, err + } + if resp.StatusCode != http.StatusOK { + return nil, errors.New("failed to get response") + } + defer resp.Body.Close() + body, err := io.ReadAll(resp.Body) + if err != nil { + return nil, err + } + return body, nil +} + +// preparePayload prepares the payload for the request +func preparePayload(method string, params []interface{}) map[string]interface{} { + return map[string]interface{}{ + "jsonrpc": "2.0", + "method": method, + "params": params, + "id": 1, + } +} diff --git a/core/blockarchiver/config.go b/core/blockarchiver/config.go new file mode 100644 index 0000000000..3dcc41929f --- /dev/null +++ b/core/blockarchiver/config.go @@ -0,0 +1,12 @@ +package blockarchiver + +type BlockArchiverConfig struct { + RPCAddress string + SPAddress string + BucketName string + BlockCacheSize int64 +} + +var DefaultBlockArchiverConfig = BlockArchiverConfig{ + BlockCacheSize: 50000, +} diff --git a/core/blockarchiver/converter.go b/core/blockarchiver/converter.go new file mode 100644 index 0000000000..fb381a9b82 --- /dev/null +++ b/core/blockarchiver/converter.go @@ -0,0 +1,321 @@ +package blockarchiver + +import ( + "errors" + "math/big" + "strconv" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/core/types" + "github.com/holiman/uint256" +) + +// HexToUint64 converts hex string to uint64 +func HexToUint64(hexStr string) (uint64, error) { + intValue, err := strconv.ParseUint(hexStr, 0, 64) + if err != nil { + return 0, err + } + 
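// A quick round-trip sanity sketch for the hex helpers in this file (illustrative
// only, assuming it lives in the same package): Int64ToHex always emits a "0x"
// prefix, HexToUint64 parses it back because strconv.ParseUint is called with
// base 0, and HexToBigInt strips the prefix before a base-16 parse.
func sketchHexHelpersRoundTrip() bool {
	h := Int64ToHex(26) // "0x1a"
	n, err := HexToUint64(h)
	if err != nil || n != 26 {
		return false
	}
	b, err := HexToBigInt("0x1a")
	return err == nil && b.Int64() == 26
}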
return intValue, nil +} + +// Int64ToHex converts an int64 to a 0x-prefixed hex string +func Int64ToHex(value int64) string { + return "0x" + strconv.FormatInt(value, 16) +} + +// HexToBigInt converts hex string to big.Int +func HexToBigInt(hexStr string) (*big.Int, error) { + if hexStr == "" { + hexStr = "0x0" + } + if len(hexStr) >= 2 && (hexStr[:2] == "0x" || hexStr[:2] == "0X") { + hexStr = hexStr[2:] + } + bigInt := new(big.Int) + _, success := bigInt.SetString(hexStr, 16) + if !success { + return nil, errors.New("error converting hexadecimal string to big.Int") + } + return bigInt, nil +} + +// convertBlock converts an archived RPC block into a GeneralBlock +func convertBlock(block *Block) (*GeneralBlock, error) { + if block == nil { + return nil, errors.New("block is nil") + } + difficulty, err := HexToBigInt(block.Difficulty) + if err != nil { + return nil, err + } + number, err := HexToBigInt(block.Number) + if err != nil { + return nil, err + } + gasLimit, err := HexToUint64(block.GasLimit) + if err != nil { + return nil, err + } + gasUsed, err := HexToUint64(block.GasUsed) + if err != nil { + return nil, err + } + ts, err := HexToUint64(block.Timestamp) + if err != nil { + return nil, err + } + nonce, err := HexToUint64(block.Nonce) + if err != nil { + return nil, err + } + totalDifficulty, err := HexToBigInt(block.TotalDifficulty) + if err != nil { + return nil, err + } + + var withdrawals *common.Hash + if block.WithdrawalsRoot != "" { + hash := common.HexToHash(block.WithdrawalsRoot) + withdrawals = &hash + } + + var baseFeePerGas *big.Int + if block.BaseFeePerGas != "" { + baseFeePerGas, err = HexToBigInt(block.BaseFeePerGas) + if err != nil { + return nil, err + } + } + var blobGasUsed *uint64 + if block.BlobGasUsed != "" { + blobGas, err := HexToUint64(block.BlobGasUsed) + if err != nil { + return nil, err + } + blobGasUsed = &blobGas + } + var excessBlobGas *uint64 + if block.ExcessBlobGas != "" { + blobGas, err := HexToUint64(block.ExcessBlobGas) + if err != nil { + return nil, err + } + excessBlobGas = &blobGas + } + var parentBeaconRoot *common.Hash + if block.ParentBeaconRoot != "" { + hash := common.HexToHash(block.ParentBeaconRoot) + parentBeaconRoot = &hash + } + + header := &types.Header{ + ParentHash: common.HexToHash(block.ParentHash), + UncleHash: common.HexToHash(block.Sha3Uncles), + Coinbase: common.HexToAddress(block.Miner), + Root: common.HexToHash(block.StateRoot), + TxHash: common.HexToHash(block.TransactionsRoot), + ReceiptHash: common.HexToHash(block.ReceiptsRoot), + Bloom: types.BytesToBloom(hexutil.MustDecode(block.LogsBloom)), + Difficulty: difficulty, + Number: number, + GasLimit: gasLimit, + GasUsed: gasUsed, + Time: ts, + Extra: hexutil.MustDecode(block.ExtraData), + MixDigest: common.HexToHash(block.MixHash), + Nonce: types.EncodeNonce(nonce), + WithdrawalsHash: withdrawals, + BlobGasUsed: blobGasUsed, + ExcessBlobGas: excessBlobGas, + ParentBeaconRoot: parentBeaconRoot, + } + if baseFeePerGas != nil { + header.BaseFee = baseFeePerGas + } + + txs := make([]*types.Transaction, 0) + for _, tx := range block.Transactions { + nonce, err := HexToUint64(tx.Nonce) + if err != nil { + return nil, err + } + var toAddr *common.Address + if tx.To != "" { + addr := common.HexToAddress(tx.To) + toAddr = &addr + } + val, err := HexToBigInt(tx.Value) + if err != nil { + return nil, err + } + gas, err := HexToUint64(tx.Gas) + if err != nil { + return nil, err + } + gasPrice, err := HexToBigInt(tx.GasPrice) + if err != nil { + return nil, err + } + v, err := HexToBigInt(tx.V) + if err != nil { + return nil, err + } + r, err := HexToBigInt(tx.R) + if err != nil { 
return nil, err + } + s, err := HexToBigInt(tx.S) + if err != nil { + return nil, err + } + input := hexutil.MustDecode(tx.Input) + switch tx.Type { + case "0x0": + // legacy transaction + legacyTx := &types.LegacyTx{ + Nonce: nonce, + To: toAddr, + Value: val, + Gas: gas, + GasPrice: gasPrice, + Data: input, + V: v, + R: r, + S: s, + } + txn := types.NewTx(legacyTx) + txs = append(txs, txn) + case "0x1": + chainId, err := HexToBigInt(tx.ChainId) + if err != nil { + return nil, err + } + var accessList types.AccessList + for _, access := range tx.AccessList { + var keys []common.Hash + for _, key := range access.StorageKeys { + storageKey := common.HexToHash(key) + keys = append(keys, storageKey) + } + accessList = append(accessList, types.AccessTuple{ + Address: common.HexToAddress(access.Address), + StorageKeys: keys, + }) + } + txn := types.NewTx(&types.AccessListTx{ + ChainID: chainId, + Nonce: nonce, + GasPrice: gasPrice, + Gas: gas, + To: toAddr, + Value: val, + Data: input, + AccessList: accessList, + V: v, + R: r, + S: s, + }) + txs = append(txs, txn) + case "0x2": + chainId, err := HexToBigInt(tx.ChainId) + if err != nil { + return nil, err + } + gasTipCap, err := HexToBigInt(tx.MaxPriorityFeePerGas) + if err != nil { + return nil, err + } + gasFeeCap, err := HexToBigInt(tx.MaxFeePerGas) + if err != nil { + return nil, err + } + var accessList types.AccessList + for _, access := range tx.AccessList { + var keys []common.Hash + for _, key := range access.StorageKeys { + storageKey := common.HexToHash(key) + keys = append(keys, storageKey) + } + accessList = append(accessList, types.AccessTuple{ + Address: common.HexToAddress(access.Address), + StorageKeys: keys, + }) + } + txn := types.NewTx(&types.DynamicFeeTx{ + ChainID: chainId, + Nonce: nonce, + GasTipCap: gasTipCap, + GasFeeCap: gasFeeCap, + Gas: gas, + To: toAddr, + Value: val, + Data: input, + AccessList: accessList, + V: v, + R: r, + S: s, + }) + txs = append(txs, txn) + case "0x3": + chainId, err := HexToUint64(tx.ChainId) + if err != nil { + return nil, err + } + gasTipCap, err := HexToUint64(tx.MaxPriorityFeePerGas) + if err != nil { + return nil, err + } + gasFeeCap, err := HexToUint64(tx.MaxFeePerGas) + if err != nil { + return nil, err + } + maxFeePerBlobGas, err := HexToBigInt(tx.MaxFeePerBlobGas) + if err != nil { + return nil, err + } + + var accessList types.AccessList + for _, access := range tx.AccessList { + var keys []common.Hash + for _, key := range access.StorageKeys { + storageKey := common.HexToHash(key) + keys = append(keys, storageKey) + } + accessList = append(accessList, types.AccessTuple{ + Address: common.HexToAddress(access.Address), + StorageKeys: keys, + }) + } + var blobHashes []common.Hash + for _, blob := range tx.BlobVersionedHashes { + blobHash := common.HexToHash(blob) + blobHashes = append(blobHashes, blobHash) + } + transaction := types.NewTx(&types.BlobTx{ + ChainID: uint256.NewInt(chainId), + Nonce: nonce, + GasTipCap: uint256.NewInt(gasTipCap), + GasFeeCap: uint256.NewInt(gasFeeCap), + Gas: gas, + To: *toAddr, + Value: uint256.MustFromBig(val), // avoid truncating values that exceed 64 bits + Data: input, + AccessList: accessList, + V: uint256.MustFromBig(v), + R: uint256.MustFromBig(r), + S: uint256.MustFromBig(s), + BlobFeeCap: uint256.MustFromBig(maxFeePerBlobGas), + BlobHashes: blobHashes, + }) + txs = append(txs, transaction) + } + } + newBlock := types.NewBlockWithHeader(header).WithBody(txs, nil) + return &GeneralBlock{ + Block: newBlock, + TotalDifficulty: 
totalDifficulty, + }, nil +} diff --git a/core/blockarchiver/service.go b/core/blockarchiver/service.go new file mode 100644 index 0000000000..5e100e4eb5 --- /dev/null +++ b/core/blockarchiver/service.go @@ -0,0 +1,207 @@ +package blockarchiver + +import ( + "context" + "errors" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/lru" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/log" +) + +const ( + GetBlockTimeout = 5 * time.Second + + RPCTimeout = 30 * time.Second +) + +var _ BlockArchiver = (*BlockArchiverService)(nil) + +type BlockArchiver interface { + GetLatestBlock() (*GeneralBlock, error) + GetBlockByNumber(number uint64) (*types.Body, *types.Header, error) + GetBlockByHash(hash common.Hash) (*types.Body, *types.Header, error) +} + +type BlockArchiverService struct { + // client to interact with the block archiver service + client *Client + // injected from BlockChain; a block's body and header are always read and written together in bodyCache and headerCache. + bodyCache *lru.Cache[common.Hash, *types.Body] + // injected from BlockChain.headerChain + headerCache *lru.Cache[common.Hash, *types.Header] + // hashCache is a cache for block number to hash mapping + hashCache *lru.Cache[uint64, common.Hash] + // requestLock is a lock to avoid concurrent fetching of the same bundle of blocks + requestLock *RequestLock +} + +// NewBlockArchiverService creates a new block archiver service; +// the bodyCache and headerCache are injected from the BlockChain +func NewBlockArchiverService(blockArchiver, sp, bucketName string, + bodyCache *lru.Cache[common.Hash, *types.Body], + headerCache *lru.Cache[common.Hash, *types.Header], + cacheSize int, +) (BlockArchiver, error) { + client, err := New(blockArchiver, sp, bucketName) + if err != nil { + return nil, err + } + b := &BlockArchiverService{ + client: client, + bodyCache: bodyCache, + headerCache: headerCache, + hashCache: lru.NewCache[uint64, common.Hash](cacheSize), + requestLock: NewRequestLock(), + } + go b.cacheStats() + return b, nil +} + +// GetLatestBlock returns the latest block +func (c *BlockArchiverService) GetLatestBlock() (*GeneralBlock, error) { + ctx, cancel := context.WithTimeout(context.Background(), RPCTimeout) + defer cancel() + blockResp, err := c.client.GetLatestBlock(ctx) + if err != nil { + log.Error("failed to get latest block", "err", err) + return nil, err + } + block, err := convertBlock(blockResp) + if err != nil { + log.Error("failed to convert block", "block", blockResp, "err", err) + return nil, err + } + return block, nil +} + +// GetLatestHeader returns the latest header +func (c *BlockArchiverService) GetLatestHeader() (*types.Header, error) { + block, err := c.GetLatestBlock() + if err != nil { + log.Error("failed to get latest block", "err", err) + return nil, err + } + return block.Header(), nil +} + +// GetBlockByNumber returns the block by number +func (c *BlockArchiverService) GetBlockByNumber(number uint64) (*types.Body, *types.Header, error) { + log.Debug("get block by number", "number", number) + hash, found := c.hashCache.Get(number) + if found { + log.Debug("GetBlockByNumber found in cache", "number", number) + body, foundB := c.bodyCache.Get(hash) + header, foundH := c.headerCache.Get(hash) + if foundB && foundH { + return body, header, nil + } + } + return c.getBlockByNumber(number) +} + +// getBlockByNumber returns the block by number +func (c *BlockArchiverService) getBlockByNumber(number uint64) (*types.Body, *types.Header, 
error) { + // requestLock is used here to avoid concurrent fetching of the same bundle of blocks: + // if the number falls within a range that is already being fetched, do not request the bundle from the + // block archiver service again; instead wait for the in-flight request to finish and read from the cache + if c.requestLock.IsWithinAnyRange(number) { + log.Debug("getBlockByNumber is within an in-flight range", "number", number) + if blockRange := c.requestLock.GetRangeForNumber(number); blockRange != nil { + select { + case <-blockRange.done: + hash, found := c.hashCache.Get(number) + if found { + body, foundB := c.bodyCache.Get(hash) + header, foundH := c.headerCache.Get(hash) + if foundB && foundH { + return body, header, nil + } + } + case <-time.After(GetBlockTimeout): + return nil, nil, errors.New("block not found") + } + } + } + // fetch the bundle that covers the requested block number + log.Info("fetching bundle of blocks", "number", number) + ctx, cancel := context.WithTimeout(context.Background(), RPCTimeout) + defer cancel() + + bundleName, err := c.client.GetBundleName(ctx, number) + if err != nil { + log.Error("failed to get bundle name", "number", number, "err", err) + return nil, nil, err + } + + start, end, err := ParseBundleName(bundleName) + if err != nil { + log.Error("failed to parse bundle name", "bundleName", bundleName, "err", err) + return nil, nil, err + } + // add lock to avoid concurrent fetching of the same bundle of blocks + c.requestLock.AddRange(start, end) + defer c.requestLock.RemoveRange(start, end) + ctx, cancel = context.WithTimeout(context.Background(), RPCTimeout) + defer cancel() + + blocks, err := c.client.GetBundleBlocks(ctx, bundleName) + if err != nil { + log.Error("failed to get bundle blocks", "bundleName", bundleName, "err", err) + return nil, nil, err + } + var body *types.Body + var header *types.Header + + log.Debug("populating block cache", "start", start, "end", end) + for _, b := range blocks { + block, err := convertBlock(b) + if err != nil { + log.Error("failed to convert block", "block", b, "err", err) + return nil, nil, err + } + c.bodyCache.Add(block.Hash(), block.Body()) + c.headerCache.Add(block.Hash(), block.Header()) + c.hashCache.Add(block.NumberU64(), block.Hash()) + if block.NumberU64() == number { + body = block.Body() + header = block.Header() + } + } + return body, header, nil +} + +// GetBlockByHash returns the block by hash +func (c *BlockArchiverService) GetBlockByHash(hash common.Hash) (*types.Body, *types.Header, error) { + log.Debug("get block by hash", "hash", hash.Hex()) + body, foundB := c.bodyCache.Get(hash) + header, foundH := c.headerCache.Get(hash) + if foundB && foundH { + return body, header, nil + } + ctx, cancel := context.WithTimeout(context.Background(), RPCTimeout) + defer cancel() + block, err := c.client.GetBlockByHash(ctx, hash) + if err != nil { + log.Error("failed to get block by hash", "hash", hash, "err", err) + return nil, nil, err + } + if block == nil { + log.Debug("block is nil", "hash", hash) + return nil, nil, nil + } + number, err := HexToUint64(block.Number) + if err != nil { + log.Error("failed to convert block number", "block", block, "err", err) + return nil, nil, err + } + return c.getBlockByNumber(number) +} + +func (c *BlockArchiverService) cacheStats() { + for range time.NewTicker(1 * time.Minute).C { + log.Info("block archiver cache stats", "bodyCache", c.bodyCache.Len(), "headerCache", c.headerCache.Len(), "hashCache", c.hashCache.Len()) + } +} diff --git a/core/blockarchiver/types.go b/core/blockarchiver/types.go new file mode 100644 index 0000000000..4bc2c1b960 --- /dev/null +++ 
b/core/blockarchiver/types.go @@ -0,0 +1,193 @@ +package blockarchiver + +import ( + "math/big" + "strconv" + "strings" + "sync" + + "github.com/ethereum/go-ethereum/core/types" +) + +// JsonError represents an error in JSON format +type JsonError struct { + Code int `json:"code"` + Message string `json:"message"` + Data interface{} `json:"data,omitempty"` +} + +// Block represents a block in the Ethereum blockchain +type Block struct { + WithdrawalsRoot string `json:"withdrawalsRoot"` + Withdrawals []string `json:"withdrawals"` + Hash string `json:"hash"` + ParentHash string `json:"parentHash"` + Sha3Uncles string `json:"sha3Uncles"` + Miner string `json:"miner"` + StateRoot string `json:"stateRoot"` + TransactionsRoot string `json:"transactionsRoot"` + ReceiptsRoot string `json:"receiptsRoot"` + LogsBloom string `json:"logsBloom"` + Difficulty string `json:"difficulty"` + Number string `json:"number"` + GasLimit string `json:"gasLimit"` + GasUsed string `json:"gasUsed"` + Timestamp string `json:"timestamp"` + ExtraData string `json:"extraData"` + MixHash string `json:"mixHash"` + Nonce string `json:"nonce"` + Size string `json:"size"` + TotalDifficulty string `json:"totalDifficulty"` + BaseFeePerGas string `json:"baseFeePerGas"` + Transactions []Transaction `json:"transactions"` + Uncles []string `json:"uncles"` + BlobGasUsed string `json:"blobGasUsed"` + ExcessBlobGas string `json:"excessBlobGas"` + ParentBeaconRoot string `json:"parentBeaconBlockRoot"` +} + +// GetBlockResponse represents a response from the getBlock RPC call +type GetBlockResponse struct { + ID int64 `json:"id,omitempty"` + Error *JsonError `json:"error,omitempty"` + Jsonrpc string `json:"jsonrpc,omitempty"` + Result *Block `json:"result,omitempty"` +} + +// GetBlocksResponse represents a response from the getBlocks RPC call +type GetBlocksResponse struct { + ID int64 `json:"id,omitempty"` + Error *JsonError `json:"error,omitempty"` + Jsonrpc string `json:"jsonrpc,omitempty"` + Result []*Block `json:"result,omitempty"` +} + +// GetBundleNameResponse represents a response from the getBundleName RPC call +type GetBundleNameResponse struct { + Data string `json:"data"` +} + +// Transaction represents a transaction in the Ethereum blockchain +type Transaction struct { + BlockHash string `json:"blockHash"` + BlockNumber string `json:"blockNumber"` + From string `json:"from"` + Gas string `json:"gas"` + GasPrice string `json:"gasPrice"` + Hash string `json:"hash"` + Input string `json:"input"` + Nonce string `json:"nonce"` + To string `json:"to"` + TransactionIndex string `json:"transactionIndex"` + Value string `json:"value"` + Type string `json:"type"` + AccessList []AccessTuple `json:"accessList"` + ChainId string `json:"chainId"` + V string `json:"v"` + R string `json:"r"` + S string `json:"s"` + YParity string `json:"yParity"` + MaxPriorityFeePerGas string `json:"maxPriorityFeePerGas"` + MaxFeePerGas string `json:"maxFeePerGas"` + MaxFeePerDataGas string `json:"maxFeePerDataGas"` + MaxFeePerBlobGas string `json:"maxFeePerBlobGas"` + BlobVersionedHashes []string `json:"blobVersionedHashes"` +} + +// AccessTuple represents a tuple of an address and a list of storage keys +type AccessTuple struct { + Address string + StorageKeys []string +} + +// GeneralBlock represents a block in the Ethereum blockchain +type GeneralBlock struct { + *types.Block + TotalDifficulty *big.Int `json:"totalDifficulty"` // Total difficulty in the canonical chain up to and including this block. 
+} + +// Range represents a range of Block numbers +type Range struct { + from uint64 + to uint64 + // done is a channel closed when the range is removed + done chan struct{} +} + +// RequestLock is a lock for making sure we don't fetch the same bundle concurrently +type RequestLock struct { + // TODO + // there is tradeoff between using a Map or List of ranges, in this case, the lookup needs to be populated every + // time a new range is added, but the lookup is faster. If we use a list, we need to iterate over the list to check + // if the number is within any of the ranges, but we don't need to populate the lookup every time a new range is added. + rangeMap map[uint64]*Range + lookupMap map[uint64]*Range + mu sync.RWMutex +} + +// NewRequestLock creates a new RequestLock +func NewRequestLock() *RequestLock { + return &RequestLock{ + rangeMap: make(map[uint64]*Range), + lookupMap: make(map[uint64]*Range), + } +} + +// IsWithinAnyRange checks if the number is within any of the cached ranges +func (rl *RequestLock) IsWithinAnyRange(num uint64) bool { + rl.mu.RLock() + defer rl.mu.RUnlock() + _, exists := rl.lookupMap[num] + return exists +} + +// AddRange adds a new range to the cache +func (rl *RequestLock) AddRange(from, to uint64) { + rl.mu.Lock() + defer rl.mu.Unlock() + newRange := &Range{ + from: from, + to: to, + done: make(chan struct{}), + } + rl.rangeMap[from] = newRange + // provide fast lookup + for i := from; i <= to; i++ { + rl.lookupMap[i] = newRange + } +} + +// RemoveRange removes a range from the cache +func (rl *RequestLock) RemoveRange(from, to uint64) { + rl.mu.Lock() + defer rl.mu.Unlock() + + r, exists := rl.rangeMap[from] + if !exists { + return + } + delete(rl.rangeMap, from) + for i := from; i <= to; i++ { + delete(rl.lookupMap, i) + } + close(r.done) +} + +func (rl *RequestLock) GetRangeForNumber(number uint64) *Range { + rl.mu.RLock() + defer rl.mu.RUnlock() + return rl.lookupMap[number] +} + +func ParseBundleName(bundleName string) (uint64, uint64, error) { + parts := strings.Split(bundleName, "_") + startHeight, err := strconv.ParseUint(parts[1][1:], 10, 64) + if err != nil { + return 0, 0, err + } + endHeight, err := strconv.ParseUint(parts[2][1:], 10, 64) + if err != nil { + return 0, 0, err + } + return startHeight, endHeight, nil +} diff --git a/core/blockchain.go b/core/blockchain.go index f6f9a22c8d..ba14777ddc 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -28,9 +28,13 @@ import ( "sync/atomic" "time" + "golang.org/x/crypto/sha3" + mapset "github.com/deckarep/golang-set/v2" + "github.com/ethereum/go-ethereum/core/blockarchiver" exlru "github.com/hashicorp/golang-lru" - "golang.org/x/crypto/sha3" + + "golang.org/x/exp/slices" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/lru" @@ -56,7 +60,6 @@ import ( "github.com/ethereum/go-ethereum/triedb" "github.com/ethereum/go-ethereum/triedb/hashdb" "github.com/ethereum/go-ethereum/triedb/pathdb" - "golang.org/x/exp/slices" ) var ( @@ -316,6 +319,10 @@ type BlockChain struct { // monitor doubleSignMonitor *monitor.DoubleSignMonitor + + // block archiver service for fetching blocks from blob hub, and it will access the bodyCache, hc.headerCache + blockArchiverConfig *blockarchiver.BlockArchiverConfig + blockArchiverService blockarchiver.BlockArchiver } // NewBlockChain returns a fully initialised block chain using information @@ -367,11 +374,10 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, genesis *Genesis quit: make(chan struct{}), triesInMemory: 
cacheConfig.TriesInMemory, chainmu: syncx.NewClosableMutex(), - bodyCache: lru.NewCache[common.Hash, *types.Body](bodyCacheLimit), bodyRLPCache: lru.NewCache[common.Hash, rlp.RawValue](bodyCacheLimit), + blockCache: lru.NewCache[common.Hash, *types.Block](blockCacheLimit), receiptsCache: lru.NewCache[common.Hash, []*types.Receipt](receiptsCacheLimit), sidecarsCache: lru.NewCache[common.Hash, types.BlobSidecars](sidecarsCacheLimit), - blockCache: lru.NewCache[common.Hash, *types.Block](blockCacheLimit), txLookupCache: lru.NewCache[common.Hash, txLookup](txLookupCacheLimit), futureBlocks: lru.NewCache[common.Hash, *types.Block](maxFutureBlocks), badBlockCache: lru.NewCache[common.Hash, time.Time](maxBadBlockLimit), @@ -394,14 +400,16 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, genesis *Genesis if err != nil { return nil, err } - bc.genesisBlock = bc.GetBlockByNumber(0) + genesisBlock := bc.GetBlockByNumber(0) + + bc.genesisBlock = genesisBlock if bc.genesisBlock == nil { return nil, ErrNoGenesis } bc.highestVerifiedHeader.Store(nil) - bc.currentBlock.Store(nil) - bc.currentSnapBlock.Store(nil) + bc.currentBlock.Store(genesisBlock.Header()) + bc.currentSnapBlock.Store(genesisBlock.Header()) bc.chasingHead.Store(nil) // Update chain info data metrics @@ -413,131 +421,7 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, genesis *Genesis if bc.empty() { rawdb.InitDatabaseFromFreezer(bc.db) } - // Load blockchain states from disk - if err := bc.loadLastState(); err != nil { - return nil, err - } - // Make sure the state associated with the block is available, or log out - // if there is no available state, waiting for state sync. - head := bc.CurrentBlock() - if !bc.HasState(head.Root) { - if head.Number.Uint64() == 0 { - // The genesis state is missing, which is only possible in the path-based - // scheme. This situation occurs when the initial state sync is not finished - // yet, or the chain head is rewound below the pivot point. In both scenarios, - // there is no possible recovery approach except for rerunning a snap sync. - // Do nothing here until the state syncer picks it up. - log.Info("Genesis state is missing, wait state sync") - } else { - // Head state is missing, before the state recovery, find out the - // disk layer point of snapshot(if it's enabled). Make sure the - // rewound point is lower than disk layer. 
- var diskRoot common.Hash - if bc.cacheConfig.SnapshotLimit > 0 { - diskRoot = rawdb.ReadSnapshotRoot(bc.db) - } - if bc.triedb.Scheme() == rawdb.PathScheme && !bc.NoTries() { - recoverable, _ := bc.triedb.Recoverable(diskRoot) - if !bc.HasState(diskRoot) && !recoverable { - diskRoot = bc.triedb.Head() - } - } - if diskRoot != (common.Hash{}) { - log.Warn("Head state missing, repairing", "number", head.Number, "hash", head.Hash(), "diskRoot", diskRoot) - - snapDisk, err := bc.setHeadBeyondRoot(head.Number.Uint64(), 0, diskRoot, true) - if err != nil { - return nil, err - } - // Chain rewound, persist old snapshot number to indicate recovery procedure - if snapDisk != 0 { - rawdb.WriteSnapshotRecoveryNumber(bc.db, snapDisk) - } - } else { - log.Warn("Head state missing, repairing", "number", head.Number, "hash", head.Hash()) - if _, err := bc.setHeadBeyondRoot(head.Number.Uint64(), 0, common.Hash{}, true); err != nil { - return nil, err - } - } - } - } - // Ensure that a previous crash in SetHead doesn't leave extra ancients - if frozen, err := bc.db.ItemAmountInAncient(); err == nil && frozen > 0 { - frozen, err = bc.db.Ancients() - if err != nil { - return nil, err - } - var ( - needRewind bool - low uint64 - ) - // The head full block may be rolled back to a very low height due to - // blockchain repair. If the head full block is even lower than the ancient - // chain, truncate the ancient store. - fullBlock := bc.CurrentBlock() - if fullBlock != nil && fullBlock.Hash() != bc.genesisBlock.Hash() && fullBlock.Number.Uint64() < frozen-1 { - needRewind = true - low = fullBlock.Number.Uint64() - } - // In snap sync, it may happen that ancient data has been written to the - // ancient store, but the LastFastBlock has not been updated, truncate the - // extra data here. - snapBlock := bc.CurrentSnapBlock() - if snapBlock != nil && snapBlock.Number.Uint64() < frozen-1 { - needRewind = true - if snapBlock.Number.Uint64() < low || low == 0 { - low = snapBlock.Number.Uint64() - } - } - if needRewind { - log.Error("Truncating ancient chain", "from", bc.CurrentHeader().Number.Uint64(), "to", low) - if err := bc.SetHead(low); err != nil { - return nil, err - } - } - } - // The first thing the node will do is reconstruct the verification data for - // the head block (ethash cache or clique voting snapshot). Might as well do - // it in advance. - bc.engine.VerifyHeader(bc, bc.CurrentHeader()) - - // Check the current state of the block hashes and make sure that we do not have any of the bad blocks in our chain - for hash := range BadHashes { - if header := bc.GetHeaderByHash(hash); header != nil { - // get the canonical block corresponding to the offending header's number - headerByNumber := bc.GetHeaderByNumber(header.Number.Uint64()) - // make sure the headerByNumber (if present) is in our current canonical chain - if headerByNumber != nil && headerByNumber.Hash() == header.Hash() { - log.Error("Found bad hash, rewinding chain", "number", header.Number, "hash", header.ParentHash) - if err := bc.SetHead(header.Number.Uint64() - 1); err != nil { - return nil, err - } - log.Error("Chain rewind was successful, resuming normal operation") - } - } - } - // Load any existing snapshot, regenerating it if loading failed - if bc.cacheConfig.SnapshotLimit > 0 { - // If the chain was rewound past the snapshot persistent layer (causing - // a recovery block number to be persisted to disk), check if we're still - // in recovery mode and in that case, don't invalidate the snapshot on a - // head mismatch. 
- var recover bool - - head := bc.CurrentBlock() - if layer := rawdb.ReadSnapshotRecoveryNumber(bc.db); layer != nil && *layer >= head.Number.Uint64() { - log.Warn("Enabling snapshot recovery", "chainhead", head.Number, "diskbase", *layer) - recover = true - } - snapconfig := snapshot.Config{ - CacheSize: bc.cacheConfig.SnapshotLimit, - Recovery: recover, - NoBuild: bc.cacheConfig.SnapshotNoBuild, - AsyncBuild: !bc.cacheConfig.SnapshotWait, - } - bc.snaps, _ = snapshot.New(snapconfig, bc.db, bc.triedb, head.Root, int(bc.cacheConfig.TriesInMemory), bc.NoTries()) - } // do options before start any routine for _, option := range options { bc, err = option(bc) @@ -545,40 +429,37 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, genesis *Genesis return nil, err } } - // Start future block processor. - bc.wg.Add(1) - go bc.updateFutureBlocks() + // blockCacheSize is used when init the below caches + // bodyCache, hc.headerCache + // the size of the cache is set to the same value. + cacheSize := int(bc.blockArchiverConfig.BlockCacheSize) - // Need persist and prune diff layer - if bc.db.DiffStore() != nil { - bc.wg.Add(1) - go bc.trustedDiffLayerLoop() - } - if bc.pipeCommit { - // check current block and rewind invalid one - bc.wg.Add(1) - go bc.rewindInvalidHeaderBlockLoop() - } + bc.hc.headerCache = lru.NewCache[common.Hash, *types.Header](cacheSize) + bc.bodyCache = lru.NewCache[common.Hash, *types.Body](cacheSize) - if bc.doubleSignMonitor != nil { - bc.wg.Add(1) - go bc.startDoubleSignMonitor() + // block archiver service + blockArchiverService, err := blockarchiver.NewBlockArchiverService( + bc.blockArchiverConfig.RPCAddress, + bc.blockArchiverConfig.SPAddress, + bc.blockArchiverConfig.BucketName, + bc.bodyCache, + bc.hc.headerCache, + cacheSize, + ) + if err != nil { + return nil, err } - // Rewind the chain in case of an incompatible config upgrade. - if compat, ok := genesisErr.(*params.ConfigCompatError); ok { - log.Warn("Rewinding chain to upgrade configuration", "err", compat) - if compat.RewindToTime > 0 { - bc.SetHeadWithTimestamp(compat.RewindToTime) - } else { - bc.SetHead(compat.RewindToBlock) - } - rawdb.WriteChainConfig(db, genesisHash, chainConfig) - } - // Start tx indexer if it's enabled. 
- if txLookupLimit != nil { - bc.txIndexer = newTxIndexer(*txLookupLimit, bc) + bc.blockArchiverService = blockArchiverService + bc.hc.blockArchiverService = blockArchiverService + + // init the current header cache + if err = bc.updateCurrentHeader(); err != nil { + return nil, err } + // The header will be updated periodically in the background + go bc.UpdateCurrentHeaderLoop() + return bc, nil } @@ -3152,6 +3033,13 @@ func EnableDoubleSignChecker(bc *BlockChain) (*BlockChain, error) { return bc, nil } +func EnableBlockArchiverConfig(config *blockarchiver.BlockArchiverConfig) BlockChainOption { + return func(bc *BlockChain) (*BlockChain, error) { + bc.blockArchiverConfig = config + return bc, nil + } +} + func (bc *BlockChain) GetVerifyResult(blockNumber uint64, blockHash common.Hash, diffHash common.Hash) *VerifyResult { var res VerifyResult res.BlockNumber = blockNumber @@ -3277,3 +3165,37 @@ func (bc *BlockChain) SetTrieFlushInterval(interval time.Duration) { func (bc *BlockChain) GetTrieFlushInterval() time.Duration { return time.Duration(bc.flushInterval.Load()) } + +func (bc *BlockChain) UpdateCurrentHeaderLoop() { + ticket := time.NewTicker(30 * time.Second) + defer ticket.Stop() + for { + select { + case <-ticket.C: + err := bc.updateCurrentHeader() + if err != nil { + log.Error("failed to update current header", "err", err) + continue + } + case <-bc.quit: + return + } + } +} + +func (bc *BlockChain) updateCurrentHeader() error { + block, err := bc.blockArchiverService.GetLatestBlock() + if err != nil { + return err + } + header := block.Header() + + bc.hc.SetCurrentHeader(header) + + bc.currentBlock.Store(header) + headBlockGauge.Update(int64(header.Number.Uint64())) + + bc.hc.tdCache.Add(block.Hash(), block.TotalDifficulty) + log.Info("update current header", "number", header.Number, "hash", header.Hash()) + return nil +} diff --git a/core/blockchain_diff_test.go b/core/blockchain_diff_test.go deleted file mode 100644 index 8ec14bce43..0000000000 --- a/core/blockchain_diff_test.go +++ /dev/null @@ -1,429 +0,0 @@ -// Copyright 2020 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -// Tests that abnormal program termination (i.e.crash) and restart doesn't leave -// the database in some strange state with gaps in the chain, nor with block data -// dangling in the future. 
- -package core - -import ( - "encoding/hex" - "math/big" - "testing" - "time" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/consensus/ethash" - "github.com/ethereum/go-ethereum/core/rawdb" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/core/vm" - "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/ethdb" - "github.com/ethereum/go-ethereum/ethdb/memorydb" - "github.com/ethereum/go-ethereum/params" -) - -var ( - // testKey is a private key to use for funding a tester account. - testKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") - contractCode, _ = hex.DecodeString("608060405260016000806101000a81548160ff02191690831515021790555034801561002a57600080fd5b506101688061003a6000396000f3fe608060405234801561001057600080fd5b50600436106100365760003560e01c806389a2d8011461003b578063b0483f4814610059575b600080fd5b610043610075565b60405161005091906100f4565b60405180910390f35b610073600480360381019061006e91906100bc565b61008b565b005b60008060009054906101000a900460ff16905090565b806000806101000a81548160ff02191690831515021790555050565b6000813590506100b68161011b565b92915050565b6000602082840312156100ce57600080fd5b60006100dc848285016100a7565b91505092915050565b6100ee8161010f565b82525050565b600060208201905061010960008301846100e5565b92915050565b60008115159050919050565b6101248161010f565b811461012f57600080fd5b5056fea264697066735822122092f788b569bfc3786e90601b5dbec01cfc3d76094164fd66ca7d599c4239fc5164736f6c63430008000033") - contractAddr = common.HexToAddress("0xe74a3c7427cda785e0000d42a705b1f3fd371e09") - contractData1, _ = hex.DecodeString("b0483f480000000000000000000000000000000000000000000000000000000000000000") - contractData2, _ = hex.DecodeString("b0483f480000000000000000000000000000000000000000000000000000000000000001") - commonGas = 192138 - // testAddr is the Ethereum address of the tester account. - testAddr = crypto.PubkeyToAddress(testKey.PublicKey) - - // testBlocks is the test parameters array for specific blocks. - testBlocks = []testBlockParam{ - { - // This txs params also used to default block. 
- blockNr: 11, - txs: []testTransactionParam{ - { - to: &common.Address{0x01}, - value: big.NewInt(1), - gasPrice: big.NewInt(params.InitialBaseFee), - data: nil, - }, - }, - }, - { - blockNr: 12, - txs: []testTransactionParam{ - { - to: &common.Address{0x01}, - value: big.NewInt(1), - gasPrice: big.NewInt(params.InitialBaseFee), - data: nil, - }, - { - to: &common.Address{0x02}, - value: big.NewInt(2), - gasPrice: big.NewInt(params.InitialBaseFee + 1), - data: nil, - }, - { - to: nil, - value: big.NewInt(0), - gasPrice: big.NewInt(params.InitialBaseFee + 1), - data: contractCode, - }, - }, - }, - { - blockNr: 13, - txs: []testTransactionParam{ - { - to: &common.Address{0x01}, - value: big.NewInt(1), - gasPrice: big.NewInt(params.InitialBaseFee), - data: nil, - }, - { - to: &common.Address{0x02}, - value: big.NewInt(2), - gasPrice: big.NewInt(params.InitialBaseFee + 1), - data: nil, - }, - { - to: &common.Address{0x03}, - value: big.NewInt(3), - gasPrice: big.NewInt(params.InitialBaseFee + 2), - data: nil, - }, - { - to: &contractAddr, - value: big.NewInt(0), - gasPrice: big.NewInt(params.InitialBaseFee + 2), - data: contractData1, - }, - }, - }, - { - blockNr: 14, - txs: []testTransactionParam{ - { - to: &contractAddr, - value: big.NewInt(0), - gasPrice: big.NewInt(params.InitialBaseFee + 2), - data: contractData2, - }, - }, - }, - { - blockNr: 15, - txs: []testTransactionParam{}, - }, - } -) - -type testTransactionParam struct { - to *common.Address - value *big.Int - gasPrice *big.Int - data []byte -} - -type testBlockParam struct { - blockNr int - txs []testTransactionParam -} - -// testBackend is a mock implementation of the live Ethereum message handler. Its -// purpose is to allow testing the request/reply workflows and wire serialization -// in the `eth` protocol without actually doing any data processing. -type testBackend struct { - db ethdb.Database - chain *BlockChain -} - -// newTestBackend creates an empty chain and wraps it into a mock backend. -func newTestBackend(blocks int, light bool) *testBackend { - return newTestBackendWithGenerator(blocks, light) -} - -// newTestBackend creates a chain with a number of explicitly defined blocks and -// wraps it into a mock backend. -func newTestBackendWithGenerator(blocks int, lightProcess bool) *testBackend { - signer := types.HomesteadSigner{} - // Create a database pre-initialize with a genesis block - db := rawdb.NewMemoryDatabase() - db.SetDiffStore(memorydb.New()) - gspec := &Genesis{ - Config: params.TestChainConfig, - Alloc: GenesisAlloc{testAddr: {Balance: big.NewInt(100000000000000000)}}, - BaseFee: big.NewInt(params.InitialBaseFee), - } - chain, _ := NewBlockChain(db, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil, EnablePersistDiff(860000)) - generator := func(i int, block *BlockGen) { - // The chain maker doesn't have access to a chain, so the difficulty will be - // lets unset (nil). Set it here to the correct value. - block.SetCoinbase(testAddr) - - for idx, testBlock := range testBlocks { - // Specific block setting, the index in this generator has 1 diff from specified blockNr. 
- if i+1 == testBlock.blockNr { - for _, testTransaction := range testBlock.txs { - var transaction *types.Transaction - if testTransaction.to == nil { - transaction = types.NewContractCreation(block.TxNonce(testAddr), - testTransaction.value, uint64(commonGas), testTransaction.gasPrice, testTransaction.data) - } else { - transaction = types.NewTransaction(block.TxNonce(testAddr), *testTransaction.to, - testTransaction.value, uint64(commonGas), testTransaction.gasPrice, testTransaction.data) - } - tx, err := types.SignTx(transaction, signer, testKey) - if err != nil { - panic(err) - } - block.AddTxWithChain(chain, tx) - } - break - } - - // Default block setting. - if idx == len(testBlocks)-1 { - // We want to simulate an empty middle block, having the same state as the - // first one. The last is needs a state change again to force a reorg. - for _, testTransaction := range testBlocks[0].txs { - tx, err := types.SignTx(types.NewTransaction(block.TxNonce(testAddr), *testTransaction.to, - testTransaction.value, uint64(commonGas), testTransaction.gasPrice, testTransaction.data), signer, testKey) - if err != nil { - panic(err) - } - block.AddTxWithChain(chain, tx) - } - } - } - } - bs, _ := GenerateChain(params.TestChainConfig, chain.Genesis(), ethash.NewFaker(), db, blocks, generator) - if _, err := chain.InsertChain(bs); err != nil { - panic(err) - } - - return &testBackend{ - db: db, - chain: chain, - } -} - -// close tears down the transaction pool and chain behind the mock backend. -func (b *testBackend) close() { - b.chain.Stop() -} - -func (b *testBackend) Chain() *BlockChain { return b.chain } - -func TestFreezeDiffLayer(t *testing.T) { - blockNum := 1024 - fullBackend := newTestBackend(blockNum, true) - defer fullBackend.close() - for len(fullBackend.chain.diffQueueBuffer) > 0 { - // Wait for the buffer to be zero. - } - // Minus one empty block. - if fullBackend.chain.diffQueue.Size() > blockNum-1 && fullBackend.chain.diffQueue.Size() < blockNum-2 { - t.Errorf("size of diff queue is wrong, expected: %d, get: %d", blockNum-1, fullBackend.chain.diffQueue.Size()) - } - - time.Sleep(diffLayerFreezerRecheckInterval + 2*time.Second) - if fullBackend.chain.diffQueue.Size() != int(fullBackend.chain.triesInMemory) { - t.Errorf("size of diff queue is wrong, expected: %d, get: %d", blockNum, fullBackend.chain.diffQueue.Size()) - } - - block := fullBackend.chain.GetBlockByNumber(uint64(blockNum / 2)) - diffStore := fullBackend.chain.db.DiffStore() - rawData := rawdb.ReadDiffLayerRLP(diffStore, block.Hash()) - if len(rawData) == 0 { - t.Error("do not find diff layer in db") - } -} - -// newTwoForkedBlockchains returns two blockchains, these two chains are generated by different -// generators, they have some same parent blocks, the number of same blocks are determined by -// testBlocks, once chain1 inserted a non-default block, chain1 and chain2 get forked. 
-func newTwoForkedBlockchains(len1, len2 int) (chain1 *BlockChain, chain2 *BlockChain) { - signer := types.HomesteadSigner{} - // Create a database pre-initialize with a genesis block - db1 := rawdb.NewMemoryDatabase() - db1.SetDiffStore(memorydb.New()) - gspec := &Genesis{ - Config: params.TestChainConfig, - Alloc: GenesisAlloc{testAddr: {Balance: big.NewInt(100000000000000000)}}, - BaseFee: big.NewInt(params.InitialBaseFee), - } - engine1 := ethash.NewFaker() - chain1, _ = NewBlockChain(db1, nil, gspec, nil, engine1, vm.Config{}, nil, nil, EnablePersistDiff(860000), EnableBlockValidator(params.TestChainConfig, engine1, 0, nil)) - generator1 := func(i int, block *BlockGen) { - // The chain maker doesn't have access to a chain, so the difficulty will be - // lets unset (nil). Set it here to the correct value. - block.SetCoinbase(testAddr) - - for idx, testBlock := range testBlocks { - // Specific block setting, the index in this generator has 1 diff from specified blockNr. - if i+1 == testBlock.blockNr { - for _, testTransaction := range testBlock.txs { - var transaction *types.Transaction - if testTransaction.to == nil { - transaction = types.NewContractCreation(block.TxNonce(testAddr), - testTransaction.value, uint64(commonGas), testTransaction.gasPrice, testTransaction.data) - } else { - transaction = types.NewTransaction(block.TxNonce(testAddr), *testTransaction.to, - testTransaction.value, uint64(commonGas), testTransaction.gasPrice, testTransaction.data) - } - tx, err := types.SignTx(transaction, signer, testKey) - if err != nil { - panic(err) - } - block.AddTxWithChain(chain1, tx) - } - break - } - - // Default block setting. - if idx == len(testBlocks)-1 { - // We want to simulate an empty middle block, having the same state as the - // first one. The last is needs a state change again to force a reorg. - for _, testTransaction := range testBlocks[0].txs { - tx, err := types.SignTx(types.NewTransaction(block.TxNonce(testAddr), *testTransaction.to, - testTransaction.value, uint64(commonGas), testTransaction.gasPrice, testTransaction.data), signer, testKey) - if err != nil { - panic(err) - } - block.AddTxWithChain(chain1, tx) - } - } - } - } - bs1, _ := GenerateChain(params.TestChainConfig, chain1.Genesis(), ethash.NewFaker(), db1, len1, generator1) - if _, err := chain1.InsertChain(bs1); err != nil { - panic(err) - } - waitDifflayerCached(chain1, bs1) - - // Create a database pre-initialize with a genesis block - db2 := rawdb.NewMemoryDatabase() - db2.SetDiffStore(memorydb.New()) - gspec2 := &Genesis{ - Config: params.TestChainConfig, - Alloc: GenesisAlloc{testAddr: {Balance: big.NewInt(100000000000000000)}}, - BaseFee: big.NewInt(params.InitialBaseFee), - } - engine2 := ethash.NewFaker() - chain2, _ = NewBlockChain(db2, nil, gspec2, nil, ethash.NewFaker(), vm.Config{}, nil, nil, EnablePersistDiff(860000), EnableBlockValidator(params.TestChainConfig, engine2, 0, nil)) - generator2 := func(i int, block *BlockGen) { - // The chain maker doesn't have access to a chain, so the difficulty will be - // lets unset (nil). Set it here to the correct value. - block.SetCoinbase(testAddr) - // We want to simulate an empty middle block, having the same state as the - // first one. The last is needs a state change again to force a reorg. 
- for _, testTransaction := range testBlocks[0].txs { - tx, err := types.SignTx(types.NewTransaction(block.TxNonce(testAddr), *testTransaction.to, - testTransaction.value, uint64(commonGas), testTransaction.gasPrice, testTransaction.data), signer, testKey) - if err != nil { - panic(err) - } - block.AddTxWithChain(chain1, tx) - } - } - bs2, _ := GenerateChain(params.TestChainConfig, chain2.Genesis(), ethash.NewFaker(), db2, len2, generator2) - if _, err := chain2.InsertChain(bs2); err != nil { - panic(err) - } - waitDifflayerCached(chain2, bs2) - - return chain1, chain2 -} - -func waitDifflayerCached(chain *BlockChain, bs types.Blocks) { - for _, block := range bs { - // wait for all difflayers to be cached - for block.Header().TxHash != types.EmptyRootHash && - chain.GetTrustedDiffLayer(block.Hash()) == nil { - time.Sleep(time.Second) - } - } -} - -func testGetRootByDiffHash(t *testing.T, chain1, chain2 *BlockChain, blockNumber uint64, status types.VerifyStatus) { - block2 := chain2.GetBlockByNumber(blockNumber) - if block2 == nil { - t.Fatalf("failed to find block, number: %v", blockNumber) - } - expect := VerifyResult{ - Status: status, - BlockNumber: blockNumber, - BlockHash: block2.Hash(), - } - if status.Code&0xff00 == types.StatusVerified.Code { - expect.Root = block2.Root() - } - - diffLayer2 := chain2.GetTrustedDiffLayer(block2.Hash()) - if diffLayer2 == nil { - t.Fatal("failed to find diff layer") - } - diffHash2 := types.EmptyRootHash - if status != types.StatusDiffHashMismatch { - var err error - diffHash2, err = CalculateDiffHash(diffLayer2) - if err != nil { - t.Fatalf("failed to compute diff hash: %v", err) - } - } - - if status == types.StatusPartiallyVerified { - block1 := chain1.GetBlockByNumber(blockNumber) - if block1 == nil { - t.Fatalf("failed to find block, number: %v", blockNumber) - } - chain1.diffLayerCache.Remove(block1.Hash()) - } - - result := chain1.GetVerifyResult(blockNumber, block2.Hash(), diffHash2) - if result.Status != expect.Status { - t.Fatalf("failed to verify block, number: %v, expect status: %v, real status: %v", blockNumber, expect.Status, result.Status) - } - if result.Root != expect.Root { - t.Fatalf("failed to verify block, number: %v, expect root: %v, real root: %v", blockNumber, expect.Root, result.Root) - } -} - -func TestGetRootByDiffHash(t *testing.T) { - len1 := 23 // length of blockchain1 - len2 := 35 // length of blockchain2 - plen := 11 // length of same parent blocks, which determined by testBlocks. 
- - chain1, chain2 := newTwoForkedBlockchains(len1, len2) - defer chain1.Stop() - defer chain2.Stop() - - hash1 := chain1.GetBlockByNumber(uint64(plen)).Hash() - hash2 := chain2.GetBlockByNumber(uint64(plen)).Hash() - if hash1 != hash2 { - t.Errorf("chain content mismatch at %d: have hash %v, want hash %v", plen, hash2, hash1) - } - - testGetRootByDiffHash(t, chain1, chain2, 10, types.StatusFullVerified) - testGetRootByDiffHash(t, chain1, chain2, 2, types.StatusPartiallyVerified) - testGetRootByDiffHash(t, chain1, chain2, 10, types.StatusDiffHashMismatch) - testGetRootByDiffHash(t, chain1, chain2, 12, types.StatusImpossibleFork) - testGetRootByDiffHash(t, chain1, chain2, 20, types.StatusPossibleFork) - testGetRootByDiffHash(t, chain1, chain2, 24, types.StatusBlockNewer) - testGetRootByDiffHash(t, chain1, chain2, 35, types.StatusBlockTooNew) -} diff --git a/core/blockchain_notries_test.go b/core/blockchain_notries_test.go deleted file mode 100644 index 57b150701a..0000000000 --- a/core/blockchain_notries_test.go +++ /dev/null @@ -1,226 +0,0 @@ -// Copyright 2020 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -// Tests that abnormal program termination (i.e.crash) and restart doesn't leave -// the database in some strange state with gaps in the chain, nor with block data -// dangling in the future. - -package core - -import ( - "math/big" - "testing" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/consensus/ethash" - "github.com/ethereum/go-ethereum/core/rawdb" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/core/vm" - "github.com/ethereum/go-ethereum/ethdb/memorydb" - "github.com/ethereum/go-ethereum/params" -) - -func newMockVerifyPeer() *mockVerifyPeer { - return &mockVerifyPeer{} -} - -type requestRoot struct { - blockNumber uint64 - blockHash common.Hash - diffHash common.Hash -} - -type verifFailedStatus struct { - status types.VerifyStatus - blockNumber uint64 -} - -// mockVerifyPeer is a mocking struct that simulates p2p signals for verification tasks. 
-type mockVerifyPeer struct { - callback func(*requestRoot) -} - -func (peer *mockVerifyPeer) setCallBack(callback func(*requestRoot)) { - peer.callback = callback -} - -func (peer *mockVerifyPeer) RequestRoot(blockNumber uint64, blockHash common.Hash, diffHash common.Hash) error { - if peer.callback != nil { - peer.callback(&requestRoot{blockNumber, blockHash, diffHash}) - } - return nil -} - -func (peer *mockVerifyPeer) ID() string { - return "mock_peer" -} - -type mockVerifyPeers struct { - peers []VerifyPeer -} - -func (peers *mockVerifyPeers) GetVerifyPeers() []VerifyPeer { - return peers.peers -} - -func newMockRemoteVerifyPeer(peers []VerifyPeer) *mockVerifyPeers { - return &mockVerifyPeers{peers} -} - -func makeTestBackendWithRemoteValidator(blocks int, mode VerifyMode, failed *verifFailedStatus) (*testBackend, *testBackend, []*types.Block, error) { - signer := types.HomesteadSigner{} - - // Create a database pre-initialize with a genesis block - db := rawdb.NewMemoryDatabase() - db.SetDiffStore(memorydb.New()) - gspec := &Genesis{ - Config: params.TestChainConfig, - Alloc: GenesisAlloc{testAddr: {Balance: big.NewInt(100000000000000000)}}, - } - engine := ethash.NewFaker() - - db2 := rawdb.NewMemoryDatabase() - db2.SetDiffStore(memorydb.New()) - gspec2 := &Genesis{ - Config: params.TestChainConfig, - Alloc: GenesisAlloc{testAddr: {Balance: big.NewInt(100000000000000000)}}, - } - engine2 := ethash.NewFaker() - - peer := newMockVerifyPeer() - peers := []VerifyPeer{peer} - - verifier, err := NewBlockChain(db, nil, gspec, nil, engine, vm.Config{}, - nil, nil, EnablePersistDiff(100000), EnableBlockValidator(params.TestChainConfig, engine2, LocalVerify, nil)) - if err != nil { - return nil, nil, nil, err - } - - fastnode, err := NewBlockChain(db2, nil, gspec2, nil, engine2, vm.Config{}, - nil, nil, EnableBlockValidator(params.TestChainConfig, engine2, mode, newMockRemoteVerifyPeer(peers))) - if err != nil { - return nil, nil, nil, err - } - - generator := func(i int, block *BlockGen) { - // The chain maker doesn't have access to a chain, so the difficulty will be - // lets unset (nil). Set it here to the correct value. - block.SetCoinbase(testAddr) - - for idx, testBlock := range testBlocks { - // Specific block setting, the index in this generator has 1 diff from specified blockNr. - if i+1 == testBlock.blockNr { - for _, testTransaction := range testBlock.txs { - var transaction *types.Transaction - if testTransaction.to == nil { - transaction = types.NewContractCreation(block.TxNonce(testAddr), - testTransaction.value, uint64(commonGas), testTransaction.gasPrice, testTransaction.data) - } else { - transaction = types.NewTransaction(block.TxNonce(testAddr), *testTransaction.to, - testTransaction.value, uint64(commonGas), testTransaction.gasPrice, testTransaction.data) - } - tx, err := types.SignTx(transaction, signer, testKey) - if err != nil { - panic(err) - } - block.AddTxWithChain(verifier, tx) - } - break - } - - // Default block setting. - if idx == len(testBlocks)-1 { - // We want to simulate an empty middle block, having the same state as the - // first one. The last is needs a state change again to force a reorg. 
- for _, testTransaction := range testBlocks[0].txs { - tx, err := types.SignTx(types.NewTransaction(block.TxNonce(testAddr), *testTransaction.to, - testTransaction.value, uint64(commonGas), testTransaction.gasPrice, testTransaction.data), signer, testKey) - if err != nil { - panic(err) - } - block.AddTxWithChain(verifier, tx) - } - } - } - } - peer.setCallBack(func(req *requestRoot) { - if fastnode.validator != nil && fastnode.validator.RemoteVerifyManager() != nil { - resp := verifier.GetVerifyResult(req.blockNumber, req.blockHash, req.diffHash) - if failed != nil && req.blockNumber == failed.blockNumber { - resp.Status = failed.status - } - fastnode.validator.RemoteVerifyManager(). - HandleRootResponse( - resp, peer.ID()) - } - }) - - bs, _ := GenerateChain(params.TestChainConfig, verifier.Genesis(), ethash.NewFaker(), db, blocks, generator) - if _, err := verifier.InsertChain(bs); err != nil { - return nil, nil, nil, err - } - waitDifflayerCached(verifier, bs) - - return &testBackend{ - db: db, - chain: verifier, - }, - &testBackend{ - db: db2, - chain: fastnode, - }, bs, nil -} - -func TestFastNode(t *testing.T) { - // test full mode and succeed - _, fastnode, blocks, err := makeTestBackendWithRemoteValidator(2048, FullVerify, nil) - if err != nil { - t.Fatalf(err.Error()) - } - _, err = fastnode.chain.InsertChain(blocks) - if err != nil { - t.Fatalf(err.Error()) - } - // test full mode and failed - failed := &verifFailedStatus{status: types.StatusDiffHashMismatch, blockNumber: 204} - _, fastnode, blocks, err = makeTestBackendWithRemoteValidator(2048, FullVerify, failed) - if err != nil { - t.Fatalf(err.Error()) - } - _, err = fastnode.chain.InsertChain(blocks) - if err == nil || fastnode.chain.CurrentBlock().Number.Uint64() != failed.blockNumber+10 { - t.Fatalf("blocks insert should be failed at height %d", failed.blockNumber+11) - } - // test insecure mode and succeed - _, fastnode, blocks, err = makeTestBackendWithRemoteValidator(2048, InsecureVerify, nil) - if err != nil { - t.Fatalf(err.Error()) - } - _, err = fastnode.chain.InsertChain(blocks) - if err != nil { - t.Fatalf(err.Error()) - } - // test insecure mode and failed - failed = &verifFailedStatus{status: types.StatusImpossibleFork, blockNumber: 204} - _, fastnode, blocks, err = makeTestBackendWithRemoteValidator(2048, FullVerify, failed) - if err != nil { - t.Fatalf(err.Error()) - } - _, err = fastnode.chain.InsertChain(blocks) - if err == nil || fastnode.chain.CurrentBlock().Number.Uint64() != failed.blockNumber+10 { - t.Fatalf("blocks insert should be failed at height %d", failed.blockNumber+11) - } -} diff --git a/core/blockchain_reader.go b/core/blockchain_reader.go index d440590b8b..f48786974e 100644 --- a/core/blockchain_reader.go +++ b/core/blockchain_reader.go @@ -113,20 +113,8 @@ func (bc *BlockChain) GetHeadersFrom(number, count uint64) []rlp.RawValue { // GetBody retrieves a block body (transactions and uncles) from the database by // hash, caching it if found. 
func (bc *BlockChain) GetBody(hash common.Hash) *types.Body { - // Short circuit if the body's already in the cache, retrieve otherwise - if cached, ok := bc.bodyCache.Get(hash); ok { - return cached - } - number := bc.hc.GetBlockNumber(hash) - if number == nil { - return nil - } - body := rawdb.ReadBody(bc.db, hash, *number) - if body == nil { - return nil - } - // Cache the found body for next time and return - bc.bodyCache.Add(hash, body) + // get from block archiver, and the bodyCache will be updated there + body, _, _ := bc.blockArchiverService.GetBlockByHash(hash) return body } diff --git a/core/blockchain_repair_test.go b/core/blockchain_repair_test.go deleted file mode 100644 index 3cfcdafe4a..0000000000 --- a/core/blockchain_repair_test.go +++ /dev/null @@ -1,2018 +0,0 @@ -// Copyright 2020 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -// Tests that abnormal program termination (i.e.crash) and restart doesn't leave -// the database in some strange state with gaps in the chain, nor with block data -// dangling in the future. - -package core - -import ( - "math/big" - "path" - "testing" - "time" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/consensus/ethash" - "github.com/ethereum/go-ethereum/core/rawdb" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/core/vm" - "github.com/ethereum/go-ethereum/params" -) - -// Tests a recovery for a short canonical chain where a recent block was already -// committed to disk and then the process crashed. In this case we expect the full -// chain to be rolled back to the committed block, but the chain data itself left -// in the database for replaying. -func TestShortRepair(t *testing.T) { testShortRepair(t, false) } -func TestShortRepairWithSnapshots(t *testing.T) { testShortRepair(t, true) } - -func testShortRepair(t *testing.T, snapshots bool) { - // Chain: - // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD) - // - // Frozen: none - // Commit: G, C4 - // Pivot : none - // - // CRASH - // - // ------------------------------ - // - // Expected in leveldb: - // G->C1->C2->C3->C4->C5->C6->C7->C8 - // - // Expected head header : C8 - // Expected head fast block: C8 - // Expected head block : C4 - testRepair(t, &rewindTest{ - canonicalBlocks: 8, - sidechainBlocks: 0, - freezeThreshold: 16, - commitBlock: 4, - pivotBlock: nil, - expCanonicalBlocks: 8, - expSidechainBlocks: 0, - expFrozen: 0, - expHeadHeader: 8, - expHeadFastBlock: 8, - expHeadBlock: 4, - }, snapshots) -} - -// Tests a recovery for a short canonical chain where the fast sync pivot point was -// already committed, after which the process crashed. In this case we expect the full -// chain to be rolled back to the committed block, but the chain data itself left in -// the database for replaying. 
-func TestShortSnapSyncedRepair(t *testing.T) { testShortSnapSyncedRepair(t, false) } -func TestShortSnapSyncedRepairWithSnapshots(t *testing.T) { testShortSnapSyncedRepair(t, true) } - -func testShortSnapSyncedRepair(t *testing.T, snapshots bool) { - // Chain: - // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD) - // - // Frozen: none - // Commit: G, C4 - // Pivot : C4 - // - // CRASH - // - // ------------------------------ - // - // Expected in leveldb: - // G->C1->C2->C3->C4->C5->C6->C7->C8 - // - // Expected head header : C8 - // Expected head fast block: C8 - // Expected head block : C4 - testRepair(t, &rewindTest{ - canonicalBlocks: 8, - sidechainBlocks: 0, - freezeThreshold: 16, - commitBlock: 4, - pivotBlock: uint64ptr(4), - expCanonicalBlocks: 8, - expSidechainBlocks: 0, - expFrozen: 0, - expHeadHeader: 8, - expHeadFastBlock: 8, - expHeadBlock: 4, - }, snapshots) -} - -// Tests a recovery for a short canonical chain where the fast sync pivot point was -// not yet committed, but the process crashed. In this case we expect the chain to -// detect that it was fast syncing and not delete anything, since we can just pick -// up directly where we left off. -func TestShortSnapSyncingRepair(t *testing.T) { testShortSnapSyncingRepair(t, false) } -func TestShortSnapSyncingRepairWithSnapshots(t *testing.T) { testShortSnapSyncingRepair(t, true) } - -func testShortSnapSyncingRepair(t *testing.T, snapshots bool) { - // Chain: - // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD) - // - // Frozen: none - // Commit: G - // Pivot : C4 - // - // CRASH - // - // ------------------------------ - // - // Expected in leveldb: - // G->C1->C2->C3->C4->C5->C6->C7->C8 - // - // Expected head header : C8 - // Expected head fast block: C8 - // Expected head block : G - testRepair(t, &rewindTest{ - canonicalBlocks: 8, - sidechainBlocks: 0, - freezeThreshold: 16, - commitBlock: 0, - pivotBlock: uint64ptr(4), - expCanonicalBlocks: 8, - expSidechainBlocks: 0, - expFrozen: 0, - expHeadHeader: 8, - expHeadFastBlock: 8, - expHeadBlock: 0, - }, snapshots) -} - -// Tests a recovery for a short canonical chain and a shorter side chain, where a -// recent block was already committed to disk and then the process crashed. In this -// test scenario the side chain is below the committed block. In this case we expect -// the canonical chain to be rolled back to the committed block, but the chain data -// itself left in the database for replaying. -func TestShortOldForkedRepair(t *testing.T) { testShortOldForkedRepair(t, false) } -func TestShortOldForkedRepairWithSnapshots(t *testing.T) { testShortOldForkedRepair(t, true) } - -func testShortOldForkedRepair(t *testing.T, snapshots bool) { - // Chain: - // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD) - // └->S1->S2->S3 - // - // Frozen: none - // Commit: G, C4 - // Pivot : none - // - // CRASH - // - // ------------------------------ - // - // Expected in leveldb: - // G->C1->C2->C3->C4->C5->C6->C7->C8 - // └->S1->S2->S3 - // - // Expected head header : C8 - // Expected head fast block: C8 - // Expected head block : C4 - testRepair(t, &rewindTest{ - canonicalBlocks: 8, - sidechainBlocks: 3, - freezeThreshold: 16, - commitBlock: 4, - pivotBlock: nil, - expCanonicalBlocks: 8, - expSidechainBlocks: 3, - expFrozen: 0, - expHeadHeader: 8, - expHeadFastBlock: 8, - expHeadBlock: 4, - }, snapshots) -} - -// Tests a recovery for a short canonical chain and a shorter side chain, where -// the fast sync pivot point was already committed to disk and then the process -// crashed. 
In this test scenario the side chain is below the committed block. In -// this case we expect the canonical chain to be rolled back to the committed block, -// but the chain data itself left in the database for replaying. -func TestShortOldForkedSnapSyncedRepair(t *testing.T) { - testShortOldForkedSnapSyncedRepair(t, false) -} -func TestShortOldForkedSnapSyncedRepairWithSnapshots(t *testing.T) { - testShortOldForkedSnapSyncedRepair(t, true) -} - -func testShortOldForkedSnapSyncedRepair(t *testing.T, snapshots bool) { - // Chain: - // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD) - // └->S1->S2->S3 - // - // Frozen: none - // Commit: G, C4 - // Pivot : C4 - // - // CRASH - // - // ------------------------------ - // - // Expected in leveldb: - // G->C1->C2->C3->C4->C5->C6->C7->C8 - // └->S1->S2->S3 - // - // Expected head header : C8 - // Expected head fast block: C8 - // Expected head block : C4 - testRepair(t, &rewindTest{ - canonicalBlocks: 8, - sidechainBlocks: 3, - freezeThreshold: 16, - commitBlock: 4, - pivotBlock: uint64ptr(4), - expCanonicalBlocks: 8, - expSidechainBlocks: 3, - expFrozen: 0, - expHeadHeader: 8, - expHeadFastBlock: 8, - expHeadBlock: 4, - }, snapshots) -} - -// Tests a recovery for a short canonical chain and a shorter side chain, where -// the fast sync pivot point was not yet committed, but the process crashed. In this -// test scenario the side chain is below the committed block. In this case we expect -// the chain to detect that it was fast syncing and not delete anything, since we -// can just pick up directly where we left off. -func TestShortOldForkedSnapSyncingRepair(t *testing.T) { - testShortOldForkedSnapSyncingRepair(t, false) -} -func TestShortOldForkedSnapSyncingRepairWithSnapshots(t *testing.T) { - testShortOldForkedSnapSyncingRepair(t, true) -} - -func testShortOldForkedSnapSyncingRepair(t *testing.T, snapshots bool) { - // Chain: - // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD) - // └->S1->S2->S3 - // - // Frozen: none - // Commit: G - // Pivot : C4 - // - // CRASH - // - // ------------------------------ - // - // Expected in leveldb: - // G->C1->C2->C3->C4->C5->C6->C7->C8 - // └->S1->S2->S3 - // - // Expected head header : C8 - // Expected head fast block: C8 - // Expected head block : G - testRepair(t, &rewindTest{ - canonicalBlocks: 8, - sidechainBlocks: 3, - freezeThreshold: 16, - commitBlock: 0, - pivotBlock: uint64ptr(4), - expCanonicalBlocks: 8, - expSidechainBlocks: 3, - expFrozen: 0, - expHeadHeader: 8, - expHeadFastBlock: 8, - expHeadBlock: 0, - }, snapshots) -} - -// Tests a recovery for a short canonical chain and a shorter side chain, where a -// recent block was already committed to disk and then the process crashed. In this -// test scenario the side chain reaches above the committed block. In this case we -// expect the canonical chain to be rolled back to the committed block, but the -// chain data itself left in the database for replaying. 
-func TestShortNewlyForkedRepair(t *testing.T) { testShortNewlyForkedRepair(t, false) } -func TestShortNewlyForkedRepairWithSnapshots(t *testing.T) { testShortNewlyForkedRepair(t, true) } - -func testShortNewlyForkedRepair(t *testing.T, snapshots bool) { - // Chain: - // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD) - // └->S1->S2->S3->S4->S5->S6 - // - // Frozen: none - // Commit: G, C4 - // Pivot : none - // - // CRASH - // - // ------------------------------ - // - // Expected in leveldb: - // G->C1->C2->C3->C4->C5->C6->C7->C8 - // └->S1->S2->S3->S4->S5->S6 - // - // Expected head header : C8 - // Expected head fast block: C8 - // Expected head block : C4 - testRepair(t, &rewindTest{ - canonicalBlocks: 8, - sidechainBlocks: 6, - freezeThreshold: 16, - commitBlock: 4, - pivotBlock: nil, - expCanonicalBlocks: 8, - expSidechainBlocks: 6, - expFrozen: 0, - expHeadHeader: 8, - expHeadFastBlock: 8, - expHeadBlock: 4, - }, snapshots) -} - -// Tests a recovery for a short canonical chain and a shorter side chain, where -// the fast sync pivot point was already committed to disk and then the process -// crashed. In this test scenario the side chain reaches above the committed block. -// In this case we expect the canonical chain to be rolled back to the committed -// block, but the chain data itself left in the database for replaying. -func TestShortNewlyForkedSnapSyncedRepair(t *testing.T) { - testShortNewlyForkedSnapSyncedRepair(t, false) -} -func TestShortNewlyForkedSnapSyncedRepairWithSnapshots(t *testing.T) { - testShortNewlyForkedSnapSyncedRepair(t, true) -} - -func testShortNewlyForkedSnapSyncedRepair(t *testing.T, snapshots bool) { - // Chain: - // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD) - // └->S1->S2->S3->S4->S5->S6 - // - // Frozen: none - // Commit: G, C4 - // Pivot : C4 - // - // CRASH - // - // ------------------------------ - // - // Expected in leveldb: - // G->C1->C2->C3->C4->C5->C6->C7->C8 - // └->S1->S2->S3->S4->S5->S6 - // - // Expected head header : C8 - // Expected head fast block: C8 - // Expected head block : C4 - testRepair(t, &rewindTest{ - canonicalBlocks: 8, - sidechainBlocks: 6, - freezeThreshold: 16, - commitBlock: 4, - pivotBlock: uint64ptr(4), - expCanonicalBlocks: 8, - expSidechainBlocks: 6, - expFrozen: 0, - expHeadHeader: 8, - expHeadFastBlock: 8, - expHeadBlock: 4, - }, snapshots) -} - -// Tests a recovery for a short canonical chain and a shorter side chain, where -// the fast sync pivot point was not yet committed, but the process crashed. In -// this test scenario the side chain reaches above the committed block. In this -// case we expect the chain to detect that it was fast syncing and not delete -// anything, since we can just pick up directly where we left off. 
-func TestShortNewlyForkedSnapSyncingRepair(t *testing.T) { - testShortNewlyForkedSnapSyncingRepair(t, false) -} -func TestShortNewlyForkedSnapSyncingRepairWithSnapshots(t *testing.T) { - testShortNewlyForkedSnapSyncingRepair(t, true) -} - -func testShortNewlyForkedSnapSyncingRepair(t *testing.T, snapshots bool) { - // Chain: - // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD) - // └->S1->S2->S3->S4->S5->S6 - // - // Frozen: none - // Commit: G - // Pivot : C4 - // - // CRASH - // - // ------------------------------ - // - // Expected in leveldb: - // G->C1->C2->C3->C4->C5->C6->C7->C8 - // └->S1->S2->S3->S4->S5->S6 - // - // Expected head header : C8 - // Expected head fast block: C8 - // Expected head block : G - testRepair(t, &rewindTest{ - canonicalBlocks: 8, - sidechainBlocks: 6, - freezeThreshold: 16, - commitBlock: 0, - pivotBlock: uint64ptr(4), - expCanonicalBlocks: 8, - expSidechainBlocks: 6, - expFrozen: 0, - expHeadHeader: 8, - expHeadFastBlock: 8, - expHeadBlock: 0, - }, snapshots) -} - -// Tests a recovery for a short canonical chain and a longer side chain, where a -// recent block was already committed to disk and then the process crashed. In this -// case we expect the canonical chain to be rolled back to the committed block, but -// the chain data itself left in the database for replaying. -func TestShortReorgedRepair(t *testing.T) { testShortReorgedRepair(t, false) } -func TestShortReorgedRepairWithSnapshots(t *testing.T) { testShortReorgedRepair(t, true) } - -func testShortReorgedRepair(t *testing.T, snapshots bool) { - // Chain: - // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD) - // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10 - // - // Frozen: none - // Commit: G, C4 - // Pivot : none - // - // CRASH - // - // ------------------------------ - // - // Expected in leveldb: - // G->C1->C2->C3->C4->C5->C6->C7->C8 - // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10 - // - // Expected head header : C8 - // Expected head fast block: C8 - // Expected head block : C4 - testRepair(t, &rewindTest{ - canonicalBlocks: 8, - sidechainBlocks: 10, - freezeThreshold: 16, - commitBlock: 4, - pivotBlock: nil, - expCanonicalBlocks: 8, - expSidechainBlocks: 10, - expFrozen: 0, - expHeadHeader: 8, - expHeadFastBlock: 8, - expHeadBlock: 4, - }, snapshots) -} - -// Tests a recovery for a short canonical chain and a longer side chain, where -// the fast sync pivot point was already committed to disk and then the process -// crashed. In this case we expect the canonical chain to be rolled back to the -// committed block, but the chain data itself left in the database for replaying. 
-func TestShortReorgedSnapSyncedRepair(t *testing.T) { - testShortReorgedSnapSyncedRepair(t, false) -} -func TestShortReorgedSnapSyncedRepairWithSnapshots(t *testing.T) { - testShortReorgedSnapSyncedRepair(t, true) -} - -func testShortReorgedSnapSyncedRepair(t *testing.T, snapshots bool) { - // Chain: - // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD) - // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10 - // - // Frozen: none - // Commit: G, C4 - // Pivot : C4 - // - // CRASH - // - // ------------------------------ - // - // Expected in leveldb: - // G->C1->C2->C3->C4->C5->C6->C7->C8 - // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10 - // - // Expected head header : C8 - // Expected head fast block: C8 - // Expected head block : C4 - testRepair(t, &rewindTest{ - canonicalBlocks: 8, - sidechainBlocks: 10, - freezeThreshold: 16, - commitBlock: 4, - pivotBlock: uint64ptr(4), - expCanonicalBlocks: 8, - expSidechainBlocks: 10, - expFrozen: 0, - expHeadHeader: 8, - expHeadFastBlock: 8, - expHeadBlock: 4, - }, snapshots) -} - -// Tests a recovery for a short canonical chain and a longer side chain, where -// the fast sync pivot point was not yet committed, but the process crashed. In -// this case we expect the chain to detect that it was fast syncing and not delete -// anything, since we can just pick up directly where we left off. -func TestShortReorgedSnapSyncingRepair(t *testing.T) { - testShortReorgedSnapSyncingRepair(t, false) -} -func TestShortReorgedSnapSyncingRepairWithSnapshots(t *testing.T) { - testShortReorgedSnapSyncingRepair(t, true) -} - -func testShortReorgedSnapSyncingRepair(t *testing.T, snapshots bool) { - // Chain: - // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD) - // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10 - // - // Frozen: none - // Commit: G - // Pivot : C4 - // - // CRASH - // - // ------------------------------ - // - // Expected in leveldb: - // G->C1->C2->C3->C4->C5->C6->C7->C8 - // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10 - // - // Expected head header : C8 - // Expected head fast block: C8 - // Expected head block : G - testRepair(t, &rewindTest{ - canonicalBlocks: 8, - sidechainBlocks: 10, - freezeThreshold: 16, - commitBlock: 0, - pivotBlock: uint64ptr(4), - expCanonicalBlocks: 8, - expSidechainBlocks: 10, - expFrozen: 0, - expHeadHeader: 8, - expHeadFastBlock: 8, - expHeadBlock: 0, - }, snapshots) -} - -// Tests a recovery for a long canonical chain with frozen blocks where a recent -// block - newer than the ancient limit - was already committed to disk and then -// the process crashed. In this case we expect the chain to be rolled back to the -// committed block, with everything afterwards kept as fast sync data. 
-func TestLongShallowRepair(t *testing.T) { testLongShallowRepair(t, false) } -func TestLongShallowRepairWithSnapshots(t *testing.T) { testLongShallowRepair(t, true) } - -func testLongShallowRepair(t *testing.T, snapshots bool) { - // Chain: - // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD) - // - // Frozen: - // G->C1->C2 - // - // Commit: G, C4 - // Pivot : none - // - // CRASH - // - // ------------------------------ - // - // Expected in freezer: - // G->C1->C2 - // - // Expected in leveldb: - // C2)->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 - // - // Expected head header : C18 - // Expected head fast block: C18 - // Expected head block : C4 - testRepair(t, &rewindTest{ - canonicalBlocks: 18, - sidechainBlocks: 0, - freezeThreshold: 16, - commitBlock: 4, - pivotBlock: nil, - expCanonicalBlocks: 18, - expSidechainBlocks: 0, - expFrozen: 3, - expHeadHeader: 18, - expHeadFastBlock: 18, - expHeadBlock: 4, - }, snapshots) -} - -// Tests a recovery for a long canonical chain with frozen blocks where a recent -// block - older than the ancient limit - was already committed to disk and then -// the process crashed. In this case we expect the chain to be rolled back to the -// committed block, with everything afterwards deleted. -func TestLongDeepRepair(t *testing.T) { testLongDeepRepair(t, false) } -func TestLongDeepRepairWithSnapshots(t *testing.T) { testLongDeepRepair(t, true) } - -func testLongDeepRepair(t *testing.T, snapshots bool) { - // Chain: - // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD) - // - // Frozen: - // G->C1->C2->C3->C4->C5->C6->C7->C8 - // - // Commit: G, C4 - // Pivot : none - // - // CRASH - // - // ------------------------------ - // - // Expected in freezer: - // G->C1->C2->C3->C4 - // - // Expected in leveldb: none - // - // Expected head header : C4 - // Expected head fast block: C4 - // Expected head block : C4 - testRepair(t, &rewindTest{ - canonicalBlocks: 24, - sidechainBlocks: 0, - freezeThreshold: 16, - commitBlock: 4, - pivotBlock: nil, - expCanonicalBlocks: 4, - expSidechainBlocks: 0, - expFrozen: 5, - expHeadHeader: 4, - expHeadFastBlock: 4, - expHeadBlock: 4, - }, snapshots) -} - -// Tests a recovery for a long canonical chain with frozen blocks where the fast -// sync pivot point - newer than the ancient limit - was already committed, after -// which the process crashed. In this case we expect the chain to be rolled back -// to the committed block, with everything afterwards kept as fast sync data. 
-func TestLongSnapSyncedShallowRepair(t *testing.T) { - testLongSnapSyncedShallowRepair(t, false) -} -func TestLongSnapSyncedShallowRepairWithSnapshots(t *testing.T) { - testLongSnapSyncedShallowRepair(t, true) -} - -func testLongSnapSyncedShallowRepair(t *testing.T, snapshots bool) { - // Chain: - // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD) - // - // Frozen: - // G->C1->C2 - // - // Commit: G, C4 - // Pivot : C4 - // - // CRASH - // - // ------------------------------ - // - // Expected in freezer: - // G->C1->C2 - // - // Expected in leveldb: - // C2)->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 - // - // Expected head header : C18 - // Expected head fast block: C18 - // Expected head block : C4 - testRepair(t, &rewindTest{ - canonicalBlocks: 18, - sidechainBlocks: 0, - freezeThreshold: 16, - commitBlock: 4, - pivotBlock: uint64ptr(4), - expCanonicalBlocks: 18, - expSidechainBlocks: 0, - expFrozen: 3, - expHeadHeader: 18, - expHeadFastBlock: 18, - expHeadBlock: 4, - }, snapshots) -} - -// Tests a recovery for a long canonical chain with frozen blocks where the fast -// sync pivot point - older than the ancient limit - was already committed, after -// which the process crashed. In this case we expect the chain to be rolled back -// to the committed block, with everything afterwards deleted. -func TestLongSnapSyncedDeepRepair(t *testing.T) { testLongSnapSyncedDeepRepair(t, false) } -func TestLongSnapSyncedDeepRepairWithSnapshots(t *testing.T) { testLongSnapSyncedDeepRepair(t, true) } - -func testLongSnapSyncedDeepRepair(t *testing.T, snapshots bool) { - // Chain: - // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD) - // - // Frozen: - // G->C1->C2->C3->C4->C5->C6->C7->C8 - // - // Commit: G, C4 - // Pivot : C4 - // - // CRASH - // - // ------------------------------ - // - // Expected in freezer: - // G->C1->C2->C3->C4 - // - // Expected in leveldb: none - // - // Expected head header : C4 - // Expected head fast block: C4 - // Expected head block : C4 - testRepair(t, &rewindTest{ - canonicalBlocks: 24, - sidechainBlocks: 0, - freezeThreshold: 16, - commitBlock: 4, - pivotBlock: uint64ptr(4), - expCanonicalBlocks: 4, - expSidechainBlocks: 0, - expFrozen: 5, - expHeadHeader: 4, - expHeadFastBlock: 4, - expHeadBlock: 4, - }, snapshots) -} - -// Tests a recovery for a long canonical chain with frozen blocks where the fast -// sync pivot point - older than the ancient limit - was not yet committed, but the -// process crashed. In this case we expect the chain to detect that it was fast -// syncing and not delete anything, since we can just pick up directly where we -// left off. 
-func TestLongSnapSyncingShallowRepair(t *testing.T) { - testLongSnapSyncingShallowRepair(t, false) -} -func TestLongSnapSyncingShallowRepairWithSnapshots(t *testing.T) { - testLongSnapSyncingShallowRepair(t, true) -} - -func testLongSnapSyncingShallowRepair(t *testing.T, snapshots bool) { - // Chain: - // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD) - // - // Frozen: - // G->C1->C2 - // - // Commit: G - // Pivot : C4 - // - // CRASH - // - // ------------------------------ - // - // Expected in freezer: - // G->C1->C2 - // - // Expected in leveldb: - // C2)->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 - // - // Expected head header : C18 - // Expected head fast block: C18 - // Expected head block : G - testRepair(t, &rewindTest{ - canonicalBlocks: 18, - sidechainBlocks: 0, - freezeThreshold: 16, - commitBlock: 0, - pivotBlock: uint64ptr(4), - expCanonicalBlocks: 18, - expSidechainBlocks: 0, - expFrozen: 3, - expHeadHeader: 18, - expHeadFastBlock: 18, - expHeadBlock: 0, - }, snapshots) -} - -// Tests a recovery for a long canonical chain with frozen blocks where the fast -// sync pivot point - newer than the ancient limit - was not yet committed, but the -// process crashed. In this case we expect the chain to detect that it was fast -// syncing and not delete anything, since we can just pick up directly where we -// left off. -func TestLongSnapSyncingDeepRepair(t *testing.T) { testLongSnapSyncingDeepRepair(t, false) } -func TestLongSnapSyncingDeepRepairWithSnapshots(t *testing.T) { testLongSnapSyncingDeepRepair(t, true) } - -func testLongSnapSyncingDeepRepair(t *testing.T, snapshots bool) { - // Chain: - // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD) - // - // Frozen: - // G->C1->C2->C3->C4->C5->C6->C7->C8 - // - // Commit: G - // Pivot : C4 - // - // CRASH - // - // ------------------------------ - // - // Expected in freezer: - // G->C1->C2->C3->C4->C5->C6->C7->C8 - // - // Expected in leveldb: - // C8)->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 - // - // Expected head header : C24 - // Expected head fast block: C24 - // Expected head block : G - testRepair(t, &rewindTest{ - canonicalBlocks: 24, - sidechainBlocks: 0, - freezeThreshold: 16, - commitBlock: 0, - pivotBlock: uint64ptr(4), - expCanonicalBlocks: 24, - expSidechainBlocks: 0, - expFrozen: 9, - expHeadHeader: 24, - expHeadFastBlock: 24, - expHeadBlock: 0, - }, snapshots) -} - -// Tests a recovery for a long canonical chain with frozen blocks and a shorter -// side chain, where a recent block - newer than the ancient limit - was already -// committed to disk and then the process crashed. In this test scenario the side -// chain is below the committed block. In this case we expect the chain to be -// rolled back to the committed block, with everything afterwards kept as fast -// sync data; the side chain completely nuked by the freezer. 
-func TestLongOldForkedShallowRepair(t *testing.T) { - testLongOldForkedShallowRepair(t, false) -} -func TestLongOldForkedShallowRepairWithSnapshots(t *testing.T) { - testLongOldForkedShallowRepair(t, true) -} - -func testLongOldForkedShallowRepair(t *testing.T, snapshots bool) { - // Chain: - // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD) - // └->S1->S2->S3 - // - // Frozen: - // G->C1->C2 - // - // Commit: G, C4 - // Pivot : none - // - // CRASH - // - // ------------------------------ - // - // Expected in freezer: - // G->C1->C2 - // - // Expected in leveldb: - // C2)->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 - // - // Expected head header : C18 - // Expected head fast block: C18 - // Expected head block : C4 - testRepair(t, &rewindTest{ - canonicalBlocks: 18, - sidechainBlocks: 3, - freezeThreshold: 16, - commitBlock: 4, - pivotBlock: nil, - expCanonicalBlocks: 18, - expSidechainBlocks: 0, - expFrozen: 3, - expHeadHeader: 18, - expHeadFastBlock: 18, - expHeadBlock: 4, - }, snapshots) -} - -// Tests a recovery for a long canonical chain with frozen blocks and a shorter -// side chain, where a recent block - older than the ancient limit - was already -// committed to disk and then the process crashed. In this test scenario the side -// chain is below the committed block. In this case we expect the canonical chain -// to be rolled back to the committed block, with everything afterwards deleted; -// the side chain completely nuked by the freezer. -func TestLongOldForkedDeepRepair(t *testing.T) { testLongOldForkedDeepRepair(t, false) } -func TestLongOldForkedDeepRepairWithSnapshots(t *testing.T) { testLongOldForkedDeepRepair(t, true) } - -func testLongOldForkedDeepRepair(t *testing.T, snapshots bool) { - // Chain: - // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD) - // └->S1->S2->S3 - // - // Frozen: - // G->C1->C2->C3->C4->C5->C6->C7->C8 - // - // Commit: G, C4 - // Pivot : none - // - // CRASH - // - // ------------------------------ - // - // Expected in freezer: - // G->C1->C2->C3->C4 - // - // Expected in leveldb: none - // - // Expected head header : C4 - // Expected head fast block: C4 - // Expected head block : C4 - testRepair(t, &rewindTest{ - canonicalBlocks: 24, - sidechainBlocks: 3, - freezeThreshold: 16, - commitBlock: 4, - pivotBlock: nil, - expCanonicalBlocks: 4, - expSidechainBlocks: 0, - expFrozen: 5, - expHeadHeader: 4, - expHeadFastBlock: 4, - expHeadBlock: 4, - }, snapshots) -} - -// Tests a recovery for a long canonical chain with frozen blocks and a shorter -// side chain, where the fast sync pivot point - newer than the ancient limit - -// was already committed to disk and then the process crashed. In this test scenario -// the side chain is below the committed block. In this case we expect the chain -// to be rolled back to the committed block, with everything afterwards kept as -// fast sync data; the side chain completely nuked by the freezer. 
-func TestLongOldForkedSnapSyncedShallowRepair(t *testing.T) { - testLongOldForkedSnapSyncedShallowRepair(t, false) -} -func TestLongOldForkedSnapSyncedShallowRepairWithSnapshots(t *testing.T) { - testLongOldForkedSnapSyncedShallowRepair(t, true) -} - -func testLongOldForkedSnapSyncedShallowRepair(t *testing.T, snapshots bool) { - // Chain: - // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD) - // └->S1->S2->S3 - // - // Frozen: - // G->C1->C2 - // - // Commit: G, C4 - // Pivot : C4 - // - // CRASH - // - // ------------------------------ - // - // Expected in freezer: - // G->C1->C2 - // - // Expected in leveldb: - // C2)->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 - // - // Expected head header : C18 - // Expected head fast block: C18 - // Expected head block : C4 - testRepair(t, &rewindTest{ - canonicalBlocks: 18, - sidechainBlocks: 3, - freezeThreshold: 16, - commitBlock: 4, - pivotBlock: uint64ptr(4), - expCanonicalBlocks: 18, - expSidechainBlocks: 0, - expFrozen: 3, - expHeadHeader: 18, - expHeadFastBlock: 18, - expHeadBlock: 4, - }, snapshots) -} - -// Tests a recovery for a long canonical chain with frozen blocks and a shorter -// side chain, where the fast sync pivot point - older than the ancient limit - -// was already committed to disk and then the process crashed. In this test scenario -// the side chain is below the committed block. In this case we expect the canonical -// chain to be rolled back to the committed block, with everything afterwards deleted; -// the side chain completely nuked by the freezer. -func TestLongOldForkedSnapSyncedDeepRepair(t *testing.T) { - testLongOldForkedSnapSyncedDeepRepair(t, false) -} -func TestLongOldForkedSnapSyncedDeepRepairWithSnapshots(t *testing.T) { - testLongOldForkedSnapSyncedDeepRepair(t, true) -} - -func testLongOldForkedSnapSyncedDeepRepair(t *testing.T, snapshots bool) { - // Chain: - // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD) - // └->S1->S2->S3 - // - // Frozen: - // G->C1->C2->C3->C4->C5->C6->C7->C8 - // - // Commit: G, C4 - // Pivot : C4 - // - // CRASH - // - // ------------------------------ - // - // Expected in freezer: - // G->C1->C2->C3->C4 - // - // Expected in leveldb: none - // - // Expected head header : C4 - // Expected head fast block: C4 - // Expected head block : C4 - testRepair(t, &rewindTest{ - canonicalBlocks: 24, - sidechainBlocks: 3, - freezeThreshold: 16, - commitBlock: 4, - pivotBlock: uint64ptr(4), - expCanonicalBlocks: 4, - expSidechainBlocks: 0, - expFrozen: 5, - expHeadHeader: 4, - expHeadFastBlock: 4, - expHeadBlock: 4, - }, snapshots) -} - -// Tests a recovery for a long canonical chain with frozen blocks and a shorter -// side chain, where the fast sync pivot point - older than the ancient limit - -// was not yet committed, but the process crashed. In this test scenario the side -// chain is below the committed block. In this case we expect the chain to detect -// that it was fast syncing and not delete anything. The side chain is completely -// nuked by the freezer. 
-func TestLongOldForkedSnapSyncingShallowRepair(t *testing.T) { - testLongOldForkedSnapSyncingShallowRepair(t, false) -} -func TestLongOldForkedSnapSyncingShallowRepairWithSnapshots(t *testing.T) { - testLongOldForkedSnapSyncingShallowRepair(t, true) -} - -func testLongOldForkedSnapSyncingShallowRepair(t *testing.T, snapshots bool) { - // Chain: - // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD) - // └->S1->S2->S3 - // - // Frozen: - // G->C1->C2 - // - // Commit: G - // Pivot : C4 - // - // CRASH - // - // ------------------------------ - // - // Expected in freezer: - // G->C1->C2 - // - // Expected in leveldb: - // C2)->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 - // - // Expected head header : C18 - // Expected head fast block: C18 - // Expected head block : G - testRepair(t, &rewindTest{ - canonicalBlocks: 18, - sidechainBlocks: 3, - freezeThreshold: 16, - commitBlock: 0, - pivotBlock: uint64ptr(4), - expCanonicalBlocks: 18, - expSidechainBlocks: 0, - expFrozen: 3, - expHeadHeader: 18, - expHeadFastBlock: 18, - expHeadBlock: 0, - }, snapshots) -} - -// Tests a recovery for a long canonical chain with frozen blocks and a shorter -// side chain, where the fast sync pivot point - older than the ancient limit - -// was not yet committed, but the process crashed. In this test scenario the side -// chain is below the committed block. In this case we expect the chain to detect -// that it was fast syncing and not delete anything. The side chain is completely -// nuked by the freezer. -func TestLongOldForkedSnapSyncingDeepRepair(t *testing.T) { - testLongOldForkedSnapSyncingDeepRepair(t, false) -} -func TestLongOldForkedSnapSyncingDeepRepairWithSnapshots(t *testing.T) { - testLongOldForkedSnapSyncingDeepRepair(t, true) -} - -func testLongOldForkedSnapSyncingDeepRepair(t *testing.T, snapshots bool) { - // Chain: - // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD) - // └->S1->S2->S3 - // - // Frozen: - // G->C1->C2->C3->C4->C5->C6->C7->C8 - // - // Commit: G - // Pivot : C4 - // - // CRASH - // - // ------------------------------ - // - // Expected in freezer: - // G->C1->C2->C3->C4->C5->C6->C7->C8 - // - // Expected in leveldb: - // C8)->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 - // - // Expected head header : C24 - // Expected head fast block: C24 - // Expected head block : G - testRepair(t, &rewindTest{ - canonicalBlocks: 24, - sidechainBlocks: 3, - freezeThreshold: 16, - commitBlock: 0, - pivotBlock: uint64ptr(4), - expCanonicalBlocks: 24, - expSidechainBlocks: 0, - expFrozen: 9, - expHeadHeader: 24, - expHeadFastBlock: 24, - expHeadBlock: 0, - }, snapshots) -} - -// Tests a recovery for a long canonical chain with frozen blocks and a shorter -// side chain, where a recent block - newer than the ancient limit - was already -// committed to disk and then the process crashed. In this test scenario the side -// chain is above the committed block. In this case we expect the chain to be -// rolled back to the committed block, with everything afterwards kept as fast -// sync data; the side chain completely nuked by the freezer. 
-func TestLongNewerForkedShallowRepair(t *testing.T) { - testLongNewerForkedShallowRepair(t, false) -} -func TestLongNewerForkedShallowRepairWithSnapshots(t *testing.T) { - testLongNewerForkedShallowRepair(t, true) -} - -func testLongNewerForkedShallowRepair(t *testing.T, snapshots bool) { - // Chain: - // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD) - // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12 - // - // Frozen: - // G->C1->C2 - // - // Commit: G, C4 - // Pivot : none - // - // CRASH - // - // ------------------------------ - // - // Expected in freezer: - // G->C1->C2 - // - // Expected in leveldb: - // C2)->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 - // - // Expected head header : C18 - // Expected head fast block: C18 - // Expected head block : C4 - testRepair(t, &rewindTest{ - canonicalBlocks: 18, - sidechainBlocks: 12, - freezeThreshold: 16, - commitBlock: 4, - pivotBlock: nil, - expCanonicalBlocks: 18, - expSidechainBlocks: 0, - expFrozen: 3, - expHeadHeader: 18, - expHeadFastBlock: 18, - expHeadBlock: 4, - }, snapshots) -} - -// Tests a recovery for a long canonical chain with frozen blocks and a shorter -// side chain, where a recent block - older than the ancient limit - was already -// committed to disk and then the process crashed. In this test scenario the side -// chain is above the committed block. In this case we expect the canonical chain -// to be rolled back to the committed block, with everything afterwards deleted; -// the side chain completely nuked by the freezer. -func TestLongNewerForkedDeepRepair(t *testing.T) { testLongNewerForkedDeepRepair(t, false) } -func TestLongNewerForkedDeepRepairWithSnapshots(t *testing.T) { testLongNewerForkedDeepRepair(t, true) } - -func testLongNewerForkedDeepRepair(t *testing.T, snapshots bool) { - // Chain: - // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD) - // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12 - // - // Frozen: - // G->C1->C2->C3->C4->C5->C6->C7->C8 - // - // Commit: G, C4 - // Pivot : none - // - // CRASH - // - // ------------------------------ - // - // Expected in freezer: - // G->C1->C2->C3->C4 - // - // Expected in leveldb: none - // - // Expected head header : C4 - // Expected head fast block: C4 - // Expected head block : C4 - testRepair(t, &rewindTest{ - canonicalBlocks: 24, - sidechainBlocks: 12, - freezeThreshold: 16, - commitBlock: 4, - pivotBlock: nil, - expCanonicalBlocks: 4, - expSidechainBlocks: 0, - expFrozen: 5, - expHeadHeader: 4, - expHeadFastBlock: 4, - expHeadBlock: 4, - }, snapshots) -} - -// Tests a recovery for a long canonical chain with frozen blocks and a shorter -// side chain, where the fast sync pivot point - newer than the ancient limit - -// was already committed to disk and then the process crashed. In this test scenario -// the side chain is above the committed block. In this case we expect the chain -// to be rolled back to the committed block, with everything afterwards kept as fast -// sync data; the side chain completely nuked by the freezer. 
-func TestLongNewerForkedSnapSyncedShallowRepair(t *testing.T) { - testLongNewerForkedSnapSyncedShallowRepair(t, false) -} -func TestLongNewerForkedSnapSyncedShallowRepairWithSnapshots(t *testing.T) { - testLongNewerForkedSnapSyncedShallowRepair(t, true) -} - -func testLongNewerForkedSnapSyncedShallowRepair(t *testing.T, snapshots bool) { - // Chain: - // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD) - // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12 - // - // Frozen: - // G->C1->C2 - // - // Commit: G, C4 - // Pivot : C4 - // - // CRASH - // - // ------------------------------ - // - // Expected in freezer: - // G->C1->C2 - // - // Expected in leveldb: - // C2)->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 - // - // Expected head header : C18 - // Expected head fast block: C18 - // Expected head block : C4 - testRepair(t, &rewindTest{ - canonicalBlocks: 18, - sidechainBlocks: 12, - freezeThreshold: 16, - commitBlock: 4, - pivotBlock: uint64ptr(4), - expCanonicalBlocks: 18, - expSidechainBlocks: 0, - expFrozen: 3, - expHeadHeader: 18, - expHeadFastBlock: 18, - expHeadBlock: 4, - }, snapshots) -} - -// Tests a recovery for a long canonical chain with frozen blocks and a shorter -// side chain, where the fast sync pivot point - older than the ancient limit - -// was already committed to disk and then the process crashed. In this test scenario -// the side chain is above the committed block. In this case we expect the canonical -// chain to be rolled back to the committed block, with everything afterwards deleted; -// the side chain completely nuked by the freezer. -func TestLongNewerForkedSnapSyncedDeepRepair(t *testing.T) { - testLongNewerForkedSnapSyncedDeepRepair(t, false) -} -func TestLongNewerForkedSnapSyncedDeepRepairWithSnapshots(t *testing.T) { - testLongNewerForkedSnapSyncedDeepRepair(t, true) -} - -func testLongNewerForkedSnapSyncedDeepRepair(t *testing.T, snapshots bool) { - // Chain: - // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD) - // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12 - // - // Frozen: - // G->C1->C2->C3->C4->C5->C6->C7->C8 - // - // Commit: G, C4 - // Pivot : C4 - // - // CRASH - // - // ------------------------------ - // - // Expected in freezer: - // G->C1->C2->C3->C4 - // - // Expected in leveldb: none - // - // Expected head header : C4 - // Expected head fast block: C4 - // Expected head block : C4 - testRepair(t, &rewindTest{ - canonicalBlocks: 24, - sidechainBlocks: 12, - freezeThreshold: 16, - commitBlock: 4, - pivotBlock: uint64ptr(4), - expCanonicalBlocks: 4, - expSidechainBlocks: 0, - expFrozen: 5, - expHeadHeader: 4, - expHeadFastBlock: 4, - expHeadBlock: 4, - }, snapshots) -} - -// Tests a recovery for a long canonical chain with frozen blocks and a shorter -// side chain, where the fast sync pivot point - older than the ancient limit - -// was not yet committed, but the process crashed. In this test scenario the side -// chain is above the committed block. In this case we expect the chain to detect -// that it was fast syncing and not delete anything. The side chain is completely -// nuked by the freezer. 
-func TestLongNewerForkedSnapSyncingShallowRepair(t *testing.T) { - testLongNewerForkedSnapSyncingShallowRepair(t, false) -} -func TestLongNewerForkedSnapSyncingShallowRepairWithSnapshots(t *testing.T) { - testLongNewerForkedSnapSyncingShallowRepair(t, true) -} - -func testLongNewerForkedSnapSyncingShallowRepair(t *testing.T, snapshots bool) { - // Chain: - // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD) - // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12 - // - // Frozen: - // G->C1->C2 - // - // Commit: G - // Pivot : C4 - // - // CRASH - // - // ------------------------------ - // - // Expected in freezer: - // G->C1->C2 - // - // Expected in leveldb: - // C2)->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 - // - // Expected head header : C18 - // Expected head fast block: C18 - // Expected head block : G - testRepair(t, &rewindTest{ - canonicalBlocks: 18, - sidechainBlocks: 12, - freezeThreshold: 16, - commitBlock: 0, - pivotBlock: uint64ptr(4), - expCanonicalBlocks: 18, - expSidechainBlocks: 0, - expFrozen: 3, - expHeadHeader: 18, - expHeadFastBlock: 18, - expHeadBlock: 0, - }, snapshots) -} - -// Tests a recovery for a long canonical chain with frozen blocks and a shorter -// side chain, where the fast sync pivot point - older than the ancient limit - -// was not yet committed, but the process crashed. In this test scenario the side -// chain is above the committed block. In this case we expect the chain to detect -// that it was fast syncing and not delete anything. The side chain is completely -// nuked by the freezer. -func TestLongNewerForkedSnapSyncingDeepRepair(t *testing.T) { - testLongNewerForkedSnapSyncingDeepRepair(t, false) -} -func TestLongNewerForkedSnapSyncingDeepRepairWithSnapshots(t *testing.T) { - testLongNewerForkedSnapSyncingDeepRepair(t, true) -} - -func testLongNewerForkedSnapSyncingDeepRepair(t *testing.T, snapshots bool) { - // Chain: - // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD) - // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12 - // - // Frozen: - // G->C1->C2->C3->C4->C5->C6->C7->C8 - // - // Commit: G - // Pivot : C4 - // - // CRASH - // - // ------------------------------ - // - // Expected in freezer: - // G->C1->C2->C3->C4->C5->C6->C7->C8 - // - // Expected in leveldb: - // C8)->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 - // - // Expected head header : C24 - // Expected head fast block: C24 - // Expected head block : G - testRepair(t, &rewindTest{ - canonicalBlocks: 24, - sidechainBlocks: 12, - freezeThreshold: 16, - commitBlock: 0, - pivotBlock: uint64ptr(4), - expCanonicalBlocks: 24, - expSidechainBlocks: 0, - expFrozen: 9, - expHeadHeader: 24, - expHeadFastBlock: 24, - expHeadBlock: 0, - }, snapshots) -} - -// Tests a recovery for a long canonical chain with frozen blocks and a longer side -// chain, where a recent block - newer than the ancient limit - was already committed -// to disk and then the process crashed. In this case we expect the chain to be -// rolled back to the committed block, with everything afterwards kept as fast sync -// data. The side chain completely nuked by the freezer. 
-func TestLongReorgedShallowRepair(t *testing.T) { testLongReorgedShallowRepair(t, false) } -func TestLongReorgedShallowRepairWithSnapshots(t *testing.T) { testLongReorgedShallowRepair(t, true) } - -func testLongReorgedShallowRepair(t *testing.T, snapshots bool) { - // Chain: - // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD) - // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26 - // - // Frozen: - // G->C1->C2 - // - // Commit: G, C4 - // Pivot : none - // - // CRASH - // - // ------------------------------ - // - // Expected in freezer: - // G->C1->C2 - // - // Expected in leveldb: - // C2)->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 - // - // Expected head header : C18 - // Expected head fast block: C18 - // Expected head block : C4 - testRepair(t, &rewindTest{ - canonicalBlocks: 18, - sidechainBlocks: 26, - freezeThreshold: 16, - commitBlock: 4, - pivotBlock: nil, - expCanonicalBlocks: 18, - expSidechainBlocks: 0, - expFrozen: 3, - expHeadHeader: 18, - expHeadFastBlock: 18, - expHeadBlock: 4, - }, snapshots) -} - -// Tests a recovery for a long canonical chain with frozen blocks and a longer side -// chain, where a recent block - older than the ancient limit - was already committed -// to disk and then the process crashed. In this case we expect the canonical chains -// to be rolled back to the committed block, with everything afterwards deleted. The -// side chain completely nuked by the freezer. -func TestLongReorgedDeepRepair(t *testing.T) { testLongReorgedDeepRepair(t, false) } -func TestLongReorgedDeepRepairWithSnapshots(t *testing.T) { testLongReorgedDeepRepair(t, true) } - -func testLongReorgedDeepRepair(t *testing.T, snapshots bool) { - // Chain: - // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD) - // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26 - // - // Frozen: - // G->C1->C2->C3->C4->C5->C6->C7->C8 - // - // Commit: G, C4 - // Pivot : none - // - // CRASH - // - // ------------------------------ - // - // Expected in freezer: - // G->C1->C2->C3->C4 - // - // Expected in leveldb: none - // - // Expected head header : C4 - // Expected head fast block: C4 - // Expected head block : C4 - testRepair(t, &rewindTest{ - canonicalBlocks: 24, - sidechainBlocks: 26, - freezeThreshold: 16, - commitBlock: 4, - pivotBlock: nil, - expCanonicalBlocks: 4, - expSidechainBlocks: 0, - expFrozen: 5, - expHeadHeader: 4, - expHeadFastBlock: 4, - expHeadBlock: 4, - }, snapshots) -} - -// Tests a recovery for a long canonical chain with frozen blocks and a longer -// side chain, where the fast sync pivot point - newer than the ancient limit - -// was already committed to disk and then the process crashed. In this case we -// expect the chain to be rolled back to the committed block, with everything -// afterwards kept as fast sync data. The side chain completely nuked by the -// freezer. 
-func TestLongReorgedSnapSyncedShallowRepair(t *testing.T) { - testLongReorgedSnapSyncedShallowRepair(t, false) -} -func TestLongReorgedSnapSyncedShallowRepairWithSnapshots(t *testing.T) { - testLongReorgedSnapSyncedShallowRepair(t, true) -} - -func testLongReorgedSnapSyncedShallowRepair(t *testing.T, snapshots bool) { - // Chain: - // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD) - // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26 - // - // Frozen: - // G->C1->C2 - // - // Commit: G, C4 - // Pivot : C4 - // - // CRASH - // - // ------------------------------ - // - // Expected in freezer: - // G->C1->C2 - // - // Expected in leveldb: - // C2)->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 - // - // Expected head header : C18 - // Expected head fast block: C18 - // Expected head block : C4 - testRepair(t, &rewindTest{ - canonicalBlocks: 18, - sidechainBlocks: 26, - freezeThreshold: 16, - commitBlock: 4, - pivotBlock: uint64ptr(4), - expCanonicalBlocks: 18, - expSidechainBlocks: 0, - expFrozen: 3, - expHeadHeader: 18, - expHeadFastBlock: 18, - expHeadBlock: 4, - }, snapshots) -} - -// Tests a recovery for a long canonical chain with frozen blocks and a longer -// side chain, where the fast sync pivot point - older than the ancient limit - -// was already committed to disk and then the process crashed. In this case we -// expect the canonical chains to be rolled back to the committed block, with -// everything afterwards deleted. The side chain completely nuked by the freezer. -func TestLongReorgedSnapSyncedDeepRepair(t *testing.T) { - testLongReorgedSnapSyncedDeepRepair(t, false) -} -func TestLongReorgedSnapSyncedDeepRepairWithSnapshots(t *testing.T) { - testLongReorgedSnapSyncedDeepRepair(t, true) -} - -func testLongReorgedSnapSyncedDeepRepair(t *testing.T, snapshots bool) { - // Chain: - // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD) - // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26 - // - // Frozen: - // G->C1->C2->C3->C4->C5->C6->C7->C8 - // - // Commit: G, C4 - // Pivot : C4 - // - // CRASH - // - // ------------------------------ - // - // Expected in freezer: - // G->C1->C2->C3->C4 - // - // Expected in leveldb: none - // - // Expected head header : C4 - // Expected head fast block: C4 - // Expected head block : C4 - testRepair(t, &rewindTest{ - canonicalBlocks: 24, - sidechainBlocks: 26, - freezeThreshold: 16, - commitBlock: 4, - pivotBlock: uint64ptr(4), - expCanonicalBlocks: 4, - expSidechainBlocks: 0, - expFrozen: 5, - expHeadHeader: 4, - expHeadFastBlock: 4, - expHeadBlock: 4, - }, snapshots) -} - -// Tests a recovery for a long canonical chain with frozen blocks and a longer -// side chain, where the fast sync pivot point - newer than the ancient limit - -// was not yet committed, but the process crashed. In this case we expect the -// chain to detect that it was fast syncing and not delete anything, since we -// can just pick up directly where we left off. 
-func TestLongReorgedSnapSyncingShallowRepair(t *testing.T) { - testLongReorgedSnapSyncingShallowRepair(t, false) -} -func TestLongReorgedSnapSyncingShallowRepairWithSnapshots(t *testing.T) { - testLongReorgedSnapSyncingShallowRepair(t, true) -} - -func testLongReorgedSnapSyncingShallowRepair(t *testing.T, snapshots bool) { - // Chain: - // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD) - // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26 - // - // Frozen: - // G->C1->C2 - // - // Commit: G - // Pivot : C4 - // - // CRASH - // - // ------------------------------ - // - // Expected in freezer: - // G->C1->C2 - // - // Expected in leveldb: - // C2)->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 - // - // Expected head header : C18 - // Expected head fast block: C18 - // Expected head block : G - testRepair(t, &rewindTest{ - canonicalBlocks: 18, - sidechainBlocks: 26, - freezeThreshold: 16, - commitBlock: 0, - pivotBlock: uint64ptr(4), - expCanonicalBlocks: 18, - expSidechainBlocks: 0, - expFrozen: 3, - expHeadHeader: 18, - expHeadFastBlock: 18, - expHeadBlock: 0, - }, snapshots) -} - -// Tests a recovery for a long canonical chain with frozen blocks and a longer -// side chain, where the fast sync pivot point - older than the ancient limit - -// was not yet committed, but the process crashed. In this case we expect the -// chain to detect that it was fast syncing and not delete anything, since we -// can just pick up directly where we left off. -func TestLongReorgedSnapSyncingDeepRepair(t *testing.T) { - testLongReorgedSnapSyncingDeepRepair(t, false) -} -func TestLongReorgedSnapSyncingDeepRepairWithSnapshots(t *testing.T) { - testLongReorgedSnapSyncingDeepRepair(t, true) -} - -func testLongReorgedSnapSyncingDeepRepair(t *testing.T, snapshots bool) { - // Chain: - // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD) - // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26 - // - // Frozen: - // G->C1->C2->C3->C4->C5->C6->C7->C8 - // - // Commit: G - // Pivot : C4 - // - // CRASH - // - // ------------------------------ - // - // Expected in freezer: - // G->C1->C2->C3->C4->C5->C6->C7->C8 - // - // Expected in leveldb: - // C8)->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 - // - // Expected head header : C24 - // Expected head fast block: C24 - // Expected head block : G - testRepair(t, &rewindTest{ - canonicalBlocks: 24, - sidechainBlocks: 26, - freezeThreshold: 16, - commitBlock: 0, - pivotBlock: uint64ptr(4), - expCanonicalBlocks: 24, - expSidechainBlocks: 0, - expFrozen: 9, - expHeadHeader: 24, - expHeadFastBlock: 24, - expHeadBlock: 0, - }, snapshots) -} - -func testRepair(t *testing.T, tt *rewindTest, snapshots bool) { - for _, scheme := range []string{rawdb.HashScheme, rawdb.PathScheme} { - testRepairWithScheme(t, tt, snapshots, scheme) - } -} - -func testRepairWithScheme(t *testing.T, tt *rewindTest, snapshots bool, scheme string) { - // It's hard to follow the test case, visualize the input - //log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(true)))) - // fmt.Println(tt.dump(true)) - - // Create a temporary persistent database - datadir := t.TempDir() - ancient := path.Join(datadir, "ancient") - - db, err := 
rawdb.Open(rawdb.OpenOptions{ - Directory: datadir, - AncientsDirectory: ancient, - Ephemeral: true, - }) - if err != nil { - t.Fatalf("Failed to create persistent database: %v", err) - } - defer db.Close() // Might double close, should be fine - - // Initialize a fresh chain - var ( - gspec = &Genesis{ - BaseFee: big.NewInt(params.InitialBaseFee), - Config: params.AllEthashProtocolChanges, - } - engine = ethash.NewFullFaker() - config = &CacheConfig{ - TrieCleanLimit: 256, - TrieDirtyLimit: 256, - TrieTimeLimit: 5 * time.Minute, - SnapshotLimit: 0, // Disable snapshot by default - StateScheme: scheme, - } - ) - defer engine.Close() - if snapshots { - config.SnapshotLimit = 256 - config.SnapshotWait = true - } - config.TriesInMemory = 128 - chain, err := NewBlockChain(db, config, gspec, nil, engine, vm.Config{}, nil, nil) - if err != nil { - t.Fatalf("Failed to create chain: %v", err) - } - // If sidechain blocks are needed, make a light chain and import it - var sideblocks types.Blocks - if tt.sidechainBlocks > 0 { - sideblocks, _ = GenerateChain(gspec.Config, gspec.ToBlock(), engine, rawdb.NewMemoryDatabase(), tt.sidechainBlocks, func(i int, b *BlockGen) { - b.SetCoinbase(common.Address{0x01}) - }) - if _, err := chain.InsertChain(sideblocks); err != nil { - t.Fatalf("Failed to import side chain: %v", err) - } - } - canonblocks, _ := GenerateChain(gspec.Config, gspec.ToBlock(), engine, rawdb.NewMemoryDatabase(), tt.canonicalBlocks, func(i int, b *BlockGen) { - b.SetCoinbase(common.Address{0x02}) - b.SetDifficulty(big.NewInt(1000000)) - }) - if _, err := chain.InsertChain(canonblocks[:tt.commitBlock]); err != nil { - t.Fatalf("Failed to import canonical chain start: %v", err) - } - if tt.commitBlock > 0 { - if err := chain.triedb.Commit(canonblocks[tt.commitBlock-1].Root(), false); err != nil { - t.Fatalf("Failed to flush trie state: %v", err) - } - if snapshots { - if err := chain.snaps.Cap(canonblocks[tt.commitBlock-1].Root(), 0); err != nil { - t.Fatalf("Failed to flatten snapshots: %v", err) - } - } - } - - if _, err := chain.InsertChain(canonblocks[tt.commitBlock:]); err != nil { - t.Fatalf("Failed to import canonical chain tail: %v", err) - } - // Force run a freeze cycle - type freezer interface { - Freeze(threshold uint64) error - Ancients() (uint64, error) - } - db.(freezer).Freeze(tt.freezeThreshold) - - // Set the simulated pivot block - if tt.pivotBlock != nil { - rawdb.WriteLastPivotNumber(db, *tt.pivotBlock) - } - // Pull the plug on the database, simulating a hard crash - chain.triedb.Close() - db.Close() - chain.stopWithoutSaving() - - // Start a new blockchain back up and see where the repair leads us - db, err = rawdb.Open(rawdb.OpenOptions{ - Directory: datadir, - AncientsDirectory: ancient, - Ephemeral: true, - }) - if err != nil { - t.Fatalf("Failed to reopen persistent database: %v", err) - } - defer db.Close() - - newChain, err := NewBlockChain(db, config, gspec, nil, engine, vm.Config{}, nil, nil) - if err != nil { - t.Fatalf("Failed to recreate chain: %v", err) - } - defer newChain.Stop() - - // Iterate over all the remaining blocks and ensure there are no gaps - verifyNoGaps(t, newChain, true, canonblocks) - verifyNoGaps(t, newChain, false, sideblocks) - verifyCutoff(t, newChain, true, canonblocks, tt.expCanonicalBlocks) - verifyCutoff(t, newChain, false, sideblocks, tt.expSidechainBlocks) - - if head := newChain.CurrentHeader(); head.Number.Uint64() != tt.expHeadHeader { - t.Errorf("Head header mismatch: have %d, want %d", head.Number, tt.expHeadHeader) - } - if 
head := newChain.CurrentSnapBlock(); head.Number.Uint64() != tt.expHeadFastBlock { - t.Errorf("Head fast block mismatch: have %d, want %d", head.Number, tt.expHeadFastBlock) - } - if head := newChain.CurrentBlock(); head.Number.Uint64() != tt.expHeadBlock { - t.Errorf("Head block mismatch: have %d, want %d", head.Number, tt.expHeadBlock) - } - if frozen, err := db.(freezer).Ancients(); err != nil { - t.Errorf("Failed to retrieve ancient count: %v\n", err) - } else if int(frozen) != tt.expFrozen { - t.Errorf("Frozen block count mismatch: have %d, want %d", frozen, tt.expFrozen) - } -} - -// TestIssue23496 tests scenario described in https://github.com/ethereum/go-ethereum/pull/23496#issuecomment-926393893 -// Credits to @zzyalbert for finding the issue. -// -// Local chain owns these blocks: -// G B1 B2 B3 B4 -// B1: state committed -// B2: snapshot disk layer -// B3: state committed -// B4: head block -// -// Crash happens without fully persisting snapshot and in-memory states, -// chain rewinds itself to the B1 (skip B3 in order to recover snapshot) -// In this case the snapshot layer of B3 is not created because of existent -// state. -func TestIssue23496(t *testing.T) { - testIssue23496(t, rawdb.HashScheme) - testIssue23496(t, rawdb.PathScheme) -} - -func testIssue23496(t *testing.T, scheme string) { - // It's hard to follow the test case, visualize the input - //log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(true)))) - - // Create a temporary persistent database - datadir := t.TempDir() - ancient := path.Join(datadir, "ancient") - - db, err := rawdb.Open(rawdb.OpenOptions{ - Directory: datadir, - AncientsDirectory: ancient, - }) - if err != nil { - t.Fatalf("Failed to create persistent database: %v", err) - } - defer db.Close() // Might double close, should be fine - - // Initialize a fresh chain - var ( - gspec = &Genesis{ - Config: params.TestChainConfig, - BaseFee: big.NewInt(params.InitialBaseFee), - } - engine = ethash.NewFullFaker() - ) - chain, err := NewBlockChain(db, DefaultCacheConfigWithScheme(scheme), gspec, nil, engine, vm.Config{}, nil, nil) - if err != nil { - t.Fatalf("Failed to create chain: %v", err) - } - _, blocks, _ := GenerateChainWithGenesis(gspec, engine, 4, func(i int, b *BlockGen) { - b.SetCoinbase(common.Address{0x02}) - b.SetDifficulty(big.NewInt(1000000)) - }) - - // Insert block B1 and commit the state into disk - if _, err := chain.InsertChain(blocks[:1]); err != nil { - t.Fatalf("Failed to import canonical chain start: %v", err) - } - chain.triedb.Commit(blocks[0].Root(), false) - - // Insert block B2 and commit the snapshot into disk - if _, err := chain.InsertChain(blocks[1:2]); err != nil { - t.Fatalf("Failed to import canonical chain start: %v", err) - } - if err := chain.snaps.Cap(blocks[1].Root(), 0); err != nil { - t.Fatalf("Failed to flatten snapshots: %v", err) - } - - // Insert block B3 and commit the state into disk - if _, err := chain.InsertChain(blocks[2:3]); err != nil { - t.Fatalf("Failed to import canonical chain start: %v", err) - } - chain.triedb.Commit(blocks[2].Root(), false) - - // Insert the remaining blocks - if _, err := chain.InsertChain(blocks[3:]); err != nil { - t.Fatalf("Failed to import canonical chain tail: %v", err) - } - - // Pull the plug on the database, simulating a hard crash - chain.triedb.Close() - db.Close() - chain.stopWithoutSaving() - - // Start a new blockchain back up and see where the repair leads us - db, err = rawdb.Open(rawdb.OpenOptions{ - 
Directory: datadir, - AncientsDirectory: ancient, - Ephemeral: true, - }) - if err != nil { - t.Fatalf("Failed to reopen persistent database: %v", err) - } - defer db.Close() - - chain, err = NewBlockChain(db, DefaultCacheConfigWithScheme(scheme), gspec, nil, engine, vm.Config{}, nil, nil) - if err != nil { - t.Fatalf("Failed to recreate chain: %v", err) - } - defer chain.Stop() - - if head := chain.CurrentHeader(); head.Number.Uint64() != uint64(4) { - t.Errorf("Head header mismatch: have %d, want %d", head.Number, 4) - } - if head := chain.CurrentSnapBlock(); head.Number.Uint64() != uint64(4) { - t.Errorf("Head fast block mismatch: have %d, want %d", head.Number, uint64(4)) - } - expHead := uint64(1) - if scheme == rawdb.PathScheme { - expHead = uint64(2) - } - if head := chain.CurrentBlock(); head.Number.Uint64() != expHead { - t.Errorf("Head block mismatch: have %d, want %d", head.Number, expHead) - } - - // Reinsert B2-B4 - if _, err := chain.InsertChain(blocks[1:]); err != nil { - t.Fatalf("Failed to import canonical chain tail: %v", err) - } - if head := chain.CurrentHeader(); head.Number.Uint64() != uint64(4) { - t.Errorf("Head header mismatch: have %d, want %d", head.Number, 4) - } - if head := chain.CurrentSnapBlock(); head.Number.Uint64() != uint64(4) { - t.Errorf("Head fast block mismatch: have %d, want %d", head.Number, uint64(4)) - } - if head := chain.CurrentBlock(); head.Number.Uint64() != uint64(4) { - t.Errorf("Head block mismatch: have %d, want %d", head.Number, uint64(4)) - } - if layer := chain.Snapshots().Snapshot(blocks[2].Root()); layer == nil { - t.Error("Failed to regenerate the snapshot of known state") - } -} diff --git a/core/blockchain_sethead_test.go b/core/blockchain_sethead_test.go deleted file mode 100644 index 217610c33a..0000000000 --- a/core/blockchain_sethead_test.go +++ /dev/null @@ -1,2191 +0,0 @@ -// Copyright 2020 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -// Tests that setting the chain head backwards doesn't leave the database in some -// strange state with gaps in the chain, nor with block data dangling in the future. - -package core - -import ( - "fmt" - "math/big" - "path" - "strings" - "testing" - "time" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/consensus/ethash" - "github.com/ethereum/go-ethereum/core/rawdb" - "github.com/ethereum/go-ethereum/core/state" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/core/vm" - "github.com/ethereum/go-ethereum/params" - "github.com/ethereum/go-ethereum/triedb" - "github.com/ethereum/go-ethereum/triedb/hashdb" - "github.com/ethereum/go-ethereum/triedb/pathdb" -) - -// rewindTest is a test case for chain rollback upon user request. 
-type rewindTest struct { - canonicalBlocks int // Number of blocks to generate for the canonical chain (heavier) - sidechainBlocks int // Number of blocks to generate for the side chain (lighter) - freezeThreshold uint64 // Block number until which to move things into the freezer - commitBlock uint64 // Block number for which to commit the state to disk - pivotBlock *uint64 // Pivot block number in case of fast sync - - setheadBlock uint64 // Block number to set head back to - expCanonicalBlocks int // Number of canonical blocks expected to remain in the database (excl. genesis) - expSidechainBlocks int // Number of sidechain blocks expected to remain in the database (excl. genesis) - expFrozen int // Number of canonical blocks expected to be in the freezer (incl. genesis) - expHeadHeader uint64 // Block number of the expected head header - expHeadFastBlock uint64 // Block number of the expected head fast sync block - expHeadBlock uint64 // Block number of the expected head full block -} - -//nolint:unused -func (tt *rewindTest) dump(crash bool) string { - buffer := new(strings.Builder) - - fmt.Fprint(buffer, "Chain:\n G") - for i := 0; i < tt.canonicalBlocks; i++ { - fmt.Fprintf(buffer, "->C%d", i+1) - } - fmt.Fprint(buffer, " (HEAD)\n") - if tt.sidechainBlocks > 0 { - fmt.Fprintf(buffer, " └") - for i := 0; i < tt.sidechainBlocks; i++ { - fmt.Fprintf(buffer, "->S%d", i+1) - } - fmt.Fprintf(buffer, "\n") - } - fmt.Fprintf(buffer, "\n") - - if tt.canonicalBlocks > int(tt.freezeThreshold) { - fmt.Fprint(buffer, "Frozen:\n G") - for i := 0; i < tt.canonicalBlocks-int(tt.freezeThreshold); i++ { - fmt.Fprintf(buffer, "->C%d", i+1) - } - fmt.Fprintf(buffer, "\n\n") - } else { - fmt.Fprintf(buffer, "Frozen: none\n") - } - fmt.Fprintf(buffer, "Commit: G") - if tt.commitBlock > 0 { - fmt.Fprintf(buffer, ", C%d", tt.commitBlock) - } - fmt.Fprint(buffer, "\n") - - if tt.pivotBlock == nil { - fmt.Fprintf(buffer, "Pivot : none\n") - } else { - fmt.Fprintf(buffer, "Pivot : C%d\n", *tt.pivotBlock) - } - if crash { - fmt.Fprintf(buffer, "\nCRASH\n\n") - } else { - fmt.Fprintf(buffer, "\nSetHead(%d)\n\n", tt.setheadBlock) - } - fmt.Fprintf(buffer, "------------------------------\n\n") - - if tt.expFrozen > 0 { - fmt.Fprint(buffer, "Expected in freezer:\n G") - for i := 0; i < tt.expFrozen-1; i++ { - fmt.Fprintf(buffer, "->C%d", i+1) - } - fmt.Fprintf(buffer, "\n\n") - } - if tt.expFrozen > 0 { - if tt.expFrozen >= tt.expCanonicalBlocks { - fmt.Fprintf(buffer, "Expected in leveldb: none\n") - } else { - fmt.Fprintf(buffer, "Expected in leveldb:\n C%d)", tt.expFrozen-1) - for i := tt.expFrozen - 1; i < tt.expCanonicalBlocks; i++ { - fmt.Fprintf(buffer, "->C%d", i+1) - } - fmt.Fprint(buffer, "\n") - if tt.expSidechainBlocks > tt.expFrozen { - fmt.Fprintf(buffer, " └") - for i := tt.expFrozen - 1; i < tt.expSidechainBlocks; i++ { - fmt.Fprintf(buffer, "->S%d", i+1) - } - fmt.Fprintf(buffer, "\n") - } - } - } else { - fmt.Fprint(buffer, "Expected in leveldb:\n G") - for i := tt.expFrozen; i < tt.expCanonicalBlocks; i++ { - fmt.Fprintf(buffer, "->C%d", i+1) - } - fmt.Fprint(buffer, "\n") - if tt.expSidechainBlocks > tt.expFrozen { - fmt.Fprintf(buffer, " └") - for i := tt.expFrozen; i < tt.expSidechainBlocks; i++ { - fmt.Fprintf(buffer, "->S%d", i+1) - } - fmt.Fprintf(buffer, "\n") - } - } - fmt.Fprintf(buffer, "\n") - fmt.Fprintf(buffer, "Expected head header : C%d\n", tt.expHeadHeader) - fmt.Fprintf(buffer, "Expected head fast block: C%d\n", tt.expHeadFastBlock) - if tt.expHeadBlock == 0 { - 
fmt.Fprintf(buffer, "Expected head block : G\n") - } else { - fmt.Fprintf(buffer, "Expected head block : C%d\n", tt.expHeadBlock) - } - return buffer.String() -} - -// Tests a sethead for a short canonical chain where a recent block was already -// committed to disk and then the sethead called. In this case we expect the full -// chain to be rolled back to the committed block. Everything above the sethead -// point should be deleted. In between the committed block and the requested head -// the data can remain as "fast sync" data to avoid redownloading it. -func TestShortSetHead(t *testing.T) { testShortSetHead(t, false) } -func TestShortSetHeadWithSnapshots(t *testing.T) { testShortSetHead(t, true) } - -func testShortSetHead(t *testing.T, snapshots bool) { - // Chain: - // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD) - // - // Frozen: none - // Commit: G, C4 - // Pivot : none - // - // SetHead(7) - // - // ------------------------------ - // - // Expected in leveldb: - // G->C1->C2->C3->C4->C5->C6->C7 - // - // Expected head header : C7 - // Expected head fast block: C7 - // Expected head block : C4 - testSetHead(t, &rewindTest{ - canonicalBlocks: 8, - sidechainBlocks: 0, - freezeThreshold: 16, - commitBlock: 4, - pivotBlock: nil, - setheadBlock: 7, - expCanonicalBlocks: 7, - expSidechainBlocks: 0, - expFrozen: 0, - expHeadHeader: 7, - expHeadFastBlock: 7, - expHeadBlock: 4, - }, snapshots) -} - -// Tests a sethead for a short canonical chain where the fast sync pivot point was -// already committed, after which sethead was called. In this case we expect the -// chain to behave like in full sync mode, rolling back to the committed block -// Everything above the sethead point should be deleted. In between the committed -// block and the requested head the data can remain as "fast sync" data to avoid -// redownloading it. -func TestShortSnapSyncedSetHead(t *testing.T) { testShortSnapSyncedSetHead(t, false) } -func TestShortSnapSyncedSetHeadWithSnapshots(t *testing.T) { testShortSnapSyncedSetHead(t, true) } - -func testShortSnapSyncedSetHead(t *testing.T, snapshots bool) { - // Chain: - // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD) - // - // Frozen: none - // Commit: G, C4 - // Pivot : C4 - // - // SetHead(7) - // - // ------------------------------ - // - // Expected in leveldb: - // G->C1->C2->C3->C4->C5->C6->C7 - // - // Expected head header : C7 - // Expected head fast block: C7 - // Expected head block : C4 - testSetHead(t, &rewindTest{ - canonicalBlocks: 8, - sidechainBlocks: 0, - freezeThreshold: 16, - commitBlock: 4, - pivotBlock: uint64ptr(4), - setheadBlock: 7, - expCanonicalBlocks: 7, - expSidechainBlocks: 0, - expFrozen: 0, - expHeadHeader: 7, - expHeadFastBlock: 7, - expHeadBlock: 4, - }, snapshots) -} - -// Tests a sethead for a short canonical chain where the fast sync pivot point was -// not yet committed, but sethead was called. In this case we expect the chain to -// detect that it was fast syncing and delete everything from the new head, since -// we can just pick up fast syncing from there. The head full block should be set -// to the genesis. 
-func TestShortSnapSyncingSetHead(t *testing.T) { testShortSnapSyncingSetHead(t, false) } -func TestShortSnapSyncingSetHeadWithSnapshots(t *testing.T) { testShortSnapSyncingSetHead(t, true) } - -func testShortSnapSyncingSetHead(t *testing.T, snapshots bool) { - // Chain: - // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD) - // - // Frozen: none - // Commit: G - // Pivot : C4 - // - // SetHead(7) - // - // ------------------------------ - // - // Expected in leveldb: - // G->C1->C2->C3->C4->C5->C6->C7 - // - // Expected head header : C7 - // Expected head fast block: C7 - // Expected head block : G - testSetHead(t, &rewindTest{ - canonicalBlocks: 8, - sidechainBlocks: 0, - freezeThreshold: 16, - commitBlock: 0, - pivotBlock: uint64ptr(4), - setheadBlock: 7, - expCanonicalBlocks: 7, - expSidechainBlocks: 0, - expFrozen: 0, - expHeadHeader: 7, - expHeadFastBlock: 7, - expHeadBlock: 0, - }, snapshots) -} - -// Tests a sethead for a short canonical chain and a shorter side chain, where a -// recent block was already committed to disk and then sethead was called. In this -// test scenario the side chain is below the committed block. In this case we expect -// the canonical full chain to be rolled back to the committed block. Everything -// above the sethead point should be deleted. In between the committed block and -// the requested head the data can remain as "fast sync" data to avoid redownloading -// it. The side chain should be left alone as it was shorter. -func TestShortOldForkedSetHead(t *testing.T) { testShortOldForkedSetHead(t, false) } -func TestShortOldForkedSetHeadWithSnapshots(t *testing.T) { testShortOldForkedSetHead(t, true) } - -func testShortOldForkedSetHead(t *testing.T, snapshots bool) { - // Chain: - // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD) - // └->S1->S2->S3 - // - // Frozen: none - // Commit: G, C4 - // Pivot : none - // - // SetHead(7) - // - // ------------------------------ - // - // Expected in leveldb: - // G->C1->C2->C3->C4->C5->C6->C7 - // └->S1->S2->S3 - // - // Expected head header : C7 - // Expected head fast block: C7 - // Expected head block : C4 - testSetHead(t, &rewindTest{ - canonicalBlocks: 8, - sidechainBlocks: 3, - freezeThreshold: 16, - commitBlock: 4, - pivotBlock: nil, - setheadBlock: 7, - expCanonicalBlocks: 7, - expSidechainBlocks: 3, - expFrozen: 0, - expHeadHeader: 7, - expHeadFastBlock: 7, - expHeadBlock: 4, - }, snapshots) -} - -// Tests a sethead for a short canonical chain and a shorter side chain, where -// the fast sync pivot point was already committed to disk and then sethead was -// called. In this test scenario the side chain is below the committed block. In -// this case we expect the canonical full chain to be rolled back to the committed -// block. Everything above the sethead point should be deleted. In between the -// committed block and the requested head the data can remain as "fast sync" data -// to avoid redownloading it. The side chain should be left alone as it was shorter. 
-func TestShortOldForkedSnapSyncedSetHead(t *testing.T) { - testShortOldForkedSnapSyncedSetHead(t, false) -} -func TestShortOldForkedSnapSyncedSetHeadWithSnapshots(t *testing.T) { - testShortOldForkedSnapSyncedSetHead(t, true) -} - -func testShortOldForkedSnapSyncedSetHead(t *testing.T, snapshots bool) { - // Chain: - // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD) - // └->S1->S2->S3 - // - // Frozen: none - // Commit: G, C4 - // Pivot : C4 - // - // SetHead(7) - // - // ------------------------------ - // - // Expected in leveldb: - // G->C1->C2->C3->C4->C5->C6->C7 - // └->S1->S2->S3 - // - // Expected head header : C7 - // Expected head fast block: C7 - // Expected head block : C4 - testSetHead(t, &rewindTest{ - canonicalBlocks: 8, - sidechainBlocks: 3, - freezeThreshold: 16, - commitBlock: 4, - pivotBlock: uint64ptr(4), - setheadBlock: 7, - expCanonicalBlocks: 7, - expSidechainBlocks: 3, - expFrozen: 0, - expHeadHeader: 7, - expHeadFastBlock: 7, - expHeadBlock: 4, - }, snapshots) -} - -// Tests a sethead for a short canonical chain and a shorter side chain, where -// the fast sync pivot point was not yet committed, but sethead was called. In this -// test scenario the side chain is below the committed block. In this case we expect -// the chain to detect that it was fast syncing and delete everything from the new -// head, since we can just pick up fast syncing from there. The head full block -// should be set to the genesis. -func TestShortOldForkedSnapSyncingSetHead(t *testing.T) { - testShortOldForkedSnapSyncingSetHead(t, false) -} -func TestShortOldForkedSnapSyncingSetHeadWithSnapshots(t *testing.T) { - testShortOldForkedSnapSyncingSetHead(t, true) -} - -func testShortOldForkedSnapSyncingSetHead(t *testing.T, snapshots bool) { - // Chain: - // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD) - // └->S1->S2->S3 - // - // Frozen: none - // Commit: G - // Pivot : C4 - // - // SetHead(7) - // - // ------------------------------ - // - // Expected in leveldb: - // G->C1->C2->C3->C4->C5->C6->C7 - // └->S1->S2->S3 - // - // Expected head header : C7 - // Expected head fast block: C7 - // Expected head block : G - testSetHead(t, &rewindTest{ - canonicalBlocks: 8, - sidechainBlocks: 3, - freezeThreshold: 16, - commitBlock: 0, - pivotBlock: uint64ptr(4), - setheadBlock: 7, - expCanonicalBlocks: 7, - expSidechainBlocks: 3, - expFrozen: 0, - expHeadHeader: 7, - expHeadFastBlock: 7, - expHeadBlock: 0, - }, snapshots) -} - -// Tests a sethead for a short canonical chain and a shorter side chain, where a -// recent block was already committed to disk and then sethead was called. In this -// test scenario the side chain reaches above the committed block. In this case we -// expect the canonical full chain to be rolled back to the committed block. All -// data above the sethead point should be deleted. In between the committed block -// and the requested head the data can remain as "fast sync" data to avoid having -// to redownload it. The side chain should be truncated to the head set. -// -// The side chain could be left to be if the fork point was before the new head -// we are deleting to, but it would be exceedingly hard to detect that case and -// properly handle it, so we'll trade extra work in exchange for simpler code. 
-func TestShortNewlyForkedSetHead(t *testing.T) { testShortNewlyForkedSetHead(t, false) } -func TestShortNewlyForkedSetHeadWithSnapshots(t *testing.T) { testShortNewlyForkedSetHead(t, true) } - -func testShortNewlyForkedSetHead(t *testing.T, snapshots bool) { - // Chain: - // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10 (HEAD) - // └->S1->S2->S3->S4->S5->S6->S7->S8 - // - // Frozen: none - // Commit: G, C4 - // Pivot : none - // - // SetHead(7) - // - // ------------------------------ - // - // Expected in leveldb: - // G->C1->C2->C3->C4->C5->C6->C7 - // └->S1->S2->S3->S4->S5->S6->S7 - // - // Expected head header : C7 - // Expected head fast block: C7 - // Expected head block : C4 - testSetHead(t, &rewindTest{ - canonicalBlocks: 10, - sidechainBlocks: 8, - freezeThreshold: 16, - commitBlock: 4, - pivotBlock: nil, - setheadBlock: 7, - expCanonicalBlocks: 7, - expSidechainBlocks: 7, - expFrozen: 0, - expHeadHeader: 7, - expHeadFastBlock: 7, - expHeadBlock: 4, - }, snapshots) -} - -// Tests a sethead for a short canonical chain and a shorter side chain, where -// the fast sync pivot point was already committed to disk and then sethead was -// called. In this case we expect the canonical full chain to be rolled back to -// the committed block. All data above the sethead point should be deleted. In -// between the committed block and the requested head the data can remain as -// "fast sync" data to avoid having to redownload it. The side chain should be -// truncated to the head set. -// -// The side chain could be left to be if the fork point was before the new head -// we are deleting to, but it would be exceedingly hard to detect that case and -// properly handle it, so we'll trade extra work in exchange for simpler code. -func TestShortNewlyForkedSnapSyncedSetHead(t *testing.T) { - testShortNewlyForkedSnapSyncedSetHead(t, false) -} -func TestShortNewlyForkedSnapSyncedSetHeadWithSnapshots(t *testing.T) { - testShortNewlyForkedSnapSyncedSetHead(t, true) -} - -func testShortNewlyForkedSnapSyncedSetHead(t *testing.T, snapshots bool) { - // Chain: - // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10 (HEAD) - // └->S1->S2->S3->S4->S5->S6->S7->S8 - // - // Frozen: none - // Commit: G, C4 - // Pivot : C4 - // - // SetHead(7) - // - // ------------------------------ - // - // Expected in leveldb: - // G->C1->C2->C3->C4->C5->C6->C7 - // └->S1->S2->S3->S4->S5->S6->S7 - // - // Expected head header : C7 - // Expected head fast block: C7 - // Expected head block : C4 - testSetHead(t, &rewindTest{ - canonicalBlocks: 10, - sidechainBlocks: 8, - freezeThreshold: 16, - commitBlock: 4, - pivotBlock: uint64ptr(4), - setheadBlock: 7, - expCanonicalBlocks: 7, - expSidechainBlocks: 7, - expFrozen: 0, - expHeadHeader: 7, - expHeadFastBlock: 7, - expHeadBlock: 4, - }, snapshots) -} - -// Tests a sethead for a short canonical chain and a shorter side chain, where -// the fast sync pivot point was not yet committed, but sethead was called. In -// this test scenario the side chain reaches above the committed block. In this -// case we expect the chain to detect that it was fast syncing and delete -// everything from the new head, since we can just pick up fast syncing from -// there. -// -// The side chain could be left to be if the fork point was before the new head -// we are deleting to, but it would be exceedingly hard to detect that case and -// properly handle it, so we'll trade extra work in exchange for simpler code. 
-func TestShortNewlyForkedSnapSyncingSetHead(t *testing.T) { - testShortNewlyForkedSnapSyncingSetHead(t, false) -} -func TestShortNewlyForkedSnapSyncingSetHeadWithSnapshots(t *testing.T) { - testShortNewlyForkedSnapSyncingSetHead(t, true) -} - -func testShortNewlyForkedSnapSyncingSetHead(t *testing.T, snapshots bool) { - // Chain: - // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10 (HEAD) - // └->S1->S2->S3->S4->S5->S6->S7->S8 - // - // Frozen: none - // Commit: G - // Pivot : C4 - // - // SetHead(7) - // - // ------------------------------ - // - // Expected in leveldb: - // G->C1->C2->C3->C4->C5->C6->C7 - // └->S1->S2->S3->S4->S5->S6->S7 - // - // Expected head header : C7 - // Expected head fast block: C7 - // Expected head block : G - testSetHead(t, &rewindTest{ - canonicalBlocks: 10, - sidechainBlocks: 8, - freezeThreshold: 16, - commitBlock: 0, - pivotBlock: uint64ptr(4), - setheadBlock: 7, - expCanonicalBlocks: 7, - expSidechainBlocks: 7, - expFrozen: 0, - expHeadHeader: 7, - expHeadFastBlock: 7, - expHeadBlock: 0, - }, snapshots) -} - -// Tests a sethead for a short canonical chain and a longer side chain, where a -// recent block was already committed to disk and then sethead was called. In this -// case we expect the canonical full chain to be rolled back to the committed block. -// All data above the sethead point should be deleted. In between the committed -// block and the requested head the data can remain as "fast sync" data to avoid -// having to redownload it. The side chain should be truncated to the head set. -// -// The side chain could be left to be if the fork point was before the new head -// we are deleting to, but it would be exceedingly hard to detect that case and -// properly handle it, so we'll trade extra work in exchange for simpler code. -func TestShortReorgedSetHead(t *testing.T) { testShortReorgedSetHead(t, false) } -func TestShortReorgedSetHeadWithSnapshots(t *testing.T) { testShortReorgedSetHead(t, true) } - -func testShortReorgedSetHead(t *testing.T, snapshots bool) { - // Chain: - // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD) - // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10 - // - // Frozen: none - // Commit: G, C4 - // Pivot : none - // - // SetHead(7) - // - // ------------------------------ - // - // Expected in leveldb: - // G->C1->C2->C3->C4->C5->C6->C7 - // └->S1->S2->S3->S4->S5->S6->S7 - // - // Expected head header : C7 - // Expected head fast block: C7 - // Expected head block : C4 - testSetHead(t, &rewindTest{ - canonicalBlocks: 8, - sidechainBlocks: 10, - freezeThreshold: 16, - commitBlock: 4, - pivotBlock: nil, - setheadBlock: 7, - expCanonicalBlocks: 7, - expSidechainBlocks: 7, - expFrozen: 0, - expHeadHeader: 7, - expHeadFastBlock: 7, - expHeadBlock: 4, - }, snapshots) -} - -// Tests a sethead for a short canonical chain and a longer side chain, where -// the fast sync pivot point was already committed to disk and then sethead was -// called. In this case we expect the canonical full chain to be rolled back to -// the committed block. All data above the sethead point should be deleted. In -// between the committed block and the requested head the data can remain as -// "fast sync" data to avoid having to redownload it. The side chain should be -// truncated to the head set. -// -// The side chain could be left to be if the fork point was before the new head -// we are deleting to, but it would be exceedingly hard to detect that case and -// properly handle it, so we'll trade extra work in exchange for simpler code. 
-func TestShortReorgedSnapSyncedSetHead(t *testing.T) { - testShortReorgedSnapSyncedSetHead(t, false) -} -func TestShortReorgedSnapSyncedSetHeadWithSnapshots(t *testing.T) { - testShortReorgedSnapSyncedSetHead(t, true) -} - -func testShortReorgedSnapSyncedSetHead(t *testing.T, snapshots bool) { - // Chain: - // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD) - // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10 - // - // Frozen: none - // Commit: G, C4 - // Pivot : C4 - // - // SetHead(7) - // - // ------------------------------ - // - // Expected in leveldb: - // G->C1->C2->C3->C4->C5->C6->C7 - // └->S1->S2->S3->S4->S5->S6->S7 - // - // Expected head header : C7 - // Expected head fast block: C7 - // Expected head block : C4 - testSetHead(t, &rewindTest{ - canonicalBlocks: 8, - sidechainBlocks: 10, - freezeThreshold: 16, - commitBlock: 4, - pivotBlock: uint64ptr(4), - setheadBlock: 7, - expCanonicalBlocks: 7, - expSidechainBlocks: 7, - expFrozen: 0, - expHeadHeader: 7, - expHeadFastBlock: 7, - expHeadBlock: 4, - }, snapshots) -} - -// Tests a sethead for a short canonical chain and a longer side chain, where -// the fast sync pivot point was not yet committed, but sethead was called. In -// this case we expect the chain to detect that it was fast syncing and delete -// everything from the new head, since we can just pick up fast syncing from -// there. -// -// The side chain could be left to be if the fork point was before the new head -// we are deleting to, but it would be exceedingly hard to detect that case and -// properly handle it, so we'll trade extra work in exchange for simpler code. -func TestShortReorgedSnapSyncingSetHead(t *testing.T) { - testShortReorgedSnapSyncingSetHead(t, false) -} -func TestShortReorgedSnapSyncingSetHeadWithSnapshots(t *testing.T) { - testShortReorgedSnapSyncingSetHead(t, true) -} - -func testShortReorgedSnapSyncingSetHead(t *testing.T, snapshots bool) { - // Chain: - // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD) - // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10 - // - // Frozen: none - // Commit: G - // Pivot : C4 - // - // SetHead(7) - // - // ------------------------------ - // - // Expected in leveldb: - // G->C1->C2->C3->C4->C5->C6->C7 - // └->S1->S2->S3->S4->S5->S6->S7 - // - // Expected head header : C7 - // Expected head fast block: C7 - // Expected head block : G - testSetHead(t, &rewindTest{ - canonicalBlocks: 8, - sidechainBlocks: 10, - freezeThreshold: 16, - commitBlock: 0, - pivotBlock: uint64ptr(4), - setheadBlock: 7, - expCanonicalBlocks: 7, - expSidechainBlocks: 7, - expFrozen: 0, - expHeadHeader: 7, - expHeadFastBlock: 7, - expHeadBlock: 0, - }, snapshots) -} - -// Tests a sethead for a long canonical chain with frozen blocks where a recent -// block - newer than the ancient limit - was already committed to disk and then -// sethead was called. In this case we expect the full chain to be rolled back -// to the committed block. Everything above the sethead point should be deleted. -// In between the committed block and the requested head the data can remain as -// "fast sync" data to avoid redownloading it. 
-func TestLongShallowSetHead(t *testing.T) { testLongShallowSetHead(t, false) } -func TestLongShallowSetHeadWithSnapshots(t *testing.T) { testLongShallowSetHead(t, true) } - -func testLongShallowSetHead(t *testing.T, snapshots bool) { - // Chain: - // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD) - // - // Frozen: - // G->C1->C2 - // - // Commit: G, C4 - // Pivot : none - // - // SetHead(6) - // - // ------------------------------ - // - // Expected in freezer: - // G->C1->C2 - // - // Expected in leveldb: - // C2)->C3->C4->C5->C6 - // - // Expected head header : C6 - // Expected head fast block: C6 - // Expected head block : C4 - testSetHead(t, &rewindTest{ - canonicalBlocks: 18, - sidechainBlocks: 0, - freezeThreshold: 16, - commitBlock: 4, - pivotBlock: nil, - setheadBlock: 6, - expCanonicalBlocks: 6, - expSidechainBlocks: 0, - expFrozen: 3, - expHeadHeader: 6, - expHeadFastBlock: 6, - expHeadBlock: 4, - }, snapshots) -} - -// Tests a sethead for a long canonical chain with frozen blocks where a recent -// block - older than the ancient limit - was already committed to disk and then -// sethead was called. In this case we expect the full chain to be rolled back -// to the committed block. Since the ancient limit was underflown, everything -// needs to be deleted onwards to avoid creating a gap. -func TestLongDeepSetHead(t *testing.T) { testLongDeepSetHead(t, false) } -func TestLongDeepSetHeadWithSnapshots(t *testing.T) { testLongDeepSetHead(t, true) } - -func testLongDeepSetHead(t *testing.T, snapshots bool) { - // Chain: - // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD) - // - // Frozen: - // G->C1->C2->C3->C4->C5->C6->C7->C8 - // - // Commit: G, C4 - // Pivot : none - // - // SetHead(6) - // - // ------------------------------ - // - // Expected in freezer: - // G->C1->C2->C3->C4 - // - // Expected in leveldb: none - // - // Expected head header : C4 - // Expected head fast block: C4 - // Expected head block : C4 - testSetHead(t, &rewindTest{ - canonicalBlocks: 24, - sidechainBlocks: 0, - freezeThreshold: 16, - commitBlock: 4, - pivotBlock: nil, - setheadBlock: 6, - expCanonicalBlocks: 4, - expSidechainBlocks: 0, - expFrozen: 5, - expHeadHeader: 4, - expHeadFastBlock: 4, - expHeadBlock: 4, - }, snapshots) -} - -// Tests a sethead for a long canonical chain with frozen blocks where the fast -// sync pivot point - newer than the ancient limit - was already committed, after -// which sethead was called. In this case we expect the full chain to be rolled -// back to the committed block. Everything above the sethead point should be -// deleted. In between the committed block and the requested head the data can -// remain as "fast sync" data to avoid redownloading it. 
-func TestLongSnapSyncedShallowSetHead(t *testing.T) { - testLongSnapSyncedShallowSetHead(t, false) -} -func TestLongSnapSyncedShallowSetHeadWithSnapshots(t *testing.T) { - testLongSnapSyncedShallowSetHead(t, true) -} - -func testLongSnapSyncedShallowSetHead(t *testing.T, snapshots bool) { - // Chain: - // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD) - // - // Frozen: - // G->C1->C2 - // - // Commit: G, C4 - // Pivot : C4 - // - // SetHead(6) - // - // ------------------------------ - // - // Expected in freezer: - // G->C1->C2 - // - // Expected in leveldb: - // C2)->C3->C4->C5->C6 - // - // Expected head header : C6 - // Expected head fast block: C6 - // Expected head block : C4 - testSetHead(t, &rewindTest{ - canonicalBlocks: 18, - sidechainBlocks: 0, - freezeThreshold: 16, - commitBlock: 4, - pivotBlock: uint64ptr(4), - setheadBlock: 6, - expCanonicalBlocks: 6, - expSidechainBlocks: 0, - expFrozen: 3, - expHeadHeader: 6, - expHeadFastBlock: 6, - expHeadBlock: 4, - }, snapshots) -} - -// Tests a sethead for a long canonical chain with frozen blocks where the fast -// sync pivot point - older than the ancient limit - was already committed, after -// which sethead was called. In this case we expect the full chain to be rolled -// back to the committed block. Since the ancient limit was underflown, everything -// needs to be deleted onwards to avoid creating a gap. -func TestLongSnapSyncedDeepSetHead(t *testing.T) { testLongSnapSyncedDeepSetHead(t, false) } -func TestLongSnapSyncedDeepSetHeadWithSnapshots(t *testing.T) { testLongSnapSyncedDeepSetHead(t, true) } - -func testLongSnapSyncedDeepSetHead(t *testing.T, snapshots bool) { - // Chain: - // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD) - // - // Frozen: - // G->C1->C2->C3->C4->C5->C6->C7->C8 - // - // Commit: G, C4 - // Pivot : C4 - // - // SetHead(6) - // - // ------------------------------ - // - // Expected in freezer: - // G->C1->C2->C3->C4 - // - // Expected in leveldb: none - // - // Expected head header : C4 - // Expected head fast block: C4 - // Expected head block : C4 - testSetHead(t, &rewindTest{ - canonicalBlocks: 24, - sidechainBlocks: 0, - freezeThreshold: 16, - commitBlock: 4, - pivotBlock: uint64ptr(4), - setheadBlock: 6, - expCanonicalBlocks: 4, - expSidechainBlocks: 0, - expFrozen: 5, - expHeadHeader: 4, - expHeadFastBlock: 4, - expHeadBlock: 4, - }, snapshots) -} - -// Tests a sethead for a long canonical chain with frozen blocks where the fast -// sync pivot point - newer than the ancient limit - was not yet committed, but -// sethead was called. In this case we expect the chain to detect that it was fast -// syncing and delete everything from the new head, since we can just pick up fast -// syncing from there. 
-func TestLongSnapSyncingShallowSetHead(t *testing.T) { - testLongSnapSyncingShallowSetHead(t, false) -} -func TestLongSnapSyncingShallowSetHeadWithSnapshots(t *testing.T) { - testLongSnapSyncingShallowSetHead(t, true) -} - -func testLongSnapSyncingShallowSetHead(t *testing.T, snapshots bool) { - // Chain: - // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD) - // - // Frozen: - // G->C1->C2 - // - // Commit: G - // Pivot : C4 - // - // SetHead(6) - // - // ------------------------------ - // - // Expected in freezer: - // G->C1->C2 - // - // Expected in leveldb: - // C2)->C3->C4->C5->C6 - // - // Expected head header : C6 - // Expected head fast block: C6 - // Expected head block : G - testSetHead(t, &rewindTest{ - canonicalBlocks: 18, - sidechainBlocks: 0, - freezeThreshold: 16, - commitBlock: 0, - pivotBlock: uint64ptr(4), - setheadBlock: 6, - expCanonicalBlocks: 6, - expSidechainBlocks: 0, - expFrozen: 3, - expHeadHeader: 6, - expHeadFastBlock: 6, - expHeadBlock: 0, - }, snapshots) -} - -// Tests a sethead for a long canonical chain with frozen blocks where the fast -// sync pivot point - older than the ancient limit - was not yet committed, but -// sethead was called. In this case we expect the chain to detect that it was fast -// syncing and delete everything from the new head, since we can just pick up fast -// syncing from there. -func TestLongSnapSyncingDeepSetHead(t *testing.T) { - testLongSnapSyncingDeepSetHead(t, false) -} -func TestLongSnapSyncingDeepSetHeadWithSnapshots(t *testing.T) { - testLongSnapSyncingDeepSetHead(t, true) -} - -func testLongSnapSyncingDeepSetHead(t *testing.T, snapshots bool) { - // Chain: - // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD) - // - // Frozen: - // G->C1->C2->C3->C4->C5->C6->C7->C8 - // - // Commit: G - // Pivot : C4 - // - // SetHead(6) - // - // ------------------------------ - // - // Expected in freezer: - // G->C1->C2->C3->C4->C5->C6 - // - // Expected in leveldb: none - // - // Expected head header : C6 - // Expected head fast block: C6 - // Expected head block : G - testSetHead(t, &rewindTest{ - canonicalBlocks: 24, - sidechainBlocks: 0, - freezeThreshold: 16, - commitBlock: 0, - pivotBlock: uint64ptr(4), - setheadBlock: 6, - expCanonicalBlocks: 6, - expSidechainBlocks: 0, - expFrozen: 7, - expHeadHeader: 6, - expHeadFastBlock: 6, - expHeadBlock: 0, - }, snapshots) -} - -// Tests a sethead for a long canonical chain with frozen blocks and a shorter side -// chain, where a recent block - newer than the ancient limit - was already committed -// to disk and then sethead was called. In this case we expect the canonical full -// chain to be rolled back to the committed block. Everything above the sethead point -// should be deleted. In between the committed block and the requested head the data -// can remain as "fast sync" data to avoid redownloading it. The side chain is nuked -// by the freezer. 
-func TestLongOldForkedShallowSetHead(t *testing.T) { - testLongOldForkedShallowSetHead(t, false) -} -func TestLongOldForkedShallowSetHeadWithSnapshots(t *testing.T) { - testLongOldForkedShallowSetHead(t, true) -} - -func testLongOldForkedShallowSetHead(t *testing.T, snapshots bool) { - // Chain: - // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD) - // └->S1->S2->S3 - // - // Frozen: - // G->C1->C2 - // - // Commit: G, C4 - // Pivot : none - // - // SetHead(6) - // - // ------------------------------ - // - // Expected in freezer: - // G->C1->C2 - // - // Expected in leveldb: - // C2)->C3->C4->C5->C6 - // - // Expected head header : C6 - // Expected head fast block: C6 - // Expected head block : C4 - testSetHead(t, &rewindTest{ - canonicalBlocks: 18, - sidechainBlocks: 3, - freezeThreshold: 16, - commitBlock: 4, - pivotBlock: nil, - setheadBlock: 6, - expCanonicalBlocks: 6, - expSidechainBlocks: 0, - expFrozen: 3, - expHeadHeader: 6, - expHeadFastBlock: 6, - expHeadBlock: 4, - }, snapshots) -} - -// Tests a sethead for a long canonical chain with frozen blocks and a shorter side -// chain, where a recent block - older than the ancient limit - was already committed -// to disk and then sethead was called. In this case we expect the canonical full -// chain to be rolled back to the committed block. Since the ancient limit was -// underflown, everything needs to be deleted onwards to avoid creating a gap. The -// side chain is nuked by the freezer. -func TestLongOldForkedDeepSetHead(t *testing.T) { testLongOldForkedDeepSetHead(t, false) } -func TestLongOldForkedDeepSetHeadWithSnapshots(t *testing.T) { testLongOldForkedDeepSetHead(t, true) } - -func testLongOldForkedDeepSetHead(t *testing.T, snapshots bool) { - // Chain: - // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD) - // └->S1->S2->S3 - // - // Frozen: - // G->C1->C2->C3->C4->C5->C6->C7->C8 - // - // Commit: G, C4 - // Pivot : none - // - // SetHead(6) - // - // ------------------------------ - // - // Expected in freezer: - // G->C1->C2->C3->C4 - // - // Expected in leveldb: none - // - // Expected head header : C4 - // Expected head fast block: C4 - // Expected head block : C4 - testSetHead(t, &rewindTest{ - canonicalBlocks: 24, - sidechainBlocks: 3, - freezeThreshold: 16, - commitBlock: 4, - pivotBlock: nil, - setheadBlock: 6, - expCanonicalBlocks: 4, - expSidechainBlocks: 0, - expFrozen: 5, - expHeadHeader: 4, - expHeadFastBlock: 4, - expHeadBlock: 4, - }, snapshots) -} - -// Tests a sethead for a long canonical chain with frozen blocks and a shorter -// side chain, where the fast sync pivot point - newer than the ancient limit - -// was already committed to disk and then sethead was called. In this test scenario -// the side chain is below the committed block. In this case we expect the canonical -// full chain to be rolled back to the committed block. Everything above the -// sethead point should be deleted. In between the committed block and the -// requested head the data can remain as "fast sync" data to avoid redownloading -// it. The side chain is nuked by the freezer. 
-func TestLongOldForkedSnapSyncedShallowSetHead(t *testing.T) { - testLongOldForkedSnapSyncedShallowSetHead(t, false) -} -func TestLongOldForkedSnapSyncedShallowSetHeadWithSnapshots(t *testing.T) { - testLongOldForkedSnapSyncedShallowSetHead(t, true) -} - -func testLongOldForkedSnapSyncedShallowSetHead(t *testing.T, snapshots bool) { - // Chain: - // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD) - // └->S1->S2->S3 - // - // Frozen: - // G->C1->C2 - // - // Commit: G, C4 - // Pivot : C4 - // - // SetHead(6) - // - // ------------------------------ - // - // Expected in freezer: - // G->C1->C2 - // - // Expected in leveldb: - // C2)->C3->C4->C5->C6 - // - // Expected head header : C6 - // Expected head fast block: C6 - // Expected head block : C4 - testSetHead(t, &rewindTest{ - canonicalBlocks: 18, - sidechainBlocks: 3, - freezeThreshold: 16, - commitBlock: 4, - pivotBlock: uint64ptr(4), - setheadBlock: 6, - expCanonicalBlocks: 6, - expSidechainBlocks: 0, - expFrozen: 3, - expHeadHeader: 6, - expHeadFastBlock: 6, - expHeadBlock: 4, - }, snapshots) -} - -// Tests a sethead for a long canonical chain with frozen blocks and a shorter -// side chain, where the fast sync pivot point - older than the ancient limit - -// was already committed to disk and then sethead was called. In this test scenario -// the side chain is below the committed block. In this case we expect the canonical -// full chain to be rolled back to the committed block. Since the ancient limit was -// underflown, everything needs to be deleted onwards to avoid creating a gap. The -// side chain is nuked by the freezer. -func TestLongOldForkedSnapSyncedDeepSetHead(t *testing.T) { - testLongOldForkedSnapSyncedDeepSetHead(t, false) -} -func TestLongOldForkedSnapSyncedDeepSetHeadWithSnapshots(t *testing.T) { - testLongOldForkedSnapSyncedDeepSetHead(t, true) -} - -func testLongOldForkedSnapSyncedDeepSetHead(t *testing.T, snapshots bool) { - // Chain: - // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD) - // └->S1->S2->S3 - // - // Frozen: - // G->C1->C2->C3->C4->C5->C6->C7->C8 - // - // Commit: G, C4 - // Pivot : C4 - // - // SetHead(6) - // - // ------------------------------ - // - // Expected in freezer: - // G->C1->C2->C3->C4->C5->C6 - // - // Expected in leveldb: none - // - // Expected head header : C6 - // Expected head fast block: C6 - // Expected head block : C4 - testSetHead(t, &rewindTest{ - canonicalBlocks: 24, - sidechainBlocks: 3, - freezeThreshold: 16, - commitBlock: 4, - pivotBlock: uint64ptr(4), - setheadBlock: 6, - expCanonicalBlocks: 4, - expSidechainBlocks: 0, - expFrozen: 5, - expHeadHeader: 4, - expHeadFastBlock: 4, - expHeadBlock: 4, - }, snapshots) -} - -// Tests a sethead for a long canonical chain with frozen blocks and a shorter -// side chain, where the fast sync pivot point - newer than the ancient limit - -// was not yet committed, but sethead was called. In this test scenario the side -// chain is below the committed block. In this case we expect the chain to detect -// that it was fast syncing and delete everything from the new head, since we can -// just pick up fast syncing from there. The side chain is completely nuked by the -// freezer. 
-func TestLongOldForkedSnapSyncingShallowSetHead(t *testing.T) { - testLongOldForkedSnapSyncingShallowSetHead(t, false) -} -func TestLongOldForkedSnapSyncingShallowSetHeadWithSnapshots(t *testing.T) { - testLongOldForkedSnapSyncingShallowSetHead(t, true) -} - -func testLongOldForkedSnapSyncingShallowSetHead(t *testing.T, snapshots bool) { - // Chain: - // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD) - // └->S1->S2->S3 - // - // Frozen: - // G->C1->C2 - // - // Commit: G - // Pivot : C4 - // - // SetHead(6) - // - // ------------------------------ - // - // Expected in freezer: - // G->C1->C2 - // - // Expected in leveldb: - // C2)->C3->C4->C5->C6 - // - // Expected head header : C6 - // Expected head fast block: C6 - // Expected head block : G - testSetHead(t, &rewindTest{ - canonicalBlocks: 18, - sidechainBlocks: 3, - freezeThreshold: 16, - commitBlock: 0, - pivotBlock: uint64ptr(4), - setheadBlock: 6, - expCanonicalBlocks: 6, - expSidechainBlocks: 0, - expFrozen: 3, - expHeadHeader: 6, - expHeadFastBlock: 6, - expHeadBlock: 0, - }, snapshots) -} - -// Tests a sethead for a long canonical chain with frozen blocks and a shorter -// side chain, where the fast sync pivot point - older than the ancient limit - -// was not yet committed, but sethead was called. In this test scenario the side -// chain is below the committed block. In this case we expect the chain to detect -// that it was fast syncing and delete everything from the new head, since we can -// just pick up fast syncing from there. The side chain is completely nuked by the -// freezer. -func TestLongOldForkedSnapSyncingDeepSetHead(t *testing.T) { - testLongOldForkedSnapSyncingDeepSetHead(t, false) -} -func TestLongOldForkedSnapSyncingDeepSetHeadWithSnapshots(t *testing.T) { - testLongOldForkedSnapSyncingDeepSetHead(t, true) -} - -func testLongOldForkedSnapSyncingDeepSetHead(t *testing.T, snapshots bool) { - // Chain: - // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD) - // └->S1->S2->S3 - // - // Frozen: - // G->C1->C2->C3->C4->C5->C6->C7->C8 - // - // Commit: G - // Pivot : C4 - // - // SetHead(6) - // - // ------------------------------ - // - // Expected in freezer: - // G->C1->C2->C3->C4->C5->C6 - // - // Expected in leveldb: none - // - // Expected head header : C6 - // Expected head fast block: C6 - // Expected head block : G - testSetHead(t, &rewindTest{ - canonicalBlocks: 24, - sidechainBlocks: 3, - freezeThreshold: 16, - commitBlock: 0, - pivotBlock: uint64ptr(4), - setheadBlock: 6, - expCanonicalBlocks: 6, - expSidechainBlocks: 0, - expFrozen: 7, - expHeadHeader: 6, - expHeadFastBlock: 6, - expHeadBlock: 0, - }, snapshots) -} - -// Tests a sethead for a long canonical chain with frozen blocks and a shorter -// side chain, where a recent block - newer than the ancient limit - was already -// committed to disk and then sethead was called. In this test scenario the side -// chain is above the committed block. In this case the freezer will delete the -// sidechain since it's dangling, reverting to TestLongShallowSetHead. 
-func TestLongNewerForkedShallowSetHead(t *testing.T) { - testLongNewerForkedShallowSetHead(t, false) -} -func TestLongNewerForkedShallowSetHeadWithSnapshots(t *testing.T) { - testLongNewerForkedShallowSetHead(t, true) -} - -func testLongNewerForkedShallowSetHead(t *testing.T, snapshots bool) { - // Chain: - // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD) - // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12 - // - // Frozen: - // G->C1->C2 - // - // Commit: G, C4 - // Pivot : none - // - // SetHead(6) - // - // ------------------------------ - // - // Expected in freezer: - // G->C1->C2 - // - // Expected in leveldb: - // C2)->C3->C4->C5->C6 - // - // Expected head header : C6 - // Expected head fast block: C6 - // Expected head block : C4 - testSetHead(t, &rewindTest{ - canonicalBlocks: 18, - sidechainBlocks: 12, - freezeThreshold: 16, - commitBlock: 4, - pivotBlock: nil, - setheadBlock: 6, - expCanonicalBlocks: 6, - expSidechainBlocks: 0, - expFrozen: 3, - expHeadHeader: 6, - expHeadFastBlock: 6, - expHeadBlock: 4, - }, snapshots) -} - -// Tests a sethead for a long canonical chain with frozen blocks and a shorter -// side chain, where a recent block - older than the ancient limit - was already -// committed to disk and then sethead was called. In this test scenario the side -// chain is above the committed block. In this case the freezer will delete the -// sidechain since it's dangling, reverting to TestLongDeepSetHead. -func TestLongNewerForkedDeepSetHead(t *testing.T) { - testLongNewerForkedDeepSetHead(t, false) -} -func TestLongNewerForkedDeepSetHeadWithSnapshots(t *testing.T) { - testLongNewerForkedDeepSetHead(t, true) -} - -func testLongNewerForkedDeepSetHead(t *testing.T, snapshots bool) { - // Chain: - // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD) - // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12 - // - // Frozen: - // G->C1->C2->C3->C4->C5->C6->C7->C8 - // - // Commit: G, C4 - // Pivot : none - // - // SetHead(6) - // - // ------------------------------ - // - // Expected in freezer: - // G->C1->C2->C3->C4 - // - // Expected in leveldb: none - // - // Expected head header : C4 - // Expected head fast block: C4 - // Expected head block : C4 - testSetHead(t, &rewindTest{ - canonicalBlocks: 24, - sidechainBlocks: 12, - freezeThreshold: 16, - commitBlock: 4, - pivotBlock: nil, - setheadBlock: 6, - expCanonicalBlocks: 4, - expSidechainBlocks: 0, - expFrozen: 5, - expHeadHeader: 4, - expHeadFastBlock: 4, - expHeadBlock: 4, - }, snapshots) -} - -// Tests a sethead for a long canonical chain with frozen blocks and a shorter -// side chain, where the fast sync pivot point - newer than the ancient limit - -// was already committed to disk and then sethead was called. In this test scenario -// the side chain is above the committed block. In this case the freezer will delete -// the sidechain since it's dangling, reverting to TestLongSnapSyncedShallowSetHead. 
-func TestLongNewerForkedSnapSyncedShallowSetHead(t *testing.T) { - testLongNewerForkedSnapSyncedShallowSetHead(t, false) -} -func TestLongNewerForkedSnapSyncedShallowSetHeadWithSnapshots(t *testing.T) { - testLongNewerForkedSnapSyncedShallowSetHead(t, true) -} - -func testLongNewerForkedSnapSyncedShallowSetHead(t *testing.T, snapshots bool) { - // Chain: - // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD) - // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12 - // - // Frozen: - // G->C1->C2 - // - // Commit: G, C4 - // Pivot : C4 - // - // SetHead(6) - // - // ------------------------------ - // - // Expected in freezer: - // G->C1->C2 - // - // Expected in leveldb: - // C2)->C3->C4->C5->C6 - // - // Expected head header : C6 - // Expected head fast block: C6 - // Expected head block : C4 - testSetHead(t, &rewindTest{ - canonicalBlocks: 18, - sidechainBlocks: 12, - freezeThreshold: 16, - commitBlock: 4, - pivotBlock: uint64ptr(4), - setheadBlock: 6, - expCanonicalBlocks: 6, - expSidechainBlocks: 0, - expFrozen: 3, - expHeadHeader: 6, - expHeadFastBlock: 6, - expHeadBlock: 4, - }, snapshots) -} - -// Tests a sethead for a long canonical chain with frozen blocks and a shorter -// side chain, where the fast sync pivot point - older than the ancient limit - -// was already committed to disk and then sethead was called. In this test scenario -// the side chain is above the committed block. In this case the freezer will delete -// the sidechain since it's dangling, reverting to TestLongSnapSyncedDeepSetHead. -func TestLongNewerForkedSnapSyncedDeepSetHead(t *testing.T) { - testLongNewerForkedSnapSyncedDeepSetHead(t, false) -} -func TestLongNewerForkedSnapSyncedDeepSetHeadWithSnapshots(t *testing.T) { - testLongNewerForkedSnapSyncedDeepSetHead(t, true) -} - -func testLongNewerForkedSnapSyncedDeepSetHead(t *testing.T, snapshots bool) { - // Chain: - // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD) - // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12 - // - // Frozen: - // G->C1->C2->C3->C4->C5->C6->C7->C8 - // - // Commit: G, C4 - // Pivot : C4 - // - // SetHead(6) - // - // ------------------------------ - // - // Expected in freezer: - // G->C1->C2->C3->C4 - // - // Expected in leveldb: none - // - // Expected head header : C4 - // Expected head fast block: C4 - // Expected head block : C4 - testSetHead(t, &rewindTest{ - canonicalBlocks: 24, - sidechainBlocks: 12, - freezeThreshold: 16, - commitBlock: 4, - pivotBlock: uint64ptr(4), - setheadBlock: 6, - expCanonicalBlocks: 4, - expSidechainBlocks: 0, - expFrozen: 5, - expHeadHeader: 4, - expHeadFastBlock: 4, - expHeadBlock: 4, - }, snapshots) -} - -// Tests a sethead for a long canonical chain with frozen blocks and a shorter -// side chain, where the fast sync pivot point - newer than the ancient limit - -// was not yet committed, but sethead was called. In this test scenario the side -// chain is above the committed block. In this case the freezer will delete the -// sidechain since it's dangling, reverting to TestLongSnapSyncingShallowSetHead. 
-func TestLongNewerForkedSnapSyncingShallowSetHead(t *testing.T) { - testLongNewerForkedSnapSyncingShallowSetHead(t, false) -} -func TestLongNewerForkedSnapSyncingShallowSetHeadWithSnapshots(t *testing.T) { - testLongNewerForkedSnapSyncingShallowSetHead(t, true) -} - -func testLongNewerForkedSnapSyncingShallowSetHead(t *testing.T, snapshots bool) { - // Chain: - // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD) - // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12 - // - // Frozen: - // G->C1->C2 - // - // Commit: G - // Pivot : C4 - // - // SetHead(6) - // - // ------------------------------ - // - // Expected in freezer: - // G->C1->C2 - // - // Expected in leveldb: - // C2)->C3->C4->C5->C6 - // - // Expected head header : C6 - // Expected head fast block: C6 - // Expected head block : G - testSetHead(t, &rewindTest{ - canonicalBlocks: 18, - sidechainBlocks: 12, - freezeThreshold: 16, - commitBlock: 0, - pivotBlock: uint64ptr(4), - setheadBlock: 6, - expCanonicalBlocks: 6, - expSidechainBlocks: 0, - expFrozen: 3, - expHeadHeader: 6, - expHeadFastBlock: 6, - expHeadBlock: 0, - }, snapshots) -} - -// Tests a sethead for a long canonical chain with frozen blocks and a shorter -// side chain, where the fast sync pivot point - older than the ancient limit - -// was not yet committed, but sethead was called. In this test scenario the side -// chain is above the committed block. In this case the freezer will delete the -// sidechain since it's dangling, reverting to TestLongSnapSyncingDeepSetHead. -func TestLongNewerForkedSnapSyncingDeepSetHead(t *testing.T) { - testLongNewerForkedSnapSyncingDeepSetHead(t, false) -} -func TestLongNewerForkedSnapSyncingDeepSetHeadWithSnapshots(t *testing.T) { - testLongNewerForkedSnapSyncingDeepSetHead(t, true) -} - -func testLongNewerForkedSnapSyncingDeepSetHead(t *testing.T, snapshots bool) { - // Chain: - // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD) - // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12 - // - // Frozen: - // G->C1->C2->C3->C4->C5->C6->C7->C8 - // - // Commit: G - // Pivot : C4 - // - // SetHead(6) - // - // ------------------------------ - // - // Expected in freezer: - // G->C1->C2->C3->C4->C5->C6 - // - // Expected in leveldb: none - // - // Expected head header : C6 - // Expected head fast block: C6 - // Expected head block : G - testSetHead(t, &rewindTest{ - canonicalBlocks: 24, - sidechainBlocks: 12, - freezeThreshold: 16, - commitBlock: 0, - pivotBlock: uint64ptr(4), - setheadBlock: 6, - expCanonicalBlocks: 6, - expSidechainBlocks: 0, - expFrozen: 7, - expHeadHeader: 6, - expHeadFastBlock: 6, - expHeadBlock: 0, - }, snapshots) -} - -// Tests a sethead for a long canonical chain with frozen blocks and a longer side -// chain, where a recent block - newer than the ancient limit - was already committed -// to disk and then sethead was called. In this case the freezer will delete the -// sidechain since it's dangling, reverting to TestLongShallowSetHead. 
-func TestLongReorgedShallowSetHead(t *testing.T) { testLongReorgedShallowSetHead(t, false) } -func TestLongReorgedShallowSetHeadWithSnapshots(t *testing.T) { testLongReorgedShallowSetHead(t, true) } - -func testLongReorgedShallowSetHead(t *testing.T, snapshots bool) { - // Chain: - // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD) - // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26 - // - // Frozen: - // G->C1->C2 - // - // Commit: G, C4 - // Pivot : none - // - // SetHead(6) - // - // ------------------------------ - // - // Expected in freezer: - // G->C1->C2 - // - // Expected in leveldb: - // C2)->C3->C4->C5->C6 - // - // Expected head header : C6 - // Expected head fast block: C6 - // Expected head block : C4 - testSetHead(t, &rewindTest{ - canonicalBlocks: 18, - sidechainBlocks: 26, - freezeThreshold: 16, - commitBlock: 4, - pivotBlock: nil, - setheadBlock: 6, - expCanonicalBlocks: 6, - expSidechainBlocks: 0, - expFrozen: 3, - expHeadHeader: 6, - expHeadFastBlock: 6, - expHeadBlock: 4, - }, snapshots) -} - -// Tests a sethead for a long canonical chain with frozen blocks and a longer side -// chain, where a recent block - older than the ancient limit - was already committed -// to disk and then sethead was called. In this case the freezer will delete the -// sidechain since it's dangling, reverting to TestLongDeepSetHead. -func TestLongReorgedDeepSetHead(t *testing.T) { testLongReorgedDeepSetHead(t, false) } -func TestLongReorgedDeepSetHeadWithSnapshots(t *testing.T) { testLongReorgedDeepSetHead(t, true) } - -func testLongReorgedDeepSetHead(t *testing.T, snapshots bool) { - // Chain: - // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD) - // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26 - // - // Frozen: - // G->C1->C2->C3->C4->C5->C6->C7->C8 - // - // Commit: G, C4 - // Pivot : none - // - // SetHead(6) - // - // ------------------------------ - // - // Expected in freezer: - // G->C1->C2->C3->C4 - // - // Expected in leveldb: none - // - // Expected head header : C4 - // Expected head fast block: C4 - // Expected head block : C4 - testSetHead(t, &rewindTest{ - canonicalBlocks: 24, - sidechainBlocks: 26, - freezeThreshold: 16, - commitBlock: 4, - pivotBlock: nil, - setheadBlock: 6, - expCanonicalBlocks: 4, - expSidechainBlocks: 0, - expFrozen: 5, - expHeadHeader: 4, - expHeadFastBlock: 4, - expHeadBlock: 4, - }, snapshots) -} - -// Tests a sethead for a long canonical chain with frozen blocks and a longer -// side chain, where the fast sync pivot point - newer than the ancient limit - -// was already committed to disk and then sethead was called. In this case the -// freezer will delete the sidechain since it's dangling, reverting to -// TestLongSnapSyncedShallowSetHead. 
-func TestLongReorgedSnapSyncedShallowSetHead(t *testing.T) { - testLongReorgedSnapSyncedShallowSetHead(t, false) -} -func TestLongReorgedSnapSyncedShallowSetHeadWithSnapshots(t *testing.T) { - testLongReorgedSnapSyncedShallowSetHead(t, true) -} - -func testLongReorgedSnapSyncedShallowSetHead(t *testing.T, snapshots bool) { - // Chain: - // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD) - // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26 - // - // Frozen: - // G->C1->C2 - // - // Commit: G, C4 - // Pivot : C4 - // - // SetHead(6) - // - // ------------------------------ - // - // Expected in freezer: - // G->C1->C2 - // - // Expected in leveldb: - // C2)->C3->C4->C5->C6 - // - // Expected head header : C6 - // Expected head fast block: C6 - // Expected head block : C4 - testSetHead(t, &rewindTest{ - canonicalBlocks: 18, - sidechainBlocks: 26, - freezeThreshold: 16, - commitBlock: 4, - pivotBlock: uint64ptr(4), - setheadBlock: 6, - expCanonicalBlocks: 6, - expSidechainBlocks: 0, - expFrozen: 3, - expHeadHeader: 6, - expHeadFastBlock: 6, - expHeadBlock: 4, - }, snapshots) -} - -// Tests a sethead for a long canonical chain with frozen blocks and a longer -// side chain, where the fast sync pivot point - older than the ancient limit - -// was already committed to disk and then sethead was called. In this case the -// freezer will delete the sidechain since it's dangling, reverting to -// TestLongSnapSyncedDeepSetHead. -func TestLongReorgedSnapSyncedDeepSetHead(t *testing.T) { - testLongReorgedSnapSyncedDeepSetHead(t, false) -} -func TestLongReorgedSnapSyncedDeepSetHeadWithSnapshots(t *testing.T) { - testLongReorgedSnapSyncedDeepSetHead(t, true) -} - -func testLongReorgedSnapSyncedDeepSetHead(t *testing.T, snapshots bool) { - // Chain: - // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD) - // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26 - // - // Frozen: - // G->C1->C2->C3->C4->C5->C6->C7->C8 - // - // Commit: G, C4 - // Pivot : C4 - // - // SetHead(6) - // - // ------------------------------ - // - // Expected in freezer: - // G->C1->C2->C3->C4 - // - // Expected in leveldb: none - // - // Expected head header : C4 - // Expected head fast block: C4 - // Expected head block : C4 - testSetHead(t, &rewindTest{ - canonicalBlocks: 24, - sidechainBlocks: 26, - freezeThreshold: 16, - commitBlock: 4, - pivotBlock: uint64ptr(4), - setheadBlock: 6, - expCanonicalBlocks: 4, - expSidechainBlocks: 0, - expFrozen: 5, - expHeadHeader: 4, - expHeadFastBlock: 4, - expHeadBlock: 4, - }, snapshots) -} - -// Tests a sethead for a long canonical chain with frozen blocks and a longer -// side chain, where the fast sync pivot point - newer than the ancient limit - -// was not yet committed, but sethead was called. In this case we expect the -// chain to detect that it was fast syncing and delete everything from the new -// head, since we can just pick up fast syncing from there. The side chain is -// completely nuked by the freezer. 
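// Editor's illustration, not part of the deleted file: the sethead cases above all
// follow one recovery rule. After SetHead(target) the head *block* is the highest
// block at or below the target whose state was actually committed to disk; with an
// uncommitted snap-sync pivot only the genesis state qualifies, so the head block
// drops back to G. A minimal, self-contained sketch of that rule; the function name
// and shape are hypothetical, not geth APIs.

package main

import "fmt"

// newHeadBlock models the expHeadBlock expectations of the rewindTest cases:
// the post-rewind head block is the last committed state at or below the
// SetHead target, falling back to genesis (0) when no usable state remains.
func newHeadBlock(target, commitBlock uint64) uint64 {
	if commitBlock <= target {
		return commitBlock
	}
	return 0 // committed state above the new head is lost by the rewind
}

func main() {
	fmt.Println(newHeadBlock(6, 4)) // committed C4, SetHead(6) -> head block C4
	fmt.Println(newHeadBlock(6, 0)) // only genesis committed   -> head block G
}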
-func TestLongReorgedSnapSyncingShallowSetHead(t *testing.T) { - testLongReorgedSnapSyncingShallowSetHead(t, false) -} -func TestLongReorgedSnapSyncingShallowSetHeadWithSnapshots(t *testing.T) { - testLongReorgedSnapSyncingShallowSetHead(t, true) -} - -func testLongReorgedSnapSyncingShallowSetHead(t *testing.T, snapshots bool) { - // Chain: - // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18 (HEAD) - // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26 - // - // Frozen: - // G->C1->C2 - // - // Commit: G - // Pivot : C4 - // - // SetHead(6) - // - // ------------------------------ - // - // Expected in freezer: - // G->C1->C2 - // - // Expected in leveldb: - // C2)->C3->C4->C5->C6 - // - // Expected head header : C6 - // Expected head fast block: C6 - // Expected head block : G - testSetHead(t, &rewindTest{ - canonicalBlocks: 18, - sidechainBlocks: 26, - freezeThreshold: 16, - commitBlock: 0, - pivotBlock: uint64ptr(4), - setheadBlock: 6, - expCanonicalBlocks: 6, - expSidechainBlocks: 0, - expFrozen: 3, - expHeadHeader: 6, - expHeadFastBlock: 6, - expHeadBlock: 0, - }, snapshots) -} - -// Tests a sethead for a long canonical chain with frozen blocks and a longer -// side chain, where the fast sync pivot point - older than the ancient limit - -// was not yet committed, but sethead was called. In this case we expect the -// chain to detect that it was fast syncing and delete everything from the new -// head, since we can just pick up fast syncing from there. The side chain is -// completely nuked by the freezer. -func TestLongReorgedSnapSyncingDeepSetHead(t *testing.T) { - testLongReorgedSnapSyncingDeepSetHead(t, false) -} -func TestLongReorgedSnapSyncingDeepSetHeadWithSnapshots(t *testing.T) { - testLongReorgedSnapSyncingDeepSetHead(t, true) -} - -func testLongReorgedSnapSyncingDeepSetHead(t *testing.T, snapshots bool) { - // Chain: - // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10->C11->C12->C13->C14->C15->C16->C17->C18->C19->C20->C21->C22->C23->C24 (HEAD) - // └->S1->S2->S3->S4->S5->S6->S7->S8->S9->S10->S11->S12->S13->S14->S15->S16->S17->S18->S19->S20->S21->S22->S23->S24->S25->S26 - // - // Frozen: - // G->C1->C2->C3->C4->C5->C6->C7->C8 - // - // Commit: G - // Pivot : C4 - // - // SetHead(6) - // - // ------------------------------ - // - // Expected in freezer: - // G->C1->C2->C3->C4->C5->C6 - // - // Expected in leveldb: none - // - // Expected head header : C6 - // Expected head fast block: C6 - // Expected head block : G - testSetHead(t, &rewindTest{ - canonicalBlocks: 24, - sidechainBlocks: 26, - freezeThreshold: 16, - commitBlock: 0, - pivotBlock: uint64ptr(4), - setheadBlock: 6, - expCanonicalBlocks: 6, - expSidechainBlocks: 0, - expFrozen: 7, - expHeadHeader: 6, - expHeadFastBlock: 6, - expHeadBlock: 0, - }, snapshots) -} - -func testSetHead(t *testing.T, tt *rewindTest, snapshots bool) { - for _, scheme := range []string{rawdb.HashScheme, rawdb.PathScheme} { - testSetHeadWithScheme(t, tt, snapshots, scheme) - } -} - -func testSetHeadWithScheme(t *testing.T, tt *rewindTest, snapshots bool, scheme string) { - // It's hard to follow the test case, visualize the input - // log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(true)))) - // fmt.Println(tt.dump(false)) - - // Create a temporary persistent database - datadir := t.TempDir() - ancient := path.Join(datadir, "ancient") - - db, err := rawdb.Open(rawdb.OpenOptions{ - 
Directory: datadir, - AncientsDirectory: ancient, - Ephemeral: true, - }) - if err != nil { - t.Fatalf("Failed to create persistent database: %v", err) - } - defer db.Close() - - // Initialize a fresh chain - var ( - gspec = &Genesis{ - BaseFee: big.NewInt(params.InitialBaseFee), - Config: params.AllEthashProtocolChanges, - } - engine = ethash.NewFullFaker() - config = &CacheConfig{ - TrieCleanLimit: 256, - TrieDirtyLimit: 256, - TrieTimeLimit: 5 * time.Minute, - SnapshotLimit: 0, // Disable snapshot - StateScheme: scheme, - } - ) - if snapshots { - config.SnapshotLimit = 256 - config.SnapshotWait = true - } - config.TriesInMemory = 128 - chain, err := NewBlockChain(db, config, gspec, nil, engine, vm.Config{}, nil, nil) - if err != nil { - t.Fatalf("Failed to create chain: %v", err) - } - defer chain.Stop() - - // If sidechain blocks are needed, make a light chain and import it - var sideblocks types.Blocks - if tt.sidechainBlocks > 0 { - sideblocks, _ = GenerateChain(gspec.Config, gspec.ToBlock(), engine, rawdb.NewMemoryDatabase(), tt.sidechainBlocks, func(i int, b *BlockGen) { - b.SetCoinbase(common.Address{0x01}) - }) - if _, err := chain.InsertChain(sideblocks); err != nil { - t.Fatalf("Failed to import side chain: %v", err) - } - } - canonblocks, _ := GenerateChain(gspec.Config, gspec.ToBlock(), engine, rawdb.NewMemoryDatabase(), tt.canonicalBlocks, func(i int, b *BlockGen) { - b.SetCoinbase(common.Address{0x02}) - b.SetDifficulty(big.NewInt(1000000)) - }) - if _, err := chain.InsertChain(canonblocks[:tt.commitBlock]); err != nil { - t.Fatalf("Failed to import canonical chain start: %v", err) - } - if tt.commitBlock > 0 { - chain.triedb.Commit(canonblocks[tt.commitBlock-1].Root(), false) - if snapshots { - if err := chain.snaps.Cap(canonblocks[tt.commitBlock-1].Root(), 0); err != nil { - t.Fatalf("Failed to flatten snapshots: %v", err) - } - } - } - if _, err := chain.InsertChain(canonblocks[tt.commitBlock:]); err != nil { - t.Fatalf("Failed to import canonical chain tail: %v", err) - } - // Reopen the trie database without persisting in-memory dirty nodes. 
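// Editor's note, not part of the deleted file: recreating the trie database from
// scratch below, instead of committing it, deliberately drops every dirty trie node
// that only ever lived in memory. From this point on the only state SetHead can
// rewind to is what the test explicitly committed above, which is exactly what the
// expHeadBlock expectations encode.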
- chain.triedb.Close() - dbconfig := &triedb.Config{} - if scheme == rawdb.PathScheme { - dbconfig.PathDB = pathdb.Defaults - } else { - dbconfig.HashDB = hashdb.Defaults - } - chain.triedb = triedb.NewDatabase(chain.db, dbconfig) - chain.stateCache = state.NewDatabaseWithNodeDB(chain.db, chain.triedb) - - // Force run a freeze cycle - type freezer interface { - Freeze(threshold uint64) error - Ancients() (uint64, error) - } - db.(freezer).Freeze(tt.freezeThreshold) - - // Set the simulated pivot block - if tt.pivotBlock != nil { - rawdb.WriteLastPivotNumber(db, *tt.pivotBlock) - } - // Set the head of the chain back to the requested number - chain.SetHead(tt.setheadBlock) - - // Iterate over all the remaining blocks and ensure there are no gaps - verifyNoGaps(t, chain, true, canonblocks) - verifyNoGaps(t, chain, false, sideblocks) - verifyCutoff(t, chain, true, canonblocks, tt.expCanonicalBlocks) - verifyCutoff(t, chain, false, sideblocks, tt.expSidechainBlocks) - - if head := chain.CurrentHeader(); head.Number.Uint64() != tt.expHeadHeader { - t.Errorf("Head header mismatch: have %d, want %d", head.Number, tt.expHeadHeader) - } - if head := chain.CurrentSnapBlock(); head.Number.Uint64() != tt.expHeadFastBlock { - t.Errorf("Head fast block mismatch: have %d, want %d", head.Number, tt.expHeadFastBlock) - } - if head := chain.CurrentBlock(); head.Number.Uint64() != tt.expHeadBlock { - t.Errorf("Head block mismatch: have %d, want %d", head.Number, tt.expHeadBlock) - } - if frozen, err := db.(freezer).Ancients(); err != nil { - t.Errorf("Failed to retrieve ancient count: %v\n", err) - } else if int(frozen) != tt.expFrozen { - t.Errorf("Frozen block count mismatch: have %d, want %d", frozen, tt.expFrozen) - } -} - -// verifyNoGaps checks that there are no gaps after the initial set of blocks in -// the database and errors if found. -func verifyNoGaps(t *testing.T, chain *BlockChain, canonical bool, inserted types.Blocks) { - t.Helper() - - var end uint64 - for i := uint64(0); i <= uint64(len(inserted)); i++ { - header := chain.GetHeaderByNumber(i) - if header == nil && end == 0 { - end = i - } - if header != nil && end > 0 { - if canonical { - t.Errorf("Canonical header gap between #%d-#%d", end, i-1) - } else { - t.Errorf("Sidechain header gap between #%d-#%d", end, i-1) - } - end = 0 // Reset for further gap detection - } - } - end = 0 - for i := uint64(0); i <= uint64(len(inserted)); i++ { - block := chain.GetBlockByNumber(i) - if block == nil && end == 0 { - end = i - } - if block != nil && end > 0 { - if canonical { - t.Errorf("Canonical block gap between #%d-#%d", end, i-1) - } else { - t.Errorf("Sidechain block gap between #%d-#%d", end, i-1) - } - end = 0 // Reset for further gap detection - } - } - end = 0 - for i := uint64(1); i <= uint64(len(inserted)); i++ { - receipts := chain.GetReceiptsByHash(inserted[i-1].Hash()) - if receipts == nil && end == 0 { - end = i - } - if receipts != nil && end > 0 { - if canonical { - t.Errorf("Canonical receipt gap between #%d-#%d", end, i-1) - } else { - t.Errorf("Sidechain receipt gap between #%d-#%d", end, i-1) - } - end = 0 // Reset for further gap detection - } - } -} - -// verifyCutoff checks that there are no chain data available in the chain after -// the specified limit, but that it is available before. 
-func verifyCutoff(t *testing.T, chain *BlockChain, canonical bool, inserted types.Blocks, head int) { - t.Helper() - - for i := 1; i <= len(inserted); i++ { - if i <= head { - if header := chain.GetHeader(inserted[i-1].Hash(), uint64(i)); header == nil { - if canonical { - t.Errorf("Canonical header #%2d [%x...] missing before cap %d", inserted[i-1].Number(), inserted[i-1].Hash().Bytes()[:3], head) - } else { - t.Errorf("Sidechain header #%2d [%x...] missing before cap %d", inserted[i-1].Number(), inserted[i-1].Hash().Bytes()[:3], head) - } - } - if block := chain.GetBlock(inserted[i-1].Hash(), uint64(i)); block == nil { - if canonical { - t.Errorf("Canonical block #%2d [%x...] missing before cap %d", inserted[i-1].Number(), inserted[i-1].Hash().Bytes()[:3], head) - } else { - t.Errorf("Sidechain block #%2d [%x...] missing before cap %d", inserted[i-1].Number(), inserted[i-1].Hash().Bytes()[:3], head) - } - } - if receipts := chain.GetReceiptsByHash(inserted[i-1].Hash()); receipts == nil { - if canonical { - t.Errorf("Canonical receipts #%2d [%x...] missing before cap %d", inserted[i-1].Number(), inserted[i-1].Hash().Bytes()[:3], head) - } else { - t.Errorf("Sidechain receipts #%2d [%x...] missing before cap %d", inserted[i-1].Number(), inserted[i-1].Hash().Bytes()[:3], head) - } - } - } else { - if header := chain.GetHeader(inserted[i-1].Hash(), uint64(i)); header != nil { - if canonical { - t.Errorf("Canonical header #%2d [%x...] present after cap %d", inserted[i-1].Number(), inserted[i-1].Hash().Bytes()[:3], head) - } else { - t.Errorf("Sidechain header #%2d [%x...] present after cap %d", inserted[i-1].Number(), inserted[i-1].Hash().Bytes()[:3], head) - } - } - if block := chain.GetBlock(inserted[i-1].Hash(), uint64(i)); block != nil { - if canonical { - t.Errorf("Canonical block #%2d [%x...] present after cap %d", inserted[i-1].Number(), inserted[i-1].Hash().Bytes()[:3], head) - } else { - t.Errorf("Sidechain block #%2d [%x...] present after cap %d", inserted[i-1].Number(), inserted[i-1].Hash().Bytes()[:3], head) - } - } - if receipts := chain.GetReceiptsByHash(inserted[i-1].Hash()); receipts != nil { - if canonical { - t.Errorf("Canonical receipts #%2d [%x...] present after cap %d", inserted[i-1].Number(), inserted[i-1].Hash().Bytes()[:3], head) - } else { - t.Errorf("Sidechain receipts #%2d [%x...] present after cap %d", inserted[i-1].Number(), inserted[i-1].Hash().Bytes()[:3], head) - } - } - } - } -} - -// uint64ptr is a weird helper to allow 1-line constant pointer creation. -func uint64ptr(n uint64) *uint64 { - return &n -} diff --git a/core/blockchain_snapshot_test.go b/core/blockchain_snapshot_test.go deleted file mode 100644 index b2dbe5cb2a..0000000000 --- a/core/blockchain_snapshot_test.go +++ /dev/null @@ -1,720 +0,0 @@ -// Copyright 2020 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. 
If not, see . - -// Tests that abnormal program termination (i.e.crash) and restart can recovery -// the snapshot properly if the snapshot is enabled. - -package core - -import ( - "bytes" - "fmt" - "math/big" - "os" - "path" - "strings" - "testing" - "time" - - "github.com/ethereum/go-ethereum/consensus" - "github.com/ethereum/go-ethereum/consensus/ethash" - "github.com/ethereum/go-ethereum/core/rawdb" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/core/vm" - "github.com/ethereum/go-ethereum/ethdb" - "github.com/ethereum/go-ethereum/params" -) - -// snapshotTestBasic wraps the common testing fields in the snapshot tests. -type snapshotTestBasic struct { - scheme string // Disk scheme used for storing trie nodes - chainBlocks int // Number of blocks to generate for the canonical chain - snapshotBlock uint64 // Block number of the relevant snapshot disk layer - commitBlock uint64 // Block number for which to commit the state to disk - - expCanonicalBlocks int // Number of canonical blocks expected to remain in the database (excl. genesis) - expHeadHeader uint64 // Block number of the expected head header - expHeadFastBlock uint64 // Block number of the expected head fast sync block - expHeadBlock uint64 // Block number of the expected head full block - expSnapshotBottom uint64 // The block height corresponding to the snapshot disk layer - - // share fields, set in runtime - datadir string - ancient string - db ethdb.Database - genDb ethdb.Database - engine consensus.Engine - gspec *Genesis -} - -func (basic *snapshotTestBasic) prepare(t *testing.T) (*BlockChain, []*types.Block) { - // Create a temporary persistent database - datadir := t.TempDir() - ancient := path.Join(datadir, "ancient") - - db, err := rawdb.Open(rawdb.OpenOptions{ - Directory: datadir, - AncientsDirectory: ancient, - Ephemeral: true, - }) - if err != nil { - t.Fatalf("Failed to create persistent database: %v", err) - } - // Initialize a fresh chain - var ( - gspec = &Genesis{ - BaseFee: big.NewInt(params.InitialBaseFee), - Config: params.AllEthashProtocolChanges, - } - engine = ethash.NewFullFaker() - ) - chain, err := NewBlockChain(db, DefaultCacheConfigWithScheme(basic.scheme), gspec, nil, engine, vm.Config{}, nil, nil) - if err != nil { - t.Fatalf("Failed to create chain: %v", err) - } - genDb, blocks, _ := GenerateChainWithGenesis(gspec, engine, basic.chainBlocks, func(i int, b *BlockGen) {}) - - // Insert the blocks with configured settings. - var breakpoints []uint64 - if basic.commitBlock > basic.snapshotBlock { - breakpoints = append(breakpoints, basic.snapshotBlock, basic.commitBlock) - } else { - breakpoints = append(breakpoints, basic.commitBlock, basic.snapshotBlock) - } - var startPoint uint64 - for _, point := range breakpoints { - if _, err := chain.InsertChain(blocks[startPoint:point]); err != nil { - t.Fatalf("Failed to import canonical chain start: %v", err) - } - startPoint = point - - if basic.commitBlock > 0 && basic.commitBlock == point { - chain.TrieDB().Commit(blocks[point-1].Root(), false) - } - if basic.snapshotBlock > 0 && basic.snapshotBlock == point { - // Flushing the entire snap tree into the disk, the - // relevant (a) snapshot root and (b) snapshot generator - // will be persisted atomically. 
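// Editor's note, not part of the deleted file: Cap(root, layers) retains at most
// `layers` diff layers above the persistent disk layer, so calling it with 0 below
// flattens every in-memory diff layer into disk. That is why the test can
// immediately require DiskRoot() to match the root of the block it capped at.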
- chain.snaps.Cap(blocks[point-1].Root(), 0) - diskRoot, blockRoot := chain.snaps.DiskRoot(), blocks[point-1].Root() - if !bytes.Equal(diskRoot.Bytes(), blockRoot.Bytes()) { - t.Fatalf("Failed to flush disk layer change, want %x, got %x", blockRoot, diskRoot) - } - } - } - if _, err := chain.InsertChain(blocks[startPoint:]); err != nil { - t.Fatalf("Failed to import canonical chain tail: %v", err) - } - - // Set runtime fields - basic.datadir = datadir - basic.ancient = ancient - basic.db = db - basic.genDb = genDb - basic.engine = engine - basic.gspec = gspec - return chain, blocks -} - -func (basic *snapshotTestBasic) verify(t *testing.T, chain *BlockChain, blocks []*types.Block) { - // Iterate over all the remaining blocks and ensure there are no gaps - verifyNoGaps(t, chain, true, blocks) - verifyCutoff(t, chain, true, blocks, basic.expCanonicalBlocks) - - if head := chain.CurrentHeader(); head.Number.Uint64() != basic.expHeadHeader { - t.Errorf("Head header mismatch: have %d, want %d", head.Number, basic.expHeadHeader) - } - if head := chain.CurrentSnapBlock(); head.Number.Uint64() != basic.expHeadFastBlock { - t.Errorf("Head fast block mismatch: have %d, want %d", head.Number, basic.expHeadFastBlock) - } - if head := chain.CurrentBlock(); head.Number.Uint64() != basic.expHeadBlock { - t.Errorf("Head block mismatch: have %d, want %d", head.Number, basic.expHeadBlock) - } - - // Check the disk layer, ensure they are matched - block := chain.GetBlockByNumber(basic.expSnapshotBottom) - if block == nil { - t.Errorf("The corresponding block[%d] of snapshot disk layer is missing", basic.expSnapshotBottom) - } else if !bytes.Equal(chain.snaps.DiskRoot().Bytes(), block.Root().Bytes()) { - t.Errorf("The snapshot disk layer root is incorrect, want %x, get %x", block.Root(), chain.snaps.DiskRoot()) - } - - // Check the snapshot, ensure it's integrated - if err := chain.snaps.Verify(block.Root()); err != nil { - t.Errorf("The disk layer is not integrated %v", err) - } -} - -//nolint:unused -func (basic *snapshotTestBasic) dump() string { - buffer := new(strings.Builder) - - fmt.Fprint(buffer, "Chain:\n G") - for i := 0; i < basic.chainBlocks; i++ { - fmt.Fprintf(buffer, "->C%d", i+1) - } - fmt.Fprint(buffer, " (HEAD)\n\n") - - fmt.Fprintf(buffer, "Commit: G") - if basic.commitBlock > 0 { - fmt.Fprintf(buffer, ", C%d", basic.commitBlock) - } - fmt.Fprint(buffer, "\n") - - fmt.Fprintf(buffer, "Snapshot: G") - if basic.snapshotBlock > 0 { - fmt.Fprintf(buffer, ", C%d", basic.snapshotBlock) - } - fmt.Fprint(buffer, "\n") - - //if crash { - // fmt.Fprintf(buffer, "\nCRASH\n\n") - //} else { - // fmt.Fprintf(buffer, "\nSetHead(%d)\n\n", basic.setHead) - //} - fmt.Fprintf(buffer, "------------------------------\n\n") - - fmt.Fprint(buffer, "Expected in leveldb:\n G") - for i := 0; i < basic.expCanonicalBlocks; i++ { - fmt.Fprintf(buffer, "->C%d", i+1) - } - fmt.Fprintf(buffer, "\n\n") - fmt.Fprintf(buffer, "Expected head header : C%d\n", basic.expHeadHeader) - fmt.Fprintf(buffer, "Expected head fast block: C%d\n", basic.expHeadFastBlock) - if basic.expHeadBlock == 0 { - fmt.Fprintf(buffer, "Expected head block : G\n") - } else { - fmt.Fprintf(buffer, "Expected head block : C%d\n", basic.expHeadBlock) - } - if basic.expSnapshotBottom == 0 { - fmt.Fprintf(buffer, "Expected snapshot disk : G\n") - } else { - fmt.Fprintf(buffer, "Expected snapshot disk : C%d\n", basic.expSnapshotBottom) - } - return buffer.String() -} - -func (basic *snapshotTestBasic) teardown() { - basic.db.Close() - basic.genDb.Close() - 
os.RemoveAll(basic.datadir) - os.RemoveAll(basic.ancient) -} - -// snapshotTest is a test case type for normal snapshot recovery. -// It can be used for testing that restart Geth normally. -type snapshotTest struct { - snapshotTestBasic -} - -func (snaptest *snapshotTest) test(t *testing.T) { - // It's hard to follow the test case, visualize the input - // log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(true)))) - // fmt.Println(tt.dump()) - chain, blocks := snaptest.prepare(t) - - // Restart the chain normally - chain.Stop() - newchain, err := NewBlockChain(snaptest.db, DefaultCacheConfigWithScheme(snaptest.scheme), snaptest.gspec, nil, snaptest.engine, vm.Config{}, nil, nil) - if err != nil { - t.Fatalf("Failed to recreate chain: %v", err) - } - defer newchain.Stop() - - snaptest.verify(t, newchain, blocks) -} - -// crashSnapshotTest is a test case type for irregular snapshot recovery. -// It can be used for testing that restart Geth after the crash. -type crashSnapshotTest struct { - snapshotTestBasic -} - -func (snaptest *crashSnapshotTest) test(t *testing.T) { - // It's hard to follow the test case, visualize the input - // log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(true)))) - // fmt.Println(tt.dump()) - chain, blocks := snaptest.prepare(t) - - // Pull the plug on the database, simulating a hard crash - db := chain.db - db.Close() - chain.stopWithoutSaving() - chain.triedb.Close() - - // Start a new blockchain back up and see where the repair leads us - newdb, err := rawdb.Open(rawdb.OpenOptions{ - Directory: snaptest.datadir, - AncientsDirectory: snaptest.ancient, - Ephemeral: true, - }) - if err != nil { - t.Fatalf("Failed to reopen persistent database: %v", err) - } - defer newdb.Close() - - // The interesting thing is: instead of starting the blockchain after - // the crash, we do restart twice here: one after the crash and one - // after the normal stop. It's used to ensure the broken snapshot - // can be detected all the time. - newchain, err := NewBlockChain(newdb, DefaultCacheConfigWithScheme(snaptest.scheme), snaptest.gspec, nil, snaptest.engine, vm.Config{}, nil, nil) - if err != nil { - t.Fatalf("Failed to recreate chain: %v", err) - } - newchain.Stop() - - newchain, err = NewBlockChain(newdb, DefaultCacheConfigWithScheme(snaptest.scheme), snaptest.gspec, nil, snaptest.engine, vm.Config{}, nil, nil) - if err != nil { - t.Fatalf("Failed to recreate chain: %v", err) - } - defer newchain.Stop() - - snaptest.verify(t, newchain, blocks) -} - -// gappedSnapshotTest is a test type used to test this scenario: -// - have a complete snapshot -// - restart without enabling the snapshot -// - insert a few blocks -// - restart with enabling the snapshot again -type gappedSnapshotTest struct { - snapshotTestBasic - gapped int // Number of blocks to insert without enabling snapshot -} - -func (snaptest *gappedSnapshotTest) test(t *testing.T) { - // It's hard to follow the test case, visualize the input - // log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(true)))) - // fmt.Println(tt.dump()) - chain, blocks := snaptest.prepare(t) - - // Insert blocks without enabling snapshot if gapping is required. 
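// Editor's note, not part of the deleted file: SnapshotLimit is the memory
// allowance (in MB) for the snapshot layers, and setting it to 0 in the cache
// config below disables snapshot maintenance for that run. The gapped blocks
// therefore advance the chain head while the persisted snapshot stays where it
// was, and the snapshot-enabled restart afterwards has to rebuild the snapshot at
// the new head, which is what expSnapshotBottom in TestGappedNewSnapshot records.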
- chain.Stop() - gappedBlocks, _ := GenerateChain(snaptest.gspec.Config, blocks[len(blocks)-1], snaptest.engine, snaptest.genDb, snaptest.gapped, func(i int, b *BlockGen) {}) - - // Insert a few more blocks without enabling snapshot - var cacheConfig = &CacheConfig{ - TrieCleanLimit: 256, - TrieDirtyLimit: 256, - TrieTimeLimit: 5 * time.Minute, - TriesInMemory: 128, - SnapshotLimit: 0, - StateScheme: snaptest.scheme, - } - newchain, err := NewBlockChain(snaptest.db, cacheConfig, snaptest.gspec, nil, snaptest.engine, vm.Config{}, nil, nil) - if err != nil { - t.Fatalf("Failed to recreate chain: %v", err) - } - newchain.InsertChain(gappedBlocks) - newchain.Stop() - - // Restart the chain with enabling the snapshot - newchain, err = NewBlockChain(snaptest.db, DefaultCacheConfigWithScheme(snaptest.scheme), snaptest.gspec, nil, snaptest.engine, vm.Config{}, nil, nil) - if err != nil { - t.Fatalf("Failed to recreate chain: %v", err) - } - defer newchain.Stop() - - snaptest.verify(t, newchain, blocks) -} - -// setHeadSnapshotTest is the test type used to test this scenario: -// - have a complete snapshot -// - set the head to a lower point -// - restart -type setHeadSnapshotTest struct { - snapshotTestBasic - setHead uint64 // Block number to set head back to -} - -func (snaptest *setHeadSnapshotTest) test(t *testing.T) { - // It's hard to follow the test case, visualize the input - // log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(true)))) - // fmt.Println(tt.dump()) - chain, blocks := snaptest.prepare(t) - - // Rewind the chain if setHead operation is required. - chain.SetHead(snaptest.setHead) - chain.Stop() - - newchain, err := NewBlockChain(snaptest.db, DefaultCacheConfigWithScheme(snaptest.scheme), snaptest.gspec, nil, snaptest.engine, vm.Config{}, nil, nil) - if err != nil { - t.Fatalf("Failed to recreate chain: %v", err) - } - defer newchain.Stop() - - snaptest.verify(t, newchain, blocks) -} - -// wipeCrashSnapshotTest is the test type used to test this scenario: -// - have a complete snapshot -// - restart, insert more blocks without enabling the snapshot -// - restart again with enabling the snapshot -// - crash -type wipeCrashSnapshotTest struct { - snapshotTestBasic - newBlocks int -} - -func (snaptest *wipeCrashSnapshotTest) test(t *testing.T) { - // It's hard to follow the test case, visualize the input - // log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(true)))) - // fmt.Println(tt.dump()) - chain, blocks := snaptest.prepare(t) - - // Firstly, stop the chain properly, with all snapshot journal - // and state committed. 
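// Editor's note, not part of the deleted file: this scenario first stops the chain
// cleanly so the snapshot journal and state are persisted, then imports more blocks
// with snapshots disabled, leaving the persisted snapshot behind the head. The
// following snapshot-enabled restart (SnapshotWait: false) kicks off the
// wipe/regeneration in the background, the simulated crash interrupts it, and the
// final restart is expected to still converge on a consistent snapshot at the head.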
- chain.Stop() - - config := &CacheConfig{ - TrieCleanLimit: 256, - TrieDirtyLimit: 256, - TrieTimeLimit: 5 * time.Minute, - SnapshotLimit: 0, - TriesInMemory: 128, - StateScheme: snaptest.scheme, - } - newchain, err := NewBlockChain(snaptest.db, config, snaptest.gspec, nil, snaptest.engine, vm.Config{}, nil, nil) - if err != nil { - t.Fatalf("Failed to recreate chain: %v", err) - } - newBlocks, _ := GenerateChain(snaptest.gspec.Config, blocks[len(blocks)-1], snaptest.engine, snaptest.genDb, snaptest.newBlocks, func(i int, b *BlockGen) {}) - newchain.InsertChain(newBlocks) - newchain.Stop() - - // Restart the chain, the wiper should start working - config = &CacheConfig{ - TrieCleanLimit: 256, - TrieDirtyLimit: 256, - TrieTimeLimit: 5 * time.Minute, - SnapshotLimit: 256, - SnapshotWait: false, // Don't wait rebuild - TriesInMemory: 128, - StateScheme: snaptest.scheme, - } - tmp, err := NewBlockChain(snaptest.db, config, snaptest.gspec, nil, snaptest.engine, vm.Config{}, nil, nil) - if err != nil { - t.Fatalf("Failed to recreate chain: %v", err) - } - - // Simulate the blockchain crash. - tmp.triedb.Close() - tmp.stopWithoutSaving() - - newchain, err = NewBlockChain(snaptest.db, DefaultCacheConfigWithScheme(snaptest.scheme), snaptest.gspec, nil, snaptest.engine, vm.Config{}, nil, nil) - if err != nil { - t.Fatalf("Failed to recreate chain: %v", err) - } - snaptest.verify(t, newchain, blocks) - newchain.Stop() -} - -// Tests a Geth restart with valid snapshot. Before the shutdown, all snapshot -// journal will be persisted correctly. In this case no snapshot recovery is -// required. -func TestRestartWithNewSnapshot(t *testing.T) { - // Chain: - // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD) - // - // Commit: G - // Snapshot: G - // - // SetHead(0) - // - // ------------------------------ - // - // Expected in leveldb: - // G->C1->C2->C3->C4->C5->C6->C7->C8 - // - // Expected head header : C8 - // Expected head fast block: C8 - // Expected head block : C8 - // Expected snapshot disk : G - for _, scheme := range []string{rawdb.HashScheme, rawdb.PathScheme} { - test := &snapshotTest{ - snapshotTestBasic{ - scheme: scheme, - chainBlocks: 8, - snapshotBlock: 0, - commitBlock: 0, - expCanonicalBlocks: 8, - expHeadHeader: 8, - expHeadFastBlock: 8, - expHeadBlock: 8, - expSnapshotBottom: 0, // Initial disk layer built from genesis - }, - } - test.test(t) - test.teardown() - } -} - -// Tests a Geth was crashed and restarts with a broken snapshot. In this case the -// chain head should be rewound to the point with available state. And also the -// new head should must be lower than disk layer. But there is no committed point -// so the chain should be rewound to genesis and the disk layer should be left -// for recovery. 
-func TestNoCommitCrashWithNewSnapshot(t *testing.T) { - // Chain: - // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD) - // - // Commit: G - // Snapshot: G, C4 - // - // CRASH - // - // ------------------------------ - // - // Expected in leveldb: - // G->C1->C2->C3->C4->C5->C6->C7->C8 - // - // Expected head header : C8 - // Expected head fast block: C8 - // Expected head block : G - // Expected snapshot disk : C4 - // TODO - //for _, scheme := range []string{rawdb.HashScheme, rawdb.PathScheme} { - for _, scheme := range []string{rawdb.HashScheme} { - test := &crashSnapshotTest{ - snapshotTestBasic{ - scheme: scheme, - chainBlocks: 8, - snapshotBlock: 4, - commitBlock: 0, - expCanonicalBlocks: 8, - expHeadHeader: 8, - expHeadFastBlock: 8, - expHeadBlock: 0, - expSnapshotBottom: 4, // Last committed disk layer, wait recovery - }, - } - test.test(t) - test.teardown() - } -} - -// Tests a Geth was crashed and restarts with a broken snapshot. In this case the -// chain head should be rewound to the point with available state. And also the -// new head should must be lower than disk layer. But there is only a low committed -// point so the chain should be rewound to committed point and the disk layer -// should be left for recovery. -func TestLowCommitCrashWithNewSnapshot(t *testing.T) { - // Chain: - // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD) - // - // Commit: G, C2 - // Snapshot: G, C4 - // - // CRASH - // - // ------------------------------ - // - // Expected in leveldb: - // G->C1->C2->C3->C4->C5->C6->C7->C8 - // - // Expected head header : C8 - // Expected head fast block: C8 - // Expected head block : C2 - // Expected snapshot disk : C4 - // TODO - //for _, scheme := range []string{rawdb.HashScheme, rawdb.PathScheme} { - for _, scheme := range []string{rawdb.HashScheme} { - test := &crashSnapshotTest{ - snapshotTestBasic{ - scheme: scheme, - chainBlocks: 8, - snapshotBlock: 4, - commitBlock: 2, - expCanonicalBlocks: 8, - expHeadHeader: 8, - expHeadFastBlock: 8, - expHeadBlock: 2, - expSnapshotBottom: 4, // Last committed disk layer, wait recovery - }, - } - test.test(t) - test.teardown() - } -} - -// Tests a Geth was crashed and restarts with a broken snapshot. In this case -// the chain head should be rewound to the point with available state. And also -// the new head should must be lower than disk layer. But there is only a high -// committed point so the chain should be rewound to genesis and the disk layer -// should be left for recovery. -func TestHighCommitCrashWithNewSnapshot(t *testing.T) { - // Chain: - // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD) - // - // Commit: G, C6 - // Snapshot: G, C4 - // - // CRASH - // - // ------------------------------ - // - // Expected in leveldb: - // G->C1->C2->C3->C4->C5->C6->C7->C8 - // - // Expected head header : C8 - // Expected head fast block: C8 - // Expected head block : G - // Expected snapshot disk : C4 - for _, scheme := range []string{rawdb.HashScheme, rawdb.PathScheme} { - expHead := uint64(0) - if scheme == rawdb.PathScheme { - expHead = uint64(4) - } - test := &crashSnapshotTest{ - snapshotTestBasic{ - scheme: scheme, - chainBlocks: 8, - snapshotBlock: 4, - commitBlock: 6, - expCanonicalBlocks: 8, - expHeadHeader: 8, - expHeadFastBlock: 8, - expHeadBlock: expHead, - expSnapshotBottom: 4, // Last committed disk layer, wait recovery - }, - } - test.test(t) - test.teardown() - } -} - -// Tests a Geth was running with snapshot enabled. Then restarts without -// enabling snapshot and after that re-enable the snapshot again. 
In this -// case the snapshot should be rebuilt with latest chain head. -func TestGappedNewSnapshot(t *testing.T) { - // Chain: - // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD) - // - // Commit: G - // Snapshot: G - // - // SetHead(0) - // - // ------------------------------ - // - // Expected in leveldb: - // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10 - // - // Expected head header : C10 - // Expected head fast block: C10 - // Expected head block : C10 - // Expected snapshot disk : C10 - for _, scheme := range []string{rawdb.HashScheme, rawdb.PathScheme} { - test := &gappedSnapshotTest{ - snapshotTestBasic: snapshotTestBasic{ - scheme: scheme, - chainBlocks: 8, - snapshotBlock: 0, - commitBlock: 0, - expCanonicalBlocks: 10, - expHeadHeader: 10, - expHeadFastBlock: 10, - expHeadBlock: 10, - expSnapshotBottom: 10, // Rebuilt snapshot from the latest HEAD - }, - gapped: 2, - } - test.test(t) - test.teardown() - } -} - -// Tests the Geth was running with snapshot enabled and resetHead is applied. -// In this case the head is rewound to the target(with state available). After -// that the chain is restarted and the original disk layer is kept. -func TestSetHeadWithNewSnapshot(t *testing.T) { - // Chain: - // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD) - // - // Commit: G - // Snapshot: G - // - // SetHead(4) - // - // ------------------------------ - // - // Expected in leveldb: - // G->C1->C2->C3->C4 - // - // Expected head header : C4 - // Expected head fast block: C4 - // Expected head block : C4 - // Expected snapshot disk : G - for _, scheme := range []string{rawdb.HashScheme, rawdb.PathScheme} { - test := &setHeadSnapshotTest{ - snapshotTestBasic: snapshotTestBasic{ - scheme: scheme, - chainBlocks: 8, - snapshotBlock: 0, - commitBlock: 0, - expCanonicalBlocks: 4, - expHeadHeader: 4, - expHeadFastBlock: 4, - expHeadBlock: 4, - expSnapshotBottom: 0, // The initial disk layer is built from the genesis - }, - setHead: 4, - } - test.test(t) - test.teardown() - } -} - -// Tests the Geth was running with a complete snapshot and then imports a few -// more new blocks on top without enabling the snapshot. After the restart, -// crash happens. Check everything is ok after the restart. -func TestRecoverSnapshotFromWipingCrash(t *testing.T) { - // Chain: - // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD) - // - // Commit: G - // Snapshot: G - // - // SetHead(0) - // - // ------------------------------ - // - // Expected in leveldb: - // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10 - // - // Expected head header : C10 - // Expected head fast block: C10 - // Expected head block : C8 - // Expected snapshot disk : C10 - for _, scheme := range []string{rawdb.HashScheme, rawdb.PathScheme} { - test := &wipeCrashSnapshotTest{ - snapshotTestBasic: snapshotTestBasic{ - scheme: scheme, - chainBlocks: 8, - snapshotBlock: 4, - commitBlock: 0, - expCanonicalBlocks: 10, - expHeadHeader: 10, - expHeadFastBlock: 10, - expHeadBlock: 10, - expSnapshotBottom: 10, - }, - newBlocks: 2, - } - test.test(t) - test.teardown() - } -} diff --git a/core/blockchain_test.go b/core/blockchain_test.go deleted file mode 100644 index 3917117b91..0000000000 --- a/core/blockchain_test.go +++ /dev/null @@ -1,4566 +0,0 @@ -// Copyright 2014 The go-ethereum Authors -// This file is part of the go-ethereum library. 
-// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package core - -import ( - "crypto/ecdsa" - "errors" - "fmt" - "math/big" - "math/rand" - "os" - "sync" - "testing" - "time" - - "github.com/ethereum/go-ethereum/consensus/misc/eip4844" - "github.com/ethereum/go-ethereum/crypto/kzg4844" - "github.com/stretchr/testify/require" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/common/math" - "github.com/ethereum/go-ethereum/consensus" - "github.com/ethereum/go-ethereum/consensus/beacon" - "github.com/ethereum/go-ethereum/consensus/ethash" - "github.com/ethereum/go-ethereum/core/rawdb" - "github.com/ethereum/go-ethereum/core/state" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/core/vm" - "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/eth/tracers/logger" - "github.com/ethereum/go-ethereum/ethdb" - "github.com/ethereum/go-ethereum/params" - "github.com/ethereum/go-ethereum/trie" - "github.com/holiman/uint256" -) - -// So we can deterministically seed different blockchains -var ( - canonicalSeed = 1 - forkSeed1 = 2 - forkSeed2 = 3 - - TestTriesInMemory = 128 -) - -// newCanonical creates a chain database, and injects a deterministic canonical -// chain. Depending on the full flag, it creates either a full block chain or a -// header only chain. The database and genesis specification for block generation -// are also returned in case more test blocks are needed later. -func newCanonical(engine consensus.Engine, n int, full bool, scheme string, pipeline bool) (ethdb.Database, *Genesis, *BlockChain, error) { - var ( - genesis = &Genesis{ - BaseFee: big.NewInt(params.InitialBaseFee), - Config: params.AllEthashProtocolChanges, - } - ) - - // Initialize a fresh chain with only a genesis block - var ops []BlockChainOption - if pipeline { - ops = append(ops, EnablePipelineCommit) - } - blockchain, _ := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), genesis, nil, engine, vm.Config{}, nil, nil, ops...) 
- // Create and inject the requested chain - if n == 0 { - return rawdb.NewMemoryDatabase(), genesis, blockchain, nil - } - if full { - // Full block-chain requested - genDb, blocks := makeBlockChainWithGenesis(genesis, n, engine, canonicalSeed) - _, err := blockchain.InsertChain(blocks) - return genDb, genesis, blockchain, err - } - // Header-only chain requested - genDb, headers := makeHeaderChainWithGenesis(genesis, n, engine, canonicalSeed) - _, err := blockchain.InsertHeaderChain(headers) - return genDb, genesis, blockchain, err -} - -func newGwei(n int64) *big.Int { - return new(big.Int).Mul(big.NewInt(n), big.NewInt(params.GWei)) -} - -// Test fork of length N starting from block i -func testInvalidStateRootBlockImport(t *testing.T, blockchain *BlockChain, i, n int, pipeline bool) { - // Copy old chain up to #i into a new db - db, _, blockchain2, err := newCanonical(ethash.NewFaker(), i, true, rawdb.HashScheme, pipeline) - if err != nil { - t.Fatal("could not make new canonical in testFork", err) - } - defer blockchain2.Stop() - - // Assert the chains have the same header/block at #i - hash1 := blockchain.GetBlockByNumber(uint64(i)).Hash() - hash2 := blockchain2.GetBlockByNumber(uint64(i)).Hash() - if hash1 != hash2 { - t.Errorf("chain content mismatch at %d: have hash %v, want hash %v", i, hash2, hash1) - } - // Extend the newly created chain - blockChainB := makeBlockChain(blockchain2.chainConfig, blockchain2.GetBlockByHash(blockchain2.CurrentBlock().Hash()), n, ethash.NewFaker(), db, forkSeed1) - for idx, block := range blockChainB { - block.SetRoot(common.Hash{0: byte(forkSeed1), 19: byte(idx)}) - } - previousBlock := blockchain.CurrentBlock() - // Sanity check that the forked chain can be imported into the original - if _, err := blockchain.InsertChain(blockChainB); err == nil { - t.Fatalf("failed to report insert error") - } - - time.Sleep(2 * rewindBadBlockInterval) - latestBlock := blockchain.CurrentBlock() - if latestBlock.Hash() != previousBlock.Hash() || latestBlock.Number.Uint64() != previousBlock.Number.Uint64() { - t.Fatalf("rewind do not take effect") - } - db, _, blockchain3, err := newCanonical(ethash.NewFaker(), i, true, rawdb.HashScheme, pipeline) - if err != nil { - t.Fatal("could not make new canonical in testFork", err) - } - defer blockchain3.Stop() - - blockChainC := makeBlockChain(blockchain3.chainConfig, blockchain3.GetBlockByHash(blockchain3.CurrentBlock().Hash()), n, ethash.NewFaker(), db, forkSeed2) - - if _, err := blockchain.InsertChain(blockChainC); err != nil { - t.Fatalf("failed to insert forking chain: %v", err) - } -} - -// Test fork of length N starting from block i -func testFork(t *testing.T, blockchain *BlockChain, i, n int, full bool, comparator func(td1, td2 *big.Int), scheme string, pipeline bool) { - // Copy old chain up to #i into a new db - genDb, _, blockchain2, err := newCanonical(ethash.NewFaker(), i, full, scheme, pipeline) - if err != nil { - t.Fatal("could not make new canonical in testFork", err) - } - defer blockchain2.Stop() - - // Assert the chains have the same header/block at #i - var hash1, hash2 common.Hash - if full { - hash1 = blockchain.GetBlockByNumber(uint64(i)).Hash() - hash2 = blockchain2.GetBlockByNumber(uint64(i)).Hash() - } else { - hash1 = blockchain.GetHeaderByNumber(uint64(i)).Hash() - hash2 = blockchain2.GetHeaderByNumber(uint64(i)).Hash() - } - if hash1 != hash2 { - t.Errorf("chain content mismatch at %d: have hash %v, want hash %v", i, hash2, hash1) - } - // Extend the newly created chain - var ( - 
blockChainB []*types.Block - headerChainB []*types.Header - ) - if full { - blockChainB = makeBlockChain(blockchain2.chainConfig, blockchain2.GetBlockByHash(blockchain2.CurrentBlock().Hash()), n, ethash.NewFaker(), genDb, forkSeed1) - if _, err := blockchain2.InsertChain(blockChainB); err != nil { - t.Fatalf("failed to insert forking chain: %v", err) - } - } else { - headerChainB = makeHeaderChain(blockchain2.chainConfig, blockchain2.CurrentHeader(), n, ethash.NewFaker(), genDb, forkSeed1) - if _, err := blockchain2.InsertHeaderChain(headerChainB); err != nil { - t.Fatalf("failed to insert forking chain: %v", err) - } - } - // Sanity check that the forked chain can be imported into the original - var tdPre, tdPost *big.Int - - if full { - cur := blockchain.CurrentBlock() - tdPre = blockchain.GetTd(cur.Hash(), cur.Number.Uint64()) - if err := testBlockChainImport(blockChainB, pipeline, blockchain); err != nil { - t.Fatalf("failed to import forked block chain: %v", err) - } - last := blockChainB[len(blockChainB)-1] - tdPost = blockchain.GetTd(last.Hash(), last.NumberU64()) - } else { - cur := blockchain.CurrentHeader() - tdPre = blockchain.GetTd(cur.Hash(), cur.Number.Uint64()) - if err := testHeaderChainImport(headerChainB, blockchain); err != nil { - t.Fatalf("failed to import forked header chain: %v", err) - } - last := headerChainB[len(headerChainB)-1] - tdPost = blockchain.GetTd(last.Hash(), last.Number.Uint64()) - } - // Compare the total difficulties of the chains - comparator(tdPre, tdPost) -} - -// testBlockChainImport tries to process a chain of blocks, writing them into -// the database if successful. -func testBlockChainImport(chain types.Blocks, pipelineCommit bool, blockchain *BlockChain) error { - for _, block := range chain { - // Try and process the block - err := blockchain.engine.VerifyHeader(blockchain, block.Header()) - if err == nil { - err = blockchain.validator.ValidateBody(block) - } - if err != nil { - if err == ErrKnownBlock { - continue - } - return err - } - statedb, err := state.New(blockchain.GetBlockByHash(block.ParentHash()).Root(), blockchain.stateCache, nil) - if err != nil { - return err - } - statedb.SetExpectedStateRoot(block.Root()) - if pipelineCommit { - statedb.EnablePipeCommit() - } - statedb, receipts, _, usedGas, err := blockchain.processor.Process(block, statedb, vm.Config{}) - if err != nil { - blockchain.reportBlock(block, receipts, err) - return err - } - err = blockchain.validator.ValidateState(block, statedb, receipts, usedGas) - if err != nil { - blockchain.reportBlock(block, receipts, err) - return err - } - - blockchain.chainmu.MustLock() - rawdb.WriteTd(blockchain.db, block.Hash(), block.NumberU64(), new(big.Int).Add(block.Difficulty(), blockchain.GetTd(block.ParentHash(), block.NumberU64()-1))) - rawdb.WriteBlock(blockchain.db, block) - statedb.Finalise(false) - statedb.AccountsIntermediateRoot() - statedb.Commit(block.NumberU64(), nil) - blockchain.chainmu.Unlock() - } - return nil -} - -// testHeaderChainImport tries to process a chain of header, writing them into -// the database if successful. 
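// Editor's note, not part of the deleted file: unlike testBlockChainImport above,
// the header-only path below never executes transactions or touches state; it only
// verifies each header and writes the header plus its total difficulty. That is
// what lets the full=false variants of the fork tests run without any state at all.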
-func testHeaderChainImport(chain []*types.Header, blockchain *BlockChain) error { - for _, header := range chain { - // Try and validate the header - if err := blockchain.engine.VerifyHeader(blockchain, header); err != nil { - return err - } - // Manually insert the header into the database, but don't reorganise (allows subsequent testing) - blockchain.chainmu.MustLock() - rawdb.WriteTd(blockchain.db, header.Hash(), header.Number.Uint64(), new(big.Int).Add(header.Difficulty, blockchain.GetTd(header.ParentHash, header.Number.Uint64()-1))) - rawdb.WriteHeader(blockchain.db, header) - blockchain.chainmu.Unlock() - } - return nil -} - -func TestBlockImportVerification(t *testing.T) { - length := 5 - - // Make first chain starting from genesis - _, _, processor, err := newCanonical(ethash.NewFaker(), length, true, rawdb.HashScheme, true) - if err != nil { - t.Fatalf("failed to make new canonical chain: %v", err) - } - defer processor.Stop() - // Start fork from current height - processor, _ = EnablePipelineCommit(processor) - testInvalidStateRootBlockImport(t, processor, length, 10, true) -} -func TestLastBlock(t *testing.T) { - testLastBlock(t, rawdb.HashScheme) - testLastBlock(t, rawdb.PathScheme) -} - -func testLastBlock(t *testing.T, scheme string) { - genDb, _, blockchain, err := newCanonical(ethash.NewFaker(), 0, true, scheme, false) - if err != nil { - t.Fatalf("failed to create pristine chain: %v", err) - } - defer blockchain.Stop() - - blocks := makeBlockChain(blockchain.chainConfig, blockchain.GetBlockByHash(blockchain.CurrentBlock().Hash()), 1, ethash.NewFullFaker(), genDb, 0) - if _, err := blockchain.InsertChain(blocks); err != nil { - t.Fatalf("Failed to insert block: %v", err) - } - if blocks[len(blocks)-1].Hash() != rawdb.ReadHeadBlockHash(blockchain.db) { - t.Fatalf("Write/Get HeadBlockHash failed") - } -} - -// Test inserts the blocks/headers after the fork choice rule is changed. -// The chain is reorged to whatever specified. 
-func testInsertAfterMerge(t *testing.T, blockchain *BlockChain, i, n int, full bool, scheme string) { - // Copy old chain up to #i into a new db - genDb, _, blockchain2, err := newCanonical(ethash.NewFaker(), i, full, scheme, false) - if err != nil { - t.Fatal("could not make new canonical in testFork", err) - } - defer blockchain2.Stop() - - // Assert the chains have the same header/block at #i - var hash1, hash2 common.Hash - if full { - hash1 = blockchain.GetBlockByNumber(uint64(i)).Hash() - hash2 = blockchain2.GetBlockByNumber(uint64(i)).Hash() - } else { - hash1 = blockchain.GetHeaderByNumber(uint64(i)).Hash() - hash2 = blockchain2.GetHeaderByNumber(uint64(i)).Hash() - } - if hash1 != hash2 { - t.Errorf("chain content mismatch at %d: have hash %v, want hash %v", i, hash2, hash1) - } - - // Extend the newly created chain - if full { - blockChainB := makeBlockChain(blockchain2.chainConfig, blockchain2.GetBlockByHash(blockchain2.CurrentBlock().Hash()), n, ethash.NewFaker(), genDb, forkSeed1) - if _, err := blockchain2.InsertChain(blockChainB); err != nil { - t.Fatalf("failed to insert forking chain: %v", err) - } - if blockchain2.CurrentBlock().Number.Uint64() != blockChainB[len(blockChainB)-1].NumberU64() { - t.Fatalf("failed to reorg to the given chain") - } - if blockchain2.CurrentBlock().Hash() != blockChainB[len(blockChainB)-1].Hash() { - t.Fatalf("failed to reorg to the given chain") - } - } else { - headerChainB := makeHeaderChain(blockchain2.chainConfig, blockchain2.CurrentHeader(), n, ethash.NewFaker(), genDb, forkSeed1) - if _, err := blockchain2.InsertHeaderChain(headerChainB); err != nil { - t.Fatalf("failed to insert forking chain: %v", err) - } - if blockchain2.CurrentHeader().Number.Uint64() != headerChainB[len(headerChainB)-1].Number.Uint64() { - t.Fatalf("failed to reorg to the given chain") - } - if blockchain2.CurrentHeader().Hash() != headerChainB[len(headerChainB)-1].Hash() { - t.Fatalf("failed to reorg to the given chain") - } - } -} - -// Tests that given a starting canonical chain of a given size, it can be extended -// with various length chains. -func TestExtendCanonicalHeaders(t *testing.T) { - testExtendCanonical(t, false, rawdb.HashScheme, false) - testExtendCanonical(t, false, rawdb.PathScheme, false) -} - -func TestExtendCanonicalBlocks(t *testing.T) { - testExtendCanonical(t, true, rawdb.HashScheme, false) - testExtendCanonical(t, true, rawdb.PathScheme, false) - testExtendCanonical(t, true, rawdb.HashScheme, true) -} - -func testExtendCanonical(t *testing.T, full bool, scheme string, pipeline bool) { - length := 5 - - // Make first chain starting from genesis - _, _, processor, err := newCanonical(ethash.NewFaker(), length, full, scheme, pipeline) - if err != nil { - t.Fatalf("failed to make new canonical chain: %v", err) - } - defer processor.Stop() - - // Define the difficulty comparator - better := func(td1, td2 *big.Int) { - if td2.Cmp(td1) <= 0 { - t.Errorf("total difficulty mismatch: have %v, expected more than %v", td2, td1) - } - } - // Start fork from current height - testFork(t, processor, length, 1, full, better, scheme, pipeline) - testFork(t, processor, length, 2, full, better, scheme, pipeline) - testFork(t, processor, length, 5, full, better, scheme, pipeline) - testFork(t, processor, length, 10, full, better, scheme, pipeline) -} - -// Tests that given a starting canonical chain of a given size, it can be extended -// with various length chains. 
-func TestExtendCanonicalHeadersAfterMerge(t *testing.T) { - testExtendCanonicalAfterMerge(t, false, rawdb.HashScheme) - testExtendCanonicalAfterMerge(t, false, rawdb.PathScheme) -} -func TestExtendCanonicalBlocksAfterMerge(t *testing.T) { - testExtendCanonicalAfterMerge(t, true, rawdb.HashScheme) - testExtendCanonicalAfterMerge(t, true, rawdb.PathScheme) -} - -func testExtendCanonicalAfterMerge(t *testing.T, full bool, scheme string) { - length := 5 - - // Make first chain starting from genesis - _, _, processor, err := newCanonical(ethash.NewFaker(), length, full, scheme, false) - if err != nil { - t.Fatalf("failed to make new canonical chain: %v", err) - } - defer processor.Stop() - - testInsertAfterMerge(t, processor, length, 1, full, scheme) - testInsertAfterMerge(t, processor, length, 10, full, scheme) -} - -// Tests that given a starting canonical chain of a given size, creating shorter -// forks do not take canonical ownership. -func TestShorterForkHeaders(t *testing.T) { - testShorterFork(t, false, rawdb.HashScheme, false) - testShorterFork(t, false, rawdb.PathScheme, false) -} -func TestShorterForkBlocks(t *testing.T) { - testShorterFork(t, true, rawdb.HashScheme, false) - testShorterFork(t, true, rawdb.PathScheme, false) - testShorterFork(t, true, rawdb.HashScheme, true) -} - -func testShorterFork(t *testing.T, full bool, scheme string, pipeline bool) { - length := 10 - - // Make first chain starting from genesis - _, _, processor, err := newCanonical(ethash.NewFaker(), length, full, scheme, pipeline) - if err != nil { - t.Fatalf("failed to make new canonical chain: %v", err) - } - defer processor.Stop() - - // Define the difficulty comparator - worse := func(td1, td2 *big.Int) { - if td2.Cmp(td1) >= 0 { - t.Errorf("total difficulty mismatch: have %v, expected less than %v", td2, td1) - } - } - // Sum of numbers must be less than `length` for this to be a shorter fork - testFork(t, processor, 0, 3, full, worse, scheme, pipeline) - testFork(t, processor, 0, 7, full, worse, scheme, pipeline) - testFork(t, processor, 1, 1, full, worse, scheme, pipeline) - testFork(t, processor, 1, 7, full, worse, scheme, pipeline) - testFork(t, processor, 5, 3, full, worse, scheme, pipeline) - testFork(t, processor, 5, 4, full, worse, scheme, pipeline) -} - -// Tests that given a starting canonical chain of a given size, creating shorter -// forks do not take canonical ownership. -func TestShorterForkHeadersAfterMerge(t *testing.T) { - testShorterForkAfterMerge(t, false, rawdb.HashScheme) - testShorterForkAfterMerge(t, false, rawdb.PathScheme) -} -func TestShorterForkBlocksAfterMerge(t *testing.T) { - testShorterForkAfterMerge(t, true, rawdb.HashScheme) - testShorterForkAfterMerge(t, true, rawdb.PathScheme) -} - -func testShorterForkAfterMerge(t *testing.T, full bool, scheme string) { - length := 10 - - // Make first chain starting from genesis - _, _, processor, err := newCanonical(ethash.NewFaker(), length, full, scheme, false) - if err != nil { - t.Fatalf("failed to make new canonical chain: %v", err) - } - defer processor.Stop() - - testInsertAfterMerge(t, processor, 0, 3, full, scheme) - testInsertAfterMerge(t, processor, 0, 7, full, scheme) - testInsertAfterMerge(t, processor, 1, 1, full, scheme) - testInsertAfterMerge(t, processor, 1, 7, full, scheme) - testInsertAfterMerge(t, processor, 5, 3, full, scheme) - testInsertAfterMerge(t, processor, 5, 4, full, scheme) -} - -// Tests that given a starting canonical chain of a given size, creating longer -// forks do take canonical ownership. 
-func TestLongerForkHeaders(t *testing.T) { - testLongerFork(t, false, rawdb.HashScheme, false) - testLongerFork(t, false, rawdb.PathScheme, false) -} -func TestLongerForkBlocks(t *testing.T) { - testLongerFork(t, true, rawdb.HashScheme, false) - testLongerFork(t, true, rawdb.PathScheme, false) - testLongerFork(t, true, rawdb.HashScheme, true) -} - -func testLongerFork(t *testing.T, full bool, scheme string, pipeline bool) { - length := 10 - - // Make first chain starting from genesis - _, _, processor, err := newCanonical(ethash.NewFaker(), length, full, scheme, pipeline) - if err != nil { - t.Fatalf("failed to make new canonical chain: %v", err) - } - defer processor.Stop() - - testInsertAfterMerge(t, processor, 0, 11, full, scheme) - testInsertAfterMerge(t, processor, 0, 15, full, scheme) - testInsertAfterMerge(t, processor, 1, 10, full, scheme) - testInsertAfterMerge(t, processor, 1, 12, full, scheme) - testInsertAfterMerge(t, processor, 5, 6, full, scheme) - testInsertAfterMerge(t, processor, 5, 8, full, scheme) -} - -// Tests that given a starting canonical chain of a given size, creating longer -// forks do take canonical ownership. -func TestLongerForkHeadersAfterMerge(t *testing.T) { - testLongerForkAfterMerge(t, false, rawdb.HashScheme) - testLongerForkAfterMerge(t, false, rawdb.PathScheme) -} -func TestLongerForkBlocksAfterMerge(t *testing.T) { - testLongerForkAfterMerge(t, true, rawdb.HashScheme) - testLongerForkAfterMerge(t, true, rawdb.PathScheme) -} - -func testLongerForkAfterMerge(t *testing.T, full bool, scheme string) { - length := 10 - - // Make first chain starting from genesis - _, _, processor, err := newCanonical(ethash.NewFaker(), length, full, scheme, false) - if err != nil { - t.Fatalf("failed to make new canonical chain: %v", err) - } - defer processor.Stop() - - testInsertAfterMerge(t, processor, 0, 11, full, scheme) - testInsertAfterMerge(t, processor, 0, 15, full, scheme) - testInsertAfterMerge(t, processor, 1, 10, full, scheme) - testInsertAfterMerge(t, processor, 1, 12, full, scheme) - testInsertAfterMerge(t, processor, 5, 6, full, scheme) - testInsertAfterMerge(t, processor, 5, 8, full, scheme) -} - -// Tests that given a starting canonical chain of a given size, creating equal -// forks do take canonical ownership. 
-func TestEqualForkHeaders(t *testing.T) { - testEqualFork(t, false, rawdb.HashScheme, false) - testEqualFork(t, false, rawdb.PathScheme, false) -} -func TestEqualForkBlocks(t *testing.T) { - testEqualFork(t, true, rawdb.HashScheme, false) - testEqualFork(t, true, rawdb.PathScheme, false) - testEqualFork(t, true, rawdb.HashScheme, true) -} - -func testEqualFork(t *testing.T, full bool, scheme string, pipeline bool) { - length := 10 - - // Make first chain starting from genesis - _, _, processor, err := newCanonical(ethash.NewFaker(), length, full, scheme, pipeline) - if err != nil { - t.Fatalf("failed to make new canonical chain: %v", err) - } - defer processor.Stop() - - // Define the difficulty comparator - equal := func(td1, td2 *big.Int) { - if td2.Cmp(td1) != 0 { - t.Errorf("total difficulty mismatch: have %v, want %v", td2, td1) - } - } - // Sum of numbers must be equal to `length` for this to be an equal fork - testFork(t, processor, 0, 10, full, equal, scheme, pipeline) - testFork(t, processor, 1, 9, full, equal, scheme, pipeline) - testFork(t, processor, 2, 8, full, equal, scheme, pipeline) - testFork(t, processor, 5, 5, full, equal, scheme, pipeline) - testFork(t, processor, 6, 4, full, equal, scheme, pipeline) - testFork(t, processor, 9, 1, full, equal, scheme, pipeline) -} - -// Tests that given a starting canonical chain of a given size, creating equal -// forks do take canonical ownership. -func TestEqualForkHeadersAfterMerge(t *testing.T) { - testEqualForkAfterMerge(t, false, rawdb.HashScheme) - testEqualForkAfterMerge(t, false, rawdb.PathScheme) -} -func TestEqualForkBlocksAfterMerge(t *testing.T) { - testEqualForkAfterMerge(t, true, rawdb.HashScheme) - testEqualForkAfterMerge(t, true, rawdb.PathScheme) -} - -func testEqualForkAfterMerge(t *testing.T, full bool, scheme string) { - length := 10 - - // Make first chain starting from genesis - _, _, processor, err := newCanonical(ethash.NewFaker(), length, full, scheme, false) - if err != nil { - t.Fatalf("failed to make new canonical chain: %v", err) - } - defer processor.Stop() - - testInsertAfterMerge(t, processor, 0, 10, full, scheme) - testInsertAfterMerge(t, processor, 1, 9, full, scheme) - testInsertAfterMerge(t, processor, 2, 8, full, scheme) - testInsertAfterMerge(t, processor, 5, 5, full, scheme) - testInsertAfterMerge(t, processor, 6, 4, full, scheme) - testInsertAfterMerge(t, processor, 9, 1, full, scheme) -} - -// Tests that chains missing links do not get accepted by the processor. 
-func TestBrokenHeaderChain(t *testing.T) { - testBrokenChain(t, false, rawdb.HashScheme, false) - testBrokenChain(t, false, rawdb.PathScheme, false) -} -func TestBrokenBlockChain(t *testing.T) { - testBrokenChain(t, true, rawdb.HashScheme, false) - testBrokenChain(t, true, rawdb.PathScheme, false) - testBrokenChain(t, true, rawdb.HashScheme, true) -} - -func testBrokenChain(t *testing.T, full bool, scheme string, pipeline bool) { - // Make chain starting from genesis - genDb, _, blockchain, err := newCanonical(ethash.NewFaker(), 10, full, scheme, pipeline) - if err != nil { - t.Fatalf("failed to make new canonical chain: %v", err) - } - defer blockchain.Stop() - - // Create a forked chain, and try to insert with a missing link - if full { - chain := makeBlockChain(blockchain.chainConfig, blockchain.GetBlockByHash(blockchain.CurrentBlock().Hash()), 5, ethash.NewFaker(), genDb, forkSeed1)[1:] - if err := testBlockChainImport(chain, pipeline, blockchain); err == nil { - t.Errorf("broken block chain not reported") - } - } else { - chain := makeHeaderChain(blockchain.chainConfig, blockchain.CurrentHeader(), 5, ethash.NewFaker(), genDb, forkSeed1)[1:] - if err := testHeaderChainImport(chain, blockchain); err == nil { - t.Errorf("broken header chain not reported") - } - } -} - -// Tests that reorganising a long difficult chain after a short easy one -// overwrites the canonical numbers and links in the database. -func TestReorgLongHeaders(t *testing.T) { - testReorgLong(t, false, rawdb.HashScheme, false) - testReorgLong(t, false, rawdb.PathScheme, false) -} -func TestReorgLongBlocks(t *testing.T) { - testReorgLong(t, true, rawdb.HashScheme, false) - testReorgLong(t, true, rawdb.PathScheme, false) - testReorgLong(t, true, rawdb.HashScheme, true) -} - -func testReorgLong(t *testing.T, full bool, scheme string, pipeline bool) { - testReorg(t, []int64{0, 0, -9}, []int64{0, 0, 0, -9}, 393280+params.GenesisDifficulty.Int64(), full, scheme, pipeline) -} - -// Tests that reorganising a short difficult chain after a long easy one -// overwrites the canonical numbers and links in the database. -func TestReorgShortHeaders(t *testing.T) { - testReorgShort(t, false, rawdb.HashScheme, false) - testReorgShort(t, false, rawdb.PathScheme, false) -} -func TestReorgShortBlocks(t *testing.T) { - testReorgShort(t, true, rawdb.HashScheme, false) - testReorgShort(t, true, rawdb.PathScheme, false) - testReorgShort(t, true, rawdb.HashScheme, true) -} - -func testReorgShort(t *testing.T, full bool, scheme string, pipeline bool) { - // Create a long easy chain vs. a short heavy one. Due to difficulty adjustment - // we need a fairly long chain of blocks with different difficulties for a short - // one to become heavier than a long one. The 96 is an empirical value. 
- easy := make([]int64, 96) - for i := 0; i < len(easy); i++ { - easy[i] = 60 - } - diff := make([]int64, len(easy)-1) - for i := 0; i < len(diff); i++ { - diff[i] = -9 - } - testReorg(t, easy, diff, 12615120+params.GenesisDifficulty.Int64(), full, scheme, pipeline) -} - -func testReorg(t *testing.T, first, second []int64, td int64, full bool, scheme string, pipeline bool) { - // Create a pristine chain and database - genDb, _, blockchain, err := newCanonical(ethash.NewFaker(), 0, full, scheme, pipeline) - if err != nil { - t.Fatalf("failed to create pristine chain: %v", err) - } - defer blockchain.Stop() - - // Insert an easy and a difficult chain afterwards - easyBlocks, _ := GenerateChain(params.TestChainConfig, blockchain.GetBlockByHash(blockchain.CurrentBlock().Hash()), ethash.NewFaker(), genDb, len(first), func(i int, b *BlockGen) { - b.OffsetTime(first[i]) - }) - diffBlocks, _ := GenerateChain(params.TestChainConfig, blockchain.GetBlockByHash(blockchain.CurrentBlock().Hash()), ethash.NewFaker(), genDb, len(second), func(i int, b *BlockGen) { - b.OffsetTime(second[i]) - }) - if full { - if _, err := blockchain.InsertChain(easyBlocks); err != nil { - t.Fatalf("failed to insert easy chain: %v", err) - } - if _, err := blockchain.InsertChain(diffBlocks); err != nil { - t.Fatalf("failed to insert difficult chain: %v", err) - } - } else { - easyHeaders := make([]*types.Header, len(easyBlocks)) - for i, block := range easyBlocks { - easyHeaders[i] = block.Header() - } - diffHeaders := make([]*types.Header, len(diffBlocks)) - for i, block := range diffBlocks { - diffHeaders[i] = block.Header() - } - if _, err := blockchain.InsertHeaderChain(easyHeaders); err != nil { - t.Fatalf("failed to insert easy chain: %v", err) - } - if _, err := blockchain.InsertHeaderChain(diffHeaders); err != nil { - t.Fatalf("failed to insert difficult chain: %v", err) - } - } - // Check that the chain is valid number and link wise - if full { - prev := blockchain.CurrentBlock() - for block := blockchain.GetBlockByNumber(blockchain.CurrentBlock().Number.Uint64() - 1); block.NumberU64() != 0; prev, block = block.Header(), blockchain.GetBlockByNumber(block.NumberU64()-1) { - if prev.ParentHash != block.Hash() { - t.Errorf("parent block hash mismatch: have %x, want %x", prev.ParentHash, block.Hash()) - } - } - } else { - prev := blockchain.CurrentHeader() - for header := blockchain.GetHeaderByNumber(blockchain.CurrentHeader().Number.Uint64() - 1); header.Number.Uint64() != 0; prev, header = header, blockchain.GetHeaderByNumber(header.Number.Uint64()-1) { - if prev.ParentHash != header.Hash() { - t.Errorf("parent header hash mismatch: have %x, want %x", prev.ParentHash, header.Hash()) - } - } - } - // Make sure the chain total difficulty is the correct one - want := new(big.Int).Add(blockchain.genesisBlock.Difficulty(), big.NewInt(td)) - if full { - cur := blockchain.CurrentBlock() - if have := blockchain.GetTd(cur.Hash(), cur.Number.Uint64()); have.Cmp(want) != 0 { - t.Errorf("total difficulty mismatch: have %v, want %v", have, want) - } - } else { - cur := blockchain.CurrentHeader() - if have := blockchain.GetTd(cur.Hash(), cur.Number.Uint64()); have.Cmp(want) != 0 { - t.Errorf("total difficulty mismatch: have %v, want %v", have, want) - } - } -} - -// Tests that the insertion functions detect banned hashes. 
-func TestBadHeaderHashes(t *testing.T) { - testBadHashes(t, false, rawdb.HashScheme, false) - testBadHashes(t, false, rawdb.PathScheme, false) -} - -func TestBadBlockHashes(t *testing.T) { - testBadHashes(t, true, rawdb.HashScheme, false) - testBadHashes(t, true, rawdb.HashScheme, true) - testBadHashes(t, true, rawdb.PathScheme, false) -} - -func testBadHashes(t *testing.T, full bool, scheme string, pipeline bool) { - // Create a pristine chain and database - genDb, _, blockchain, err := newCanonical(ethash.NewFaker(), 0, full, scheme, pipeline) - if err != nil { - t.Fatalf("failed to create pristine chain: %v", err) - } - defer blockchain.Stop() - - // Create a chain, ban a hash and try to import - if full { - blocks := makeBlockChain(blockchain.chainConfig, blockchain.GetBlockByHash(blockchain.CurrentBlock().Hash()), 3, ethash.NewFaker(), genDb, 10) - - BadHashes[blocks[2].Header().Hash()] = true - defer func() { delete(BadHashes, blocks[2].Header().Hash()) }() - - _, err = blockchain.InsertChain(blocks) - } else { - headers := makeHeaderChain(blockchain.chainConfig, blockchain.CurrentHeader(), 3, ethash.NewFaker(), genDb, 10) - - BadHashes[headers[2].Hash()] = true - defer func() { delete(BadHashes, headers[2].Hash()) }() - - _, err = blockchain.InsertHeaderChain(headers) - } - if !errors.Is(err, ErrBannedHash) { - t.Errorf("error mismatch: have: %v, want: %v", err, ErrBannedHash) - } -} - -// Tests that bad hashes are detected on boot, and the chain rolled back to a -// good state prior to the bad hash. -func TestReorgBadHeaderHashes(t *testing.T) { - testReorgBadHashes(t, false, rawdb.HashScheme, false) - testReorgBadHashes(t, false, rawdb.PathScheme, false) -} -func TestReorgBadBlockHashes(t *testing.T) { - testReorgBadHashes(t, true, rawdb.HashScheme, false) - testReorgBadHashes(t, true, rawdb.HashScheme, true) - testReorgBadHashes(t, true, rawdb.PathScheme, false) -} - -func testReorgBadHashes(t *testing.T, full bool, scheme string, pipeline bool) { - // Create a pristine chain and database - genDb, gspec, blockchain, err := newCanonical(ethash.NewFaker(), 0, full, scheme, pipeline) - if err != nil { - t.Fatalf("failed to create pristine chain: %v", err) - } - // Create a chain, import and ban afterwards - headers := makeHeaderChain(blockchain.chainConfig, blockchain.CurrentHeader(), 4, ethash.NewFaker(), genDb, 10) - blocks := makeBlockChain(blockchain.chainConfig, blockchain.GetBlockByHash(blockchain.CurrentBlock().Hash()), 4, ethash.NewFaker(), genDb, 10) - - if full { - if _, err = blockchain.InsertChain(blocks); err != nil { - t.Errorf("failed to import blocks: %v", err) - } - if blockchain.CurrentBlock().Hash() != blocks[3].Hash() { - t.Errorf("last block hash mismatch: have: %x, want %x", blockchain.CurrentBlock().Hash(), blocks[3].Header().Hash()) - } - BadHashes[blocks[3].Header().Hash()] = true - defer func() { delete(BadHashes, blocks[3].Header().Hash()) }() - } else { - if _, err = blockchain.InsertHeaderChain(headers); err != nil { - t.Errorf("failed to import headers: %v", err) - } - if blockchain.CurrentHeader().Hash() != headers[3].Hash() { - t.Errorf("last header hash mismatch: have: %x, want %x", blockchain.CurrentHeader().Hash(), headers[3].Hash()) - } - BadHashes[headers[3].Hash()] = true - defer func() { delete(BadHashes, headers[3].Hash()) }() - } - blockchain.Stop() - - // Create a new BlockChain and check that it rolled back the state. 
- ncm, err := NewBlockChain(blockchain.db, DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) - if err != nil { - t.Fatalf("failed to create new chain manager: %v", err) - } - if full { - if ncm.CurrentBlock().Hash() != blocks[2].Header().Hash() { - t.Errorf("last block hash mismatch: have: %x, want %x", ncm.CurrentBlock().Hash(), blocks[2].Header().Hash()) - } - if blocks[2].Header().GasLimit != ncm.GasLimit() { - t.Errorf("last block gasLimit mismatch: have: %d, want %d", ncm.GasLimit(), blocks[2].Header().GasLimit) - } - } else { - if ncm.CurrentHeader().Hash() != headers[2].Hash() { - t.Errorf("last header hash mismatch: have: %x, want %x", ncm.CurrentHeader().Hash(), headers[2].Hash()) - } - } - ncm.Stop() -} - -// Tests chain insertions in the face of one entity containing an invalid nonce. -func TestHeadersInsertNonceError(t *testing.T) { - testInsertNonceError(t, false, rawdb.HashScheme, false) - testInsertNonceError(t, false, rawdb.PathScheme, false) -} -func TestBlocksInsertNonceError(t *testing.T) { - testInsertNonceError(t, true, rawdb.HashScheme, false) - testInsertNonceError(t, true, rawdb.HashScheme, true) - testInsertNonceError(t, true, rawdb.PathScheme, false) -} - -func testInsertNonceError(t *testing.T, full bool, scheme string, pipeline bool) { - doTest := func(i int) { - // Create a pristine chain and database - genDb, _, blockchain, err := newCanonical(ethash.NewFaker(), 0, full, scheme, pipeline) - if err != nil { - t.Fatalf("failed to create pristine chain: %v", err) - } - defer blockchain.Stop() - - // Create and insert a chain with a failing nonce - var ( - failAt int - failRes int - failNum uint64 - ) - if full { - blocks := makeBlockChain(blockchain.chainConfig, blockchain.GetBlockByHash(blockchain.CurrentBlock().Hash()), i, ethash.NewFaker(), genDb, 0) - - failAt = rand.Int() % len(blocks) - failNum = blocks[failAt].NumberU64() - - blockchain.engine = ethash.NewFakeFailer(failNum) - failRes, err = blockchain.InsertChain(blocks) - } else { - headers := makeHeaderChain(blockchain.chainConfig, blockchain.CurrentHeader(), i, ethash.NewFaker(), genDb, 0) - - failAt = rand.Int() % len(headers) - failNum = headers[failAt].Number.Uint64() - - blockchain.engine = ethash.NewFakeFailer(failNum) - blockchain.hc.engine = blockchain.engine - failRes, err = blockchain.InsertHeaderChain(headers) - } - // Check that the returned error indicates the failure - if failRes != failAt { - t.Errorf("test %d: failure (%v) index mismatch: have %d, want %d", i, err, failRes, failAt) - } - // Check that all blocks after the failing block have been inserted - for j := 0; j < i-failAt; j++ { - if full { - if block := blockchain.GetBlockByNumber(failNum + uint64(j)); block != nil { - t.Errorf("test %d: invalid block in chain: %v", i, block) - } - } else { - if header := blockchain.GetHeaderByNumber(failNum + uint64(j)); header != nil { - t.Errorf("test %d: invalid header in chain: %v", i, header) - } - } - } - } - for i := 1; i < 25 && !t.Failed(); i++ { - doTest(i) - } -} - -// Tests that fast importing a block chain produces the same chain data as the -// classical full block processing. 
-func TestFastVsFullChains(t *testing.T) { - testFastVsFullChains(t, rawdb.HashScheme) - testFastVsFullChains(t, rawdb.PathScheme) -} - -func testFastVsFullChains(t *testing.T, scheme string) { - // Configure and generate a sample block chain - var ( - key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") - address = crypto.PubkeyToAddress(key.PublicKey) - funds = big.NewInt(1000000000000000) - gspec = &Genesis{ - Config: params.TestChainConfig, - Alloc: types.GenesisAlloc{address: {Balance: funds}}, - BaseFee: big.NewInt(params.InitialBaseFee), - } - signer = types.LatestSigner(gspec.Config) - ) - _, blocks, receipts := GenerateChainWithGenesis(gspec, ethash.NewFaker(), 1024, func(i int, block *BlockGen) { - block.SetCoinbase(common.Address{0x00}) - - // If the block number is multiple of 3, send a few bonus transactions to the miner - if i%3 == 2 { - for j := 0; j < i%4+1; j++ { - tx, err := types.SignTx(types.NewTransaction(block.TxNonce(address), common.Address{0x00}, big.NewInt(1000), params.TxGas, block.header.BaseFee, nil), signer, key) - if err != nil { - panic(err) - } - block.AddTx(tx) - } - } - // If the block number is a multiple of 5, add an uncle to the block - if i%5 == 4 { - block.AddUncle(&types.Header{ParentHash: block.PrevBlock(i - 2).Hash(), Number: big.NewInt(int64(i))}) - } - }) - // Import the chain as an archive node for the comparison baseline - archiveDb := rawdb.NewMemoryDatabase() - archive, _ := NewBlockChain(archiveDb, DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) - defer archive.Stop() - - if n, err := archive.InsertChain(blocks); err != nil { - t.Fatalf("failed to process block %d: %v", n, err) - } - // Fast import the chain as a non-archive node to test - fastDb := rawdb.NewMemoryDatabase() - fast, _ := NewBlockChain(fastDb, DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) - defer fast.Stop() - - headers := make([]*types.Header, len(blocks)) - for i, block := range blocks { - headers[i] = block.Header() - } - if n, err := fast.InsertHeaderChain(headers); err != nil { - t.Fatalf("failed to insert header %d: %v", n, err) - } - if n, err := fast.InsertReceiptChain(blocks, receipts, 0); err != nil { - t.Fatalf("failed to insert receipt %d: %v", n, err) - } - // Freezer style fast import the chain. 
- ancientDb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false, false, false, false, false) - if err != nil { - t.Fatalf("failed to create temp freezer db: %v", err) - } - defer ancientDb.Close() - - ancient, _ := NewBlockChain(ancientDb, DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) - defer ancient.Stop() - - if n, err := ancient.InsertHeaderChain(headers); err != nil { - t.Fatalf("failed to insert header %d: %v", n, err) - } - if n, err := ancient.InsertReceiptChain(blocks, receipts, uint64(len(blocks)/2)); err != nil { - t.Fatalf("failed to insert receipt %d: %v", n, err) - } - - // Iterate over all chain data components, and cross reference - for i := 0; i < len(blocks); i++ { - num, hash, time := blocks[i].NumberU64(), blocks[i].Hash(), blocks[i].Time() - - if ftd, atd := fast.GetTd(hash, num), archive.GetTd(hash, num); ftd.Cmp(atd) != 0 { - t.Errorf("block #%d [%x]: td mismatch: fastdb %v, archivedb %v", num, hash, ftd, atd) - } - if antd, artd := ancient.GetTd(hash, num), archive.GetTd(hash, num); antd.Cmp(artd) != 0 { - t.Errorf("block #%d [%x]: td mismatch: ancientdb %v, archivedb %v", num, hash, antd, artd) - } - if fheader, aheader := fast.GetHeaderByHash(hash), archive.GetHeaderByHash(hash); fheader.Hash() != aheader.Hash() { - t.Errorf("block #%d [%x]: header mismatch: fastdb %v, archivedb %v", num, hash, fheader, aheader) - } - if anheader, arheader := ancient.GetHeaderByHash(hash), archive.GetHeaderByHash(hash); anheader.Hash() != arheader.Hash() { - t.Errorf("block #%d [%x]: header mismatch: ancientdb %v, archivedb %v", num, hash, anheader, arheader) - } - if fblock, arblock, anblock := fast.GetBlockByHash(hash), archive.GetBlockByHash(hash), ancient.GetBlockByHash(hash); fblock.Hash() != arblock.Hash() || anblock.Hash() != arblock.Hash() { - t.Errorf("block #%d [%x]: block mismatch: fastdb %v, ancientdb %v, archivedb %v", num, hash, fblock, anblock, arblock) - } else if types.DeriveSha(fblock.Transactions(), trie.NewStackTrie(nil)) != types.DeriveSha(arblock.Transactions(), trie.NewStackTrie(nil)) || types.DeriveSha(anblock.Transactions(), trie.NewStackTrie(nil)) != types.DeriveSha(arblock.Transactions(), trie.NewStackTrie(nil)) { - t.Errorf("block #%d [%x]: transactions mismatch: fastdb %v, ancientdb %v, archivedb %v", num, hash, fblock.Transactions(), anblock.Transactions(), arblock.Transactions()) - } else if types.CalcUncleHash(fblock.Uncles()) != types.CalcUncleHash(arblock.Uncles()) || types.CalcUncleHash(anblock.Uncles()) != types.CalcUncleHash(arblock.Uncles()) { - t.Errorf("block #%d [%x]: uncles mismatch: fastdb %v, ancientdb %v, archivedb %v", num, hash, fblock.Uncles(), anblock, arblock.Uncles()) - } - - // Check receipts. - freceipts := rawdb.ReadReceipts(fastDb, hash, num, time, fast.Config()) - anreceipts := rawdb.ReadReceipts(ancientDb, hash, num, time, fast.Config()) - areceipts := rawdb.ReadReceipts(archiveDb, hash, num, time, fast.Config()) - if types.DeriveSha(freceipts, trie.NewStackTrie(nil)) != types.DeriveSha(areceipts, trie.NewStackTrie(nil)) { - t.Errorf("block #%d [%x]: receipts mismatch: fastdb %v, ancientdb %v, archivedb %v", num, hash, freceipts, anreceipts, areceipts) - } - - // Check that hash-to-number mappings are present in all databases. 
- if m := rawdb.ReadHeaderNumber(fastDb, hash); m == nil || *m != num { - t.Errorf("block #%d [%x]: wrong hash-to-number mapping in fastdb: %v", num, hash, m) - } - if m := rawdb.ReadHeaderNumber(ancientDb, hash); m == nil || *m != num { - t.Errorf("block #%d [%x]: wrong hash-to-number mapping in ancientdb: %v", num, hash, m) - } - if m := rawdb.ReadHeaderNumber(archiveDb, hash); m == nil || *m != num { - t.Errorf("block #%d [%x]: wrong hash-to-number mapping in archivedb: %v", num, hash, m) - } - } - - // Check that the canonical chains are the same between the databases - for i := 0; i < len(blocks)+1; i++ { - if fhash, ahash := rawdb.ReadCanonicalHash(fastDb, uint64(i)), rawdb.ReadCanonicalHash(archiveDb, uint64(i)); fhash != ahash { - t.Errorf("block #%d: canonical hash mismatch: fastdb %v, archivedb %v", i, fhash, ahash) - } - if anhash, arhash := rawdb.ReadCanonicalHash(ancientDb, uint64(i)), rawdb.ReadCanonicalHash(archiveDb, uint64(i)); anhash != arhash { - t.Errorf("block #%d: canonical hash mismatch: ancientdb %v, archivedb %v", i, anhash, arhash) - } - } -} - -// Tests that various import methods move the chain head pointers to the correct -// positions. -func TestLightVsFastVsFullChainHeads(t *testing.T) { - testLightVsFastVsFullChainHeads(t, rawdb.HashScheme) - testLightVsFastVsFullChainHeads(t, rawdb.PathScheme) -} - -func testLightVsFastVsFullChainHeads(t *testing.T, scheme string) { - // Configure and generate a sample block chain - var ( - key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") - address = crypto.PubkeyToAddress(key.PublicKey) - funds = big.NewInt(1000000000000000) - gspec = &Genesis{ - Config: params.TestChainConfig, - Alloc: types.GenesisAlloc{address: {Balance: funds}}, - BaseFee: big.NewInt(params.InitialBaseFee), - } - ) - height := uint64(1024) - _, blocks, receipts := GenerateChainWithGenesis(gspec, ethash.NewFaker(), int(height), nil) - - // makeDb creates a db instance for testing. 
- makeDb := func() ethdb.Database { - db, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false, false, false, false, false) - if err != nil { - t.Fatalf("failed to create temp freezer db: %v", err) - } - return db - } - // Configure a subchain to roll back - remove := blocks[height/2].NumberU64() - - // Create a small assertion method to check the three heads - assert := func(t *testing.T, kind string, chain *BlockChain, header uint64, fast uint64, block uint64) { - t.Helper() - - if num := chain.CurrentBlock().Number.Uint64(); num != block { - t.Errorf("%s head block mismatch: have #%v, want #%v", kind, num, block) - } - if num := chain.CurrentSnapBlock().Number.Uint64(); num != fast { - t.Errorf("%s head snap-block mismatch: have #%v, want #%v", kind, num, fast) - } - if num := chain.CurrentHeader().Number.Uint64(); num != header { - t.Errorf("%s head header mismatch: have #%v, want #%v", kind, num, header) - } - } - // Import the chain as an archive node and ensure all pointers are updated - archiveDb := makeDb() - defer archiveDb.Close() - - archiveCaching := *defaultCacheConfig - archiveCaching.TrieDirtyDisabled = true - archiveCaching.StateScheme = scheme - - archive, _ := NewBlockChain(archiveDb, &archiveCaching, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) - if n, err := archive.InsertChain(blocks); err != nil { - t.Fatalf("failed to process block %d: %v", n, err) - } - defer archive.Stop() - - assert(t, "archive", archive, height, height, height) - archive.SetHead(remove - 1) - assert(t, "archive", archive, height/2, height/2, height/2) - - // Import the chain as a non-archive node and ensure all pointers are updated - fastDb := makeDb() - defer fastDb.Close() - fast, _ := NewBlockChain(fastDb, DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) - defer fast.Stop() - - headers := make([]*types.Header, len(blocks)) - for i, block := range blocks { - headers[i] = block.Header() - } - if n, err := fast.InsertHeaderChain(headers); err != nil { - t.Fatalf("failed to insert header %d: %v", n, err) - } - if n, err := fast.InsertReceiptChain(blocks, receipts, 0); err != nil { - t.Fatalf("failed to insert receipt %d: %v", n, err) - } - assert(t, "fast", fast, height, height, 0) - fast.SetHead(remove - 1) - assert(t, "fast", fast, height/2, height/2, 0) - - // Import the chain as a ancient-first node and ensure all pointers are updated - ancientDb := makeDb() - defer ancientDb.Close() - ancient, _ := NewBlockChain(ancientDb, DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) - defer ancient.Stop() - - if n, err := ancient.InsertHeaderChain(headers); err != nil { - t.Fatalf("failed to insert header %d: %v", n, err) - } - if n, err := ancient.InsertReceiptChain(blocks, receipts, uint64(3*len(blocks)/4)); err != nil { - t.Fatalf("failed to insert receipt %d: %v", n, err) - } - assert(t, "ancient", ancient, height, height, 0) - ancient.SetHead(remove - 1) - assert(t, "ancient", ancient, 0, 0, 0) - - if frozen, err := ancientDb.Ancients(); err != nil || frozen != 1 { - t.Fatalf("failed to truncate ancient store, want %v, have %v", 1, frozen) - } - // Import the chain as a light node and ensure all pointers are updated - lightDb := makeDb() - defer lightDb.Close() - light, _ := NewBlockChain(lightDb, DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) - if n, err := light.InsertHeaderChain(headers); err != nil { - t.Fatalf("failed to 
insert header %d: %v", n, err) - } - defer light.Stop() - - assert(t, "light", light, height, 0, 0) - light.SetHead(remove - 1) - assert(t, "light", light, height/2, 0, 0) -} - -// Tests that chain reorganisations handle transaction removals and reinsertions. -func TestChainTxReorgs(t *testing.T) { - testChainTxReorgs(t, rawdb.HashScheme) - testChainTxReorgs(t, rawdb.PathScheme) -} - -func testChainTxReorgs(t *testing.T, scheme string) { - var ( - key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") - key2, _ = crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a") - key3, _ = crypto.HexToECDSA("49a7b37aa6f6645917e7b807e9d1c00d4fa71f18343b0d4122a4d2df64dd6fee") - addr1 = crypto.PubkeyToAddress(key1.PublicKey) - addr2 = crypto.PubkeyToAddress(key2.PublicKey) - addr3 = crypto.PubkeyToAddress(key3.PublicKey) - gspec = &Genesis{ - Config: params.TestChainConfig, - GasLimit: 3141592, - Alloc: types.GenesisAlloc{ - addr1: {Balance: big.NewInt(1000000000000000)}, - addr2: {Balance: big.NewInt(1000000000000000)}, - addr3: {Balance: big.NewInt(1000000000000000)}, - }, - } - signer = types.LatestSigner(gspec.Config) - ) - - // Create two transactions shared between the chains: - // - postponed: transaction included at a later block in the forked chain - // - swapped: transaction included at the same block number in the forked chain - postponed, _ := types.SignTx(types.NewTransaction(0, addr1, big.NewInt(1000), params.TxGas, big.NewInt(params.InitialBaseFee), nil), signer, key1) - swapped, _ := types.SignTx(types.NewTransaction(1, addr1, big.NewInt(1000), params.TxGas, big.NewInt(params.InitialBaseFee), nil), signer, key1) - - // Create two transactions that will be dropped by the forked chain: - // - pastDrop: transaction dropped retroactively from a past block - // - freshDrop: transaction dropped exactly at the block where the reorg is detected - var pastDrop, freshDrop *types.Transaction - - // Create three transactions that will be added in the forked chain: - // - pastAdd: transaction added before the reorganization is detected - // - freshAdd: transaction added at the exact block the reorg is detected - // - futureAdd: transaction added after the reorg has already finished - var pastAdd, freshAdd, futureAdd *types.Transaction - - _, chain, _ := GenerateChainWithGenesis(gspec, ethash.NewFaker(), 3, func(i int, gen *BlockGen) { - switch i { - case 0: - pastDrop, _ = types.SignTx(types.NewTransaction(gen.TxNonce(addr2), addr2, big.NewInt(1000), params.TxGas, gen.header.BaseFee, nil), signer, key2) - - gen.AddTx(pastDrop) // This transaction will be dropped in the fork from below the split point - gen.AddTx(postponed) // This transaction will be postponed till block #3 in the fork - - case 2: - freshDrop, _ = types.SignTx(types.NewTransaction(gen.TxNonce(addr2), addr2, big.NewInt(1000), params.TxGas, gen.header.BaseFee, nil), signer, key2) - - gen.AddTx(freshDrop) // This transaction will be dropped in the fork from exactly at the split point - gen.AddTx(swapped) // This transaction will be swapped out at the exact height - - gen.OffsetTime(9) // Lower the block difficulty to simulate a weaker chain - } - }) - // Import the chain. This runs all block validation rules. 
- db := rawdb.NewMemoryDatabase() - blockchain, _ := NewBlockChain(db, DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) - if i, err := blockchain.InsertChain(chain); err != nil { - t.Fatalf("failed to insert original chain[%d]: %v", i, err) - } - defer blockchain.Stop() - - // overwrite the old chain - _, chain, _ = GenerateChainWithGenesis(gspec, ethash.NewFaker(), 5, func(i int, gen *BlockGen) { - switch i { - case 0: - pastAdd, _ = types.SignTx(types.NewTransaction(gen.TxNonce(addr3), addr3, big.NewInt(1000), params.TxGas, gen.header.BaseFee, nil), signer, key3) - gen.AddTx(pastAdd) // This transaction needs to be injected during reorg - - case 2: - gen.AddTx(postponed) // This transaction was postponed from block #1 in the original chain - gen.AddTx(swapped) // This transaction was swapped from the exact current spot in the original chain - - freshAdd, _ = types.SignTx(types.NewTransaction(gen.TxNonce(addr3), addr3, big.NewInt(1000), params.TxGas, gen.header.BaseFee, nil), signer, key3) - gen.AddTx(freshAdd) // This transaction will be added exactly at reorg time - - case 3: - futureAdd, _ = types.SignTx(types.NewTransaction(gen.TxNonce(addr3), addr3, big.NewInt(1000), params.TxGas, gen.header.BaseFee, nil), signer, key3) - gen.AddTx(futureAdd) // This transaction will be added after a full reorg - } - }) - if _, err := blockchain.InsertChain(chain); err != nil { - t.Fatalf("failed to insert forked chain: %v", err) - } - - // removed tx - for i, tx := range (types.Transactions{pastDrop, freshDrop}) { - if txn, _, _, _ := rawdb.ReadTransaction(db, tx.Hash()); txn != nil { - t.Errorf("drop %d: tx %v found while shouldn't have been", i, txn) - } - if rcpt, _, _, _ := rawdb.ReadReceipt(db, tx.Hash(), blockchain.Config()); rcpt != nil { - t.Errorf("drop %d: receipt %v found while shouldn't have been", i, rcpt) - } - } - // added tx - for i, tx := range (types.Transactions{pastAdd, freshAdd, futureAdd}) { - if txn, _, _, _ := rawdb.ReadTransaction(db, tx.Hash()); txn == nil { - t.Errorf("add %d: expected tx to be found", i) - } - if rcpt, _, _, _ := rawdb.ReadReceipt(db, tx.Hash(), blockchain.Config()); rcpt == nil { - t.Errorf("add %d: expected receipt to be found", i) - } - } - // shared tx - for i, tx := range (types.Transactions{postponed, swapped}) { - if txn, _, _, _ := rawdb.ReadTransaction(db, tx.Hash()); txn == nil { - t.Errorf("share %d: expected tx to be found", i) - } - if rcpt, _, _, _ := rawdb.ReadReceipt(db, tx.Hash(), blockchain.Config()); rcpt == nil { - t.Errorf("share %d: expected receipt to be found", i) - } - } -} - -func TestLogReorgs(t *testing.T) { - testLogReorgs(t, rawdb.HashScheme) - testLogReorgs(t, rawdb.PathScheme) -} - -func testLogReorgs(t *testing.T, scheme string) { - var ( - key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") - addr1 = crypto.PubkeyToAddress(key1.PublicKey) - - // this code generates a log - code = common.Hex2Bytes("60606040525b7f24ec1d3ff24c2f6ff210738839dbc339cd45a5294d85c79361016243157aae7b60405180905060405180910390a15b600a8060416000396000f360606040526008565b00") - gspec = &Genesis{Config: params.TestChainConfig, Alloc: types.GenesisAlloc{addr1: {Balance: big.NewInt(10000000000000000)}}} - signer = types.LatestSigner(gspec.Config) - ) - - blockchain, _ := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) - defer blockchain.Stop() - - rmLogsCh := make(chan 
RemovedLogsEvent) - blockchain.SubscribeRemovedLogsEvent(rmLogsCh) - _, chain, _ := GenerateChainWithGenesis(gspec, ethash.NewFaker(), 2, func(i int, gen *BlockGen) { - if i == 1 { - tx, err := types.SignTx(types.NewContractCreation(gen.TxNonce(addr1), new(big.Int), 1000000, gen.header.BaseFee, code), signer, key1) - if err != nil { - t.Fatalf("failed to create tx: %v", err) - } - gen.AddTx(tx) - } - }) - if _, err := blockchain.InsertChain(chain); err != nil { - t.Fatalf("failed to insert chain: %v", err) - } - - _, chain, _ = GenerateChainWithGenesis(gspec, ethash.NewFaker(), 3, func(i int, gen *BlockGen) {}) - done := make(chan struct{}) - go func() { - ev := <-rmLogsCh - if len(ev.Logs) == 0 { - t.Error("expected logs") - } - close(done) - }() - if _, err := blockchain.InsertChain(chain); err != nil { - t.Fatalf("failed to insert forked chain: %v", err) - } - timeout := time.NewTimer(1 * time.Second) - defer timeout.Stop() - select { - case <-done: - case <-timeout.C: - t.Fatal("Timeout. There is no RemovedLogsEvent has been sent.") - } -} - -// This EVM code generates a log when the contract is created. -var logCode = common.Hex2Bytes("60606040525b7f24ec1d3ff24c2f6ff210738839dbc339cd45a5294d85c79361016243157aae7b60405180905060405180910390a15b600a8060416000396000f360606040526008565b00") - -// This test checks that log events and RemovedLogsEvent are sent -// when the chain reorganizes. -func TestLogRebirth(t *testing.T) { - testLogRebirth(t, rawdb.HashScheme) - testLogRebirth(t, rawdb.PathScheme) -} - -func testLogRebirth(t *testing.T, scheme string) { - var ( - key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") - addr1 = crypto.PubkeyToAddress(key1.PublicKey) - gspec = &Genesis{Config: params.TestChainConfig, Alloc: types.GenesisAlloc{addr1: {Balance: big.NewInt(10000000000000000)}}} - signer = types.LatestSigner(gspec.Config) - engine = ethash.NewFaker() - blockchain, _ = NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), gspec, nil, engine, vm.Config{}, nil, nil) - ) - defer blockchain.Stop() - - // The event channels. - newLogCh := make(chan []*types.Log, 10) - rmLogsCh := make(chan RemovedLogsEvent, 10) - blockchain.SubscribeLogsEvent(newLogCh) - blockchain.SubscribeRemovedLogsEvent(rmLogsCh) - - // This chain contains 10 logs. - genDb, chain, _ := GenerateChainWithGenesis(gspec, engine, 3, func(i int, gen *BlockGen) { - if i < 2 { - for ii := 0; ii < 5; ii++ { - tx, err := types.SignNewTx(key1, signer, &types.LegacyTx{ - Nonce: gen.TxNonce(addr1), - GasPrice: gen.header.BaseFee, - Gas: uint64(1000001), - Data: logCode, - }) - if err != nil { - t.Fatalf("failed to create tx: %v", err) - } - gen.AddTx(tx) - } - } - }) - if _, err := blockchain.InsertChain(chain); err != nil { - t.Fatalf("failed to insert chain: %v", err) - } - checkLogEvents(t, newLogCh, rmLogsCh, 10, 0) - - // Generate long reorg chain containing more logs. Inserting the - // chain removes one log and adds four. 
- _, forkChain, _ := GenerateChainWithGenesis(gspec, engine, 3, func(i int, gen *BlockGen) { - if i == 2 { - // The last (head) block is not part of the reorg-chain, we can ignore it - return - } - for ii := 0; ii < 5; ii++ { - tx, err := types.SignNewTx(key1, signer, &types.LegacyTx{ - Nonce: gen.TxNonce(addr1), - GasPrice: gen.header.BaseFee, - Gas: uint64(1000000), - Data: logCode, - }) - if err != nil { - t.Fatalf("failed to create tx: %v", err) - } - gen.AddTx(tx) - } - gen.OffsetTime(-9) // higher block difficulty - }) - if _, err := blockchain.InsertChain(forkChain); err != nil { - t.Fatalf("failed to insert forked chain: %v", err) - } - checkLogEvents(t, newLogCh, rmLogsCh, 10, 10) - - // This chain segment is rooted in the original chain, but doesn't contain any logs. - // When inserting it, the canonical chain switches away from forkChain and re-emits - // the log event for the old chain, as well as a RemovedLogsEvent for forkChain. - newBlocks, _ := GenerateChain(gspec.Config, chain[len(chain)-1], engine, genDb, 1, func(i int, gen *BlockGen) {}) - if _, err := blockchain.InsertChain(newBlocks); err != nil { - t.Fatalf("failed to insert forked chain: %v", err) - } - checkLogEvents(t, newLogCh, rmLogsCh, 10, 10) -} - -// This test is a variation of TestLogRebirth. It verifies that log events are emitted -// when a side chain containing log events overtakes the canonical chain. -func TestSideLogRebirth(t *testing.T) { - testSideLogRebirth(t, rawdb.HashScheme) - testSideLogRebirth(t, rawdb.PathScheme) -} - -func testSideLogRebirth(t *testing.T, scheme string) { - var ( - key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") - addr1 = crypto.PubkeyToAddress(key1.PublicKey) - gspec = &Genesis{Config: params.TestChainConfig, Alloc: types.GenesisAlloc{addr1: {Balance: big.NewInt(10000000000000000)}}} - signer = types.LatestSigner(gspec.Config) - blockchain, _ = NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) - ) - defer blockchain.Stop() - - newLogCh := make(chan []*types.Log, 10) - rmLogsCh := make(chan RemovedLogsEvent, 10) - blockchain.SubscribeLogsEvent(newLogCh) - blockchain.SubscribeRemovedLogsEvent(rmLogsCh) - - _, chain, _ := GenerateChainWithGenesis(gspec, ethash.NewFaker(), 2, func(i int, gen *BlockGen) { - if i == 1 { - gen.OffsetTime(-9) // higher block difficulty - } - }) - if _, err := blockchain.InsertChain(chain); err != nil { - t.Fatalf("failed to insert forked chain: %v", err) - } - checkLogEvents(t, newLogCh, rmLogsCh, 0, 0) - - // Generate side chain with lower difficulty - genDb, sideChain, _ := GenerateChainWithGenesis(gspec, ethash.NewFaker(), 2, func(i int, gen *BlockGen) { - if i == 1 { - tx, err := types.SignTx(types.NewContractCreation(gen.TxNonce(addr1), new(big.Int), 1000000, gen.header.BaseFee, logCode), signer, key1) - if err != nil { - t.Fatalf("failed to create tx: %v", err) - } - gen.AddTx(tx) - } - }) - if _, err := blockchain.InsertChain(sideChain); err != nil { - t.Fatalf("failed to insert forked chain: %v", err) - } - checkLogEvents(t, newLogCh, rmLogsCh, 0, 0) - - // Generate a new block based on side chain. 
- newBlocks, _ := GenerateChain(gspec.Config, sideChain[len(sideChain)-1], ethash.NewFaker(), genDb, 1, func(i int, gen *BlockGen) {}) - if _, err := blockchain.InsertChain(newBlocks); err != nil { - t.Fatalf("failed to insert forked chain: %v", err) - } - checkLogEvents(t, newLogCh, rmLogsCh, 1, 0) -} - -func checkLogEvents(t *testing.T, logsCh <-chan []*types.Log, rmLogsCh <-chan RemovedLogsEvent, wantNew, wantRemoved int) { - t.Helper() - var ( - countNew int - countRm int - prev int - ) - // Drain events. - for len(logsCh) > 0 { - x := <-logsCh - countNew += len(x) - for _, log := range x { - // We expect added logs to be in ascending order: 0:0, 0:1, 1:0 ... - have := 100*int(log.BlockNumber) + int(log.TxIndex) - if have < prev { - t.Fatalf("Expected new logs to arrive in ascending order (%d < %d)", have, prev) - } - prev = have - } - } - prev = 0 - for len(rmLogsCh) > 0 { - x := <-rmLogsCh - countRm += len(x.Logs) - for _, log := range x.Logs { - // We expect removed logs to be in ascending order: 0:0, 0:1, 1:0 ... - have := 100*int(log.BlockNumber) + int(log.TxIndex) - if have < prev { - t.Fatalf("Expected removed logs to arrive in ascending order (%d < %d)", have, prev) - } - prev = have - } - } - - if countNew != wantNew { - t.Fatalf("wrong number of log events: got %d, want %d", countNew, wantNew) - } - if countRm != wantRemoved { - t.Fatalf("wrong number of removed log events: got %d, want %d", countRm, wantRemoved) - } -} - -func TestReorgSideEvent(t *testing.T) { - testReorgSideEvent(t, rawdb.HashScheme) - testReorgSideEvent(t, rawdb.PathScheme) -} - -func testReorgSideEvent(t *testing.T, scheme string) { - var ( - key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") - addr1 = crypto.PubkeyToAddress(key1.PublicKey) - gspec = &Genesis{ - Config: params.TestChainConfig, - Alloc: types.GenesisAlloc{addr1: {Balance: big.NewInt(10000000000000000)}}, - } - signer = types.LatestSigner(gspec.Config) - ) - blockchain, _ := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) - defer blockchain.Stop() - - _, chain, _ := GenerateChainWithGenesis(gspec, ethash.NewFaker(), 3, func(i int, gen *BlockGen) {}) - if _, err := blockchain.InsertChain(chain); err != nil { - t.Fatalf("failed to insert chain: %v", err) - } - - _, replacementBlocks, _ := GenerateChainWithGenesis(gspec, ethash.NewFaker(), 4, func(i int, gen *BlockGen) { - tx, err := types.SignTx(types.NewContractCreation(gen.TxNonce(addr1), new(big.Int), 1000000, gen.header.BaseFee, nil), signer, key1) - if i == 2 { - gen.OffsetTime(-9) - } - if err != nil { - t.Fatalf("failed to create tx: %v", err) - } - gen.AddTx(tx) - }) - chainSideCh := make(chan ChainSideEvent, 64) - blockchain.SubscribeChainSideEvent(chainSideCh) - if _, err := blockchain.InsertChain(replacementBlocks); err != nil { - t.Fatalf("failed to insert chain: %v", err) - } - - // first two block of the secondary chain are for a brief moment considered - // side chains because up to that point the first one is considered the - // heavier chain. 
-	expectedSideHashes := map[common.Hash]bool{ - replacementBlocks[0].Hash(): true, - replacementBlocks[1].Hash(): true, - chain[0].Hash(): true, - chain[1].Hash(): true, - chain[2].Hash(): true, - } - - i := 0 - - const timeoutDura = 10 * time.Second - timeout := time.NewTimer(timeoutDura) -done: - for { - select { - case ev := <-chainSideCh: - block := ev.Block - if _, ok := expectedSideHashes[block.Hash()]; !ok { - t.Errorf("%d: didn't expect %x to be in side chain", i, block.Hash()) - } - i++ - - if i == len(expectedSideHashes) { - timeout.Stop() - - break done - } - timeout.Reset(timeoutDura) - - case <-timeout.C: - t.Fatal("Timeout. Possibly not all blocks were triggered for side events") - } - } - - // make sure no more events are fired - select { - case e := <-chainSideCh: - t.Errorf("unexpected event fired: %v", e) - case <-time.After(250 * time.Millisecond): - } -} - -// Tests if the canonical block can be fetched from the database during chain insertion. -func TestCanonicalBlockRetrieval(t *testing.T) { - testCanonicalBlockRetrieval(t, rawdb.HashScheme) - testCanonicalBlockRetrieval(t, rawdb.PathScheme) -} - -func testCanonicalBlockRetrieval(t *testing.T, scheme string) { - _, gspec, blockchain, err := newCanonical(ethash.NewFaker(), 0, true, scheme, false) - if err != nil { - t.Fatalf("failed to create pristine chain: %v", err) - } - defer blockchain.Stop() - - _, chain, _ := GenerateChainWithGenesis(gspec, ethash.NewFaker(), 10, func(i int, gen *BlockGen) {}) - - var pend sync.WaitGroup - pend.Add(len(chain)) - - for i := range chain { - go func(block *types.Block) { - defer pend.Done() - - // try to retrieve a block by its canonical hash and see if the block data can be retrieved. - for { - ch := rawdb.ReadCanonicalHash(blockchain.db, block.NumberU64()) - if ch == (common.Hash{}) { - continue // busy wait for canonical hash to be written - } - if ch != block.Hash() { - t.Errorf("unknown canonical hash, want %s, got %s", block.Hash().Hex(), ch.Hex()) - return - } - fb := rawdb.ReadBlock(blockchain.db, ch, block.NumberU64()) - if fb == nil { - t.Errorf("unable to retrieve block %d for canonical hash: %s", block.NumberU64(), ch.Hex()) - return - } - if fb.Hash() != block.Hash() { - t.Errorf("invalid block hash for block %d, want %s, got %s", block.NumberU64(), block.Hash().Hex(), fb.Hash().Hex()) - return - } - return - } - }(chain[i]) - - if _, err := blockchain.InsertChain(types.Blocks{chain[i]}); err != nil { - t.Fatalf("failed to insert block %d: %v", i, err) - } - } - pend.Wait() -} -func TestEIP155Transition(t *testing.T) { - testEIP155Transition(t, rawdb.HashScheme) - testEIP155Transition(t, rawdb.PathScheme) -} - -func testEIP155Transition(t *testing.T, scheme string) { - // Configure and generate a sample block chain - var ( - key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") - address = crypto.PubkeyToAddress(key.PublicKey) - funds = big.NewInt(1000000000) - deleteAddr = common.Address{1} - gspec = &Genesis{ - Config: &params.ChainConfig{ - ChainID: big.NewInt(1), - EIP150Block: big.NewInt(0), - EIP155Block: big.NewInt(2), - HomesteadBlock: new(big.Int), - }, - Alloc: types.GenesisAlloc{address: {Balance: funds}, deleteAddr: {Balance: new(big.Int)}}, - } - ) - genDb, blocks, _ := GenerateChainWithGenesis(gspec, ethash.NewFaker(), 4, func(i int, block *BlockGen) { - var ( - tx *types.Transaction - err error - basicTx = func(signer types.Signer) (*types.Transaction, error) { - return
types.SignTx(types.NewTransaction(block.TxNonce(address), common.Address{}, new(big.Int), 21000, new(big.Int), nil), signer, key) - } - ) - switch i { - case 0: - tx, err = basicTx(types.HomesteadSigner{}) - if err != nil { - t.Fatal(err) - } - block.AddTx(tx) - case 2: - tx, err = basicTx(types.HomesteadSigner{}) - if err != nil { - t.Fatal(err) - } - block.AddTx(tx) - - tx, err = basicTx(types.LatestSigner(gspec.Config)) - if err != nil { - t.Fatal(err) - } - block.AddTx(tx) - case 3: - tx, err = basicTx(types.HomesteadSigner{}) - if err != nil { - t.Fatal(err) - } - block.AddTx(tx) - - tx, err = basicTx(types.LatestSigner(gspec.Config)) - if err != nil { - t.Fatal(err) - } - block.AddTx(tx) - } - }) - - blockchain, _ := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) - defer blockchain.Stop() - - if _, err := blockchain.InsertChain(blocks); err != nil { - t.Fatal(err) - } - block := blockchain.GetBlockByNumber(1) - if block.Transactions()[0].Protected() { - t.Error("Expected block[0].txs[0] to not be replay protected") - } - - block = blockchain.GetBlockByNumber(3) - if block.Transactions()[0].Protected() { - t.Error("Expected block[3].txs[0] to not be replay protected") - } - if !block.Transactions()[1].Protected() { - t.Error("Expected block[3].txs[1] to be replay protected") - } - if _, err := blockchain.InsertChain(blocks[4:]); err != nil { - t.Fatal(err) - } - - // generate an invalid chain id transaction - config := &params.ChainConfig{ - ChainID: big.NewInt(2), - EIP150Block: big.NewInt(0), - EIP155Block: big.NewInt(2), - HomesteadBlock: new(big.Int), - } - blocks, _ = GenerateChain(config, blocks[len(blocks)-1], ethash.NewFaker(), genDb, 4, func(i int, block *BlockGen) { - var ( - tx *types.Transaction - err error - basicTx = func(signer types.Signer) (*types.Transaction, error) { - return types.SignTx(types.NewTransaction(block.TxNonce(address), common.Address{}, new(big.Int), 21000, new(big.Int), nil), signer, key) - } - ) - if i == 0 { - tx, err = basicTx(types.LatestSigner(config)) - if err != nil { - t.Fatal(err) - } - block.AddTx(tx) - } - }) - _, err := blockchain.InsertChain(blocks) - if have, want := err, types.ErrInvalidChainId; !errors.Is(have, want) { - t.Errorf("have %v, want %v", have, want) - } -} -func TestEIP161AccountRemoval(t *testing.T) { - testEIP161AccountRemoval(t, rawdb.HashScheme) - testEIP161AccountRemoval(t, rawdb.PathScheme) -} - -func testEIP161AccountRemoval(t *testing.T, scheme string) { - // Configure and generate a sample block chain - var ( - key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") - address = crypto.PubkeyToAddress(key.PublicKey) - funds = big.NewInt(1000000000) - theAddr = common.Address{1} - gspec = &Genesis{ - Config: &params.ChainConfig{ - ChainID: big.NewInt(1), - HomesteadBlock: new(big.Int), - EIP155Block: new(big.Int), - EIP150Block: new(big.Int), - EIP158Block: big.NewInt(2), - }, - Alloc: types.GenesisAlloc{address: {Balance: funds}}, - } - ) - _, blocks, _ := GenerateChainWithGenesis(gspec, ethash.NewFaker(), 3, func(i int, block *BlockGen) { - var ( - tx *types.Transaction - err error - signer = types.LatestSigner(gspec.Config) - ) - switch i { - case 0: - tx, err = types.SignTx(types.NewTransaction(block.TxNonce(address), theAddr, new(big.Int), 21000, new(big.Int), nil), signer, key) - case 1: - tx, err = types.SignTx(types.NewTransaction(block.TxNonce(address), theAddr, new(big.Int), 21000,
new(big.Int), nil), signer, key) - case 2: - tx, err = types.SignTx(types.NewTransaction(block.TxNonce(address), theAddr, new(big.Int), 21000, new(big.Int), nil), signer, key) - } - if err != nil { - t.Fatal(err) - } - block.AddTx(tx) - }) - // account must exist pre eip 161 - blockchain, _ := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) - defer blockchain.Stop() - - if _, err := blockchain.InsertChain(types.Blocks{blocks[0]}); err != nil { - t.Fatal(err) - } - if st, _ := blockchain.State(); !st.Exist(theAddr) { - t.Error("expected account to exist") - } - - // account needs to be deleted post eip 161 - if _, err := blockchain.InsertChain(types.Blocks{blocks[1]}); err != nil { - t.Fatal(err) - } - if st, _ := blockchain.State(); st.Exist(theAddr) { - t.Error("account should not exist") - } - - // account mustn't be created post eip 161 - if _, err := blockchain.InsertChain(types.Blocks{blocks[2]}); err != nil { - t.Fatal(err) - } - if st, _ := blockchain.State(); st.Exist(theAddr) { - t.Error("account should not exist") - } -} - -// This is a regression test (i.e. as weird as it is, don't delete it ever), which -// tests that under weird reorg conditions the blockchain and its internal header- -// chain return the same latest block/header. -// -// https://github.com/ethereum/go-ethereum/pull/15941 -func TestBlockchainHeaderchainReorgConsistency(t *testing.T) { - testBlockchainHeaderchainReorgConsistency(t, rawdb.HashScheme) - testBlockchainHeaderchainReorgConsistency(t, rawdb.PathScheme) -} - -func testBlockchainHeaderchainReorgConsistency(t *testing.T, scheme string) { - // Generate a canonical chain to act as the main dataset - engine := ethash.NewFaker() - genesis := &Genesis{ - Config: params.TestChainConfig, - BaseFee: big.NewInt(params.InitialBaseFee), - } - genDb, blocks, _ := GenerateChainWithGenesis(genesis, engine, 64, func(i int, b *BlockGen) { b.SetCoinbase(common.Address{1}) }) - - // Generate a bunch of fork blocks, each side forking from the canonical chain - forks := make([]*types.Block, len(blocks)) - for i := 0; i < len(forks); i++ { - parent := genesis.ToBlock() - if i > 0 { - parent = blocks[i-1] - } - fork, _ := GenerateChain(genesis.Config, parent, engine, genDb, 1, func(i int, b *BlockGen) { b.SetCoinbase(common.Address{2}) }) - forks[i] = fork[0] - } - // Import the canonical and fork chain side by side, verifying the current block - // and current header consistency - chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), genesis, nil, engine, vm.Config{}, nil, nil) - if err != nil { - t.Fatalf("failed to create tester chain: %v", err) - } - defer chain.Stop() - - for i := 0; i < len(blocks); i++ { - if _, err := chain.InsertChain(blocks[i : i+1]); err != nil { - t.Fatalf("block %d: failed to insert into chain: %v", i, err) - } - if chain.CurrentBlock().Hash() != chain.CurrentHeader().Hash() { - t.Errorf("block %d: current block/header mismatch: block #%d [%x..], header #%d [%x..]", i, chain.CurrentBlock().Number, chain.CurrentBlock().Hash().Bytes()[:4], chain.CurrentHeader().Number, chain.CurrentHeader().Hash().Bytes()[:4]) - } - if _, err := chain.InsertChain(forks[i : i+1]); err != nil { - t.Fatalf(" fork %d: failed to insert into chain: %v", i, err) - } - if chain.CurrentBlock().Hash() != chain.CurrentHeader().Hash() { - t.Errorf(" fork %d: current block/header mismatch: block #%d [%x..], header #%d [%x..]", i, 
chain.CurrentBlock().Number, chain.CurrentBlock().Hash().Bytes()[:4], chain.CurrentHeader().Number, chain.CurrentHeader().Hash().Bytes()[:4]) - } - } -} - -// Tests that importing small side forks doesn't leave junk in the trie database -// cache (which would eventually cause memory issues). -func TestTrieForkGC(t *testing.T) { - // Generate a canonical chain to act as the main dataset - engine := ethash.NewFaker() - genesis := &Genesis{ - Config: params.TestChainConfig, - BaseFee: big.NewInt(params.InitialBaseFee), - } - genDb, blocks, _ := GenerateChainWithGenesis(genesis, engine, 2*TriesInMemory, func(i int, b *BlockGen) { b.SetCoinbase(common.Address{1}) }) - - // Generate a bunch of fork blocks, each side forking from the canonical chain - forks := make([]*types.Block, len(blocks)) - for i := 0; i < len(forks); i++ { - parent := genesis.ToBlock() - if i > 0 { - parent = blocks[i-1] - } - fork, _ := GenerateChain(genesis.Config, parent, engine, genDb, 1, func(i int, b *BlockGen) { b.SetCoinbase(common.Address{2}) }) - forks[i] = fork[0] - } - // Import the canonical and fork chain side by side, forcing the trie cache to cache both - chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), nil, genesis, nil, engine, vm.Config{}, nil, nil) - if err != nil { - t.Fatalf("failed to create tester chain: %v", err) - } - defer chain.Stop() - - for i := 0; i < len(blocks); i++ { - if _, err := chain.InsertChain(blocks[i : i+1]); err != nil { - t.Fatalf("block %d: failed to insert into chain: %v", i, err) - } - if _, err := chain.InsertChain(forks[i : i+1]); err != nil { - t.Fatalf("fork %d: failed to insert into chain: %v", i, err) - } - } - // Dereference all the recent tries and ensure no past trie is left in - for i := 0; i < TriesInMemory; i++ { - chain.TrieDB().Dereference(blocks[len(blocks)-1-i].Root()) - chain.TrieDB().Dereference(forks[len(blocks)-1-i].Root()) - } - if _, nodes, _, _ := chain.TrieDB().Size(); nodes > 0 { // all memory is returned in the nodes return for hashdb - t.Fatalf("stale tries still alive after garbage collection") - } -} - -// Tests that doing large reorgs works even if the state associated with the -// forking point is not available any more.
-func TestLargeReorgTrieGC(t *testing.T) { - testLargeReorgTrieGC(t, rawdb.HashScheme) - testLargeReorgTrieGC(t, rawdb.PathScheme) -} - -func testLargeReorgTrieGC(t *testing.T, scheme string) { - // Generate the original common chain segment and the two competing forks - engine := ethash.NewFaker() - genesis := &Genesis{ - Config: params.TestChainConfig, - BaseFee: big.NewInt(params.InitialBaseFee), - } - genDb, shared, _ := GenerateChainWithGenesis(genesis, engine, 64, func(i int, b *BlockGen) { b.SetCoinbase(common.Address{1}) }) - original, _ := GenerateChain(genesis.Config, shared[len(shared)-1], engine, genDb, 2*TriesInMemory, func(i int, b *BlockGen) { b.SetCoinbase(common.Address{2}) }) - competitor, _ := GenerateChain(genesis.Config, shared[len(shared)-1], engine, genDb, 2*TriesInMemory+1, func(i int, b *BlockGen) { b.SetCoinbase(common.Address{3}) }) - - // Import the shared chain and the original canonical one - db, _ := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false, false, false, false, false) - defer db.Close() - - chain, err := NewBlockChain(db, DefaultCacheConfigWithScheme(scheme), genesis, nil, engine, vm.Config{}, nil, nil) - if err != nil { - t.Fatalf("failed to create tester chain: %v", err) - } - defer chain.Stop() - - if _, err := chain.InsertChain(shared); err != nil { - t.Fatalf("failed to insert shared chain: %v", err) - } - if _, err := chain.InsertChain(original); err != nil { - t.Fatalf("failed to insert original chain: %v", err) - } - // Ensure that the state associated with the forking point is pruned away - if chain.HasState(shared[len(shared)-1].Root()) { - t.Fatalf("common-but-old ancestor still cache") - } - // Import the competitor chain without exceeding the canonical's TD and ensure - // we have not processed any of the blocks (protection against malicious blocks) - if _, err := chain.InsertChain(competitor[:len(competitor)-2]); err != nil { - t.Fatalf("failed to insert competitor chain: %v", err) - } - for i, block := range competitor[:len(competitor)-2] { - if chain.HasState(block.Root()) { - t.Fatalf("competitor %d: low TD chain became processed", i) - } - } - // Import the head of the competitor chain, triggering the reorg and ensure we - // successfully reprocess all the stashed away blocks. - if _, err := chain.InsertChain(competitor[len(competitor)-2:]); err != nil { - t.Fatalf("failed to finalize competitor chain: %v", err) - } - // In path-based trie database implementation, it will keep 128 diff + 1 disk - // layers, totally 129 latest states available. In hash-based it's 128. 
- states := TestTriesInMemory - if scheme == rawdb.PathScheme { - states = states + 1 - } - for i, block := range competitor[:len(competitor)-states] { - if chain.HasState(block.Root()) { - t.Fatalf("competitor %d: unexpected competing chain state", i) - } - } - for i, block := range competitor[len(competitor)-states:] { - if !chain.HasState(block.Root()) { - t.Fatalf("competitor %d: competing chain state missing", i) - } - } -} - -func TestBlockchainRecovery(t *testing.T) { - testBlockchainRecovery(t, rawdb.HashScheme) - testBlockchainRecovery(t, rawdb.PathScheme) -} - -func testBlockchainRecovery(t *testing.T, scheme string) { - // Configure and generate a sample block chain - var ( - key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") - address = crypto.PubkeyToAddress(key.PublicKey) - funds = big.NewInt(1000000000) - gspec = &Genesis{Config: params.TestChainConfig, Alloc: types.GenesisAlloc{address: {Balance: funds}}} - ) - height := uint64(1024) - _, blocks, receipts := GenerateChainWithGenesis(gspec, ethash.NewFaker(), int(height), nil) - - // Import the chain as a ancient-first node and ensure all pointers are updated - ancientDb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false, false, false, false, false) - if err != nil { - t.Fatalf("failed to create temp freezer db: %v", err) - } - defer ancientDb.Close() - ancient, _ := NewBlockChain(ancientDb, DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) - - headers := make([]*types.Header, len(blocks)) - for i, block := range blocks { - headers[i] = block.Header() - } - if n, err := ancient.InsertHeaderChain(headers); err != nil { - t.Fatalf("failed to insert header %d: %v", n, err) - } - if n, err := ancient.InsertReceiptChain(blocks, receipts, uint64(3*len(blocks)/4)); err != nil { - t.Fatalf("failed to insert receipt %d: %v", n, err) - } - rawdb.WriteLastPivotNumber(ancientDb, blocks[len(blocks)-1].NumberU64()) // Force fast sync behavior - ancient.Stop() - - // Destroy head fast block manually - midBlock := blocks[len(blocks)/2] - rawdb.WriteHeadFastBlockHash(ancientDb, midBlock.Hash()) - - // Reopen broken blockchain again - ancient, _ = NewBlockChain(ancientDb, DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) - defer ancient.Stop() - if num := ancient.CurrentBlock().Number.Uint64(); num != 0 { - t.Errorf("head block mismatch: have #%v, want #%v", num, 0) - } - if num := ancient.CurrentSnapBlock().Number.Uint64(); num != midBlock.NumberU64() { - t.Errorf("head snap-block mismatch: have #%v, want #%v", num, midBlock.NumberU64()) - } - if num := ancient.CurrentHeader().Number.Uint64(); num != midBlock.NumberU64() { - t.Errorf("head header mismatch: have #%v, want #%v", num, midBlock.NumberU64()) - } -} - -// This test checks that InsertReceiptChain will roll back correctly when attempting to insert a side chain. -func TestInsertReceiptChainRollback(t *testing.T) { - testInsertReceiptChainRollback(t, rawdb.HashScheme) - testInsertReceiptChainRollback(t, rawdb.PathScheme) -} - -func testInsertReceiptChainRollback(t *testing.T, scheme string) { - // Generate forked chain. The returned BlockChain object is used to process the side chain blocks. - tmpChain, sideblocks, canonblocks, gspec, err := getLongAndShortChains(scheme) - if err != nil { - t.Fatal(err) - } - defer tmpChain.Stop() - // Get the side chain receipts. 
- if _, err := tmpChain.InsertChain(sideblocks); err != nil { - t.Fatal("processing side chain failed:", err) - } - t.Log("sidechain head:", tmpChain.CurrentBlock().Number, tmpChain.CurrentBlock().Hash()) - sidechainReceipts := make([]types.Receipts, len(sideblocks)) - for i, block := range sideblocks { - sidechainReceipts[i] = tmpChain.GetReceiptsByHash(block.Hash()) - } - // Get the canon chain receipts. - if _, err := tmpChain.InsertChain(canonblocks); err != nil { - t.Fatal("processing canon chain failed:", err) - } - t.Log("canon head:", tmpChain.CurrentBlock().Number, tmpChain.CurrentBlock().Hash()) - canonReceipts := make([]types.Receipts, len(canonblocks)) - for i, block := range canonblocks { - canonReceipts[i] = tmpChain.GetReceiptsByHash(block.Hash()) - } - - // Set up a BlockChain that uses the ancient store. - ancientDb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false, false, false, false, false) - if err != nil { - t.Fatalf("failed to create temp freezer db: %v", err) - } - defer ancientDb.Close() - - ancientChain, _ := NewBlockChain(ancientDb, DefaultCacheConfigWithScheme(scheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) - defer ancientChain.Stop() - - // Import the canonical header chain. - canonHeaders := make([]*types.Header, len(canonblocks)) - for i, block := range canonblocks { - canonHeaders[i] = block.Header() - } - if _, err = ancientChain.InsertHeaderChain(canonHeaders); err != nil { - t.Fatal("can't import canon headers:", err) - } - - // Try to insert blocks/receipts of the side chain. - _, err = ancientChain.InsertReceiptChain(sideblocks, sidechainReceipts, uint64(len(sideblocks))) - if err == nil { - t.Fatal("expected error from InsertReceiptChain.") - } - if ancientChain.CurrentSnapBlock().Number.Uint64() != 0 { - t.Fatalf("failed to rollback ancient data, want %d, have %d", 0, ancientChain.CurrentSnapBlock().Number) - } - if frozen, err := ancientChain.db.Ancients(); err != nil || frozen != 1 { - t.Fatalf("failed to truncate ancient data, frozen index is %d", frozen) - } - - // Insert blocks/receipts of the canonical chain. - _, err = ancientChain.InsertReceiptChain(canonblocks, canonReceipts, uint64(len(canonblocks))) - if err != nil { - t.Fatalf("can't import canon chain receipts: %v", err) - } - if ancientChain.CurrentSnapBlock().Number.Uint64() != canonblocks[len(canonblocks)-1].NumberU64() { - t.Fatalf("failed to insert ancient recept chain after rollback") - } - if frozen, _ := ancientChain.db.Ancients(); frozen != uint64(len(canonblocks))+1 { - t.Fatalf("wrong ancients count %d", frozen) - } -} - -// Tests that importing a very large side fork, which is larger than the canon chain, -// but where the difficulty per block is kept low: this means that it will not -// overtake the 'canon' chain until after it's passed canon by about 200 blocks. 
-// -// Details at: -// - https://github.com/ethereum/go-ethereum/issues/18977 -// - https://github.com/ethereum/go-ethereum/pull/18988 -func TestLowDiffLongChain(t *testing.T) { - testLowDiffLongChain(t, rawdb.HashScheme) - testLowDiffLongChain(t, rawdb.PathScheme) -} - -func testLowDiffLongChain(t *testing.T, scheme string) { - // Generate a canonical chain to act as the main dataset - engine := ethash.NewFaker() - genesis := &Genesis{ - Config: params.TestChainConfig, - BaseFee: big.NewInt(params.InitialBaseFee), - } - // We must use a pretty long chain to ensure that the fork doesn't overtake us - // until after at least 128 blocks post tip - genDb, blocks, _ := GenerateChainWithGenesis(genesis, engine, 6*TriesInMemory, func(i int, b *BlockGen) { - b.SetCoinbase(common.Address{1}) - b.OffsetTime(-9) - }) - - // Import the canonical chain - diskdb, _ := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false, false, false, false, false) - defer diskdb.Close() - - chain, err := NewBlockChain(diskdb, DefaultCacheConfigWithScheme(scheme), genesis, nil, engine, vm.Config{}, nil, nil) - if err != nil { - t.Fatalf("failed to create tester chain: %v", err) - } - defer chain.Stop() - - if n, err := chain.InsertChain(blocks); err != nil { - t.Fatalf("block %d: failed to insert into chain: %v", n, err) - } - // Generate fork chain, starting from an early block - parent := blocks[10] - fork, _ := GenerateChain(genesis.Config, parent, engine, genDb, 8*TriesInMemory, func(i int, b *BlockGen) { - b.SetCoinbase(common.Address{2}) - }) - - // And now import the fork - if i, err := chain.InsertChain(fork); err != nil { - t.Fatalf("block %d: failed to insert into chain: %v", i, err) - } - head := chain.CurrentBlock() - if got := fork[len(fork)-1].Hash(); got != head.Hash() { - t.Fatalf("head wrong, expected %x got %x", head.Hash(), got) - } - // Sanity check that all the canonical numbers are present - header := chain.CurrentHeader() - for number := head.Number.Uint64(); number > 0; number-- { - if hash := chain.GetHeaderByNumber(number).Hash(); hash != header.Hash() { - t.Fatalf("header %d: canonical hash mismatch: have %x, want %x", number, hash, header.Hash()) - } - header = chain.GetHeader(header.ParentHash, number-1) - } -} - -// Tests that importing a sidechain (S), where -// - S is sidechain, containing blocks [Sn...Sm] -// - C is canon chain, containing blocks [G..Cn..Cm] -// - A common ancestor is placed at prune-point + blocksBetweenCommonAncestorAndPruneblock -// - The sidechain S is prepended with numCanonBlocksInSidechain blocks from the canon chain -// -// The mergePoint can be these values: -// -1: the transition won't happen -// 0: the transition happens since genesis -// 1: the transition happens after some chain segments -func testSideImport(t *testing.T, numCanonBlocksInSidechain, blocksBetweenCommonAncestorAndPruneblock int, mergePoint int) { - // Generate a canonical chain to act as the main dataset - chainConfig := *params.TestChainConfig - var ( - merger = consensus.NewMerger(rawdb.NewMemoryDatabase()) - engine = beacon.New(ethash.NewFaker()) - key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") - addr = crypto.PubkeyToAddress(key.PublicKey) - nonce = uint64(0) - - gspec = &Genesis{ - Config: &chainConfig, - Alloc: types.GenesisAlloc{addr: {Balance: big.NewInt(math.MaxInt64)}}, - BaseFee: big.NewInt(params.InitialBaseFee), - } - signer = types.LatestSigner(gspec.Config) - mergeBlock = math.MaxInt32 - ) - // Generate 
and import the canonical chain - chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, engine, vm.Config{}, nil, nil) - if err != nil { - t.Fatalf("failed to create tester chain: %v", err) - } - defer chain.Stop() - - // Activate the transition since genesis if required - if mergePoint == 0 { - mergeBlock = 0 - merger.ReachTTD() - merger.FinalizePoS() - - // Set the terminal total difficulty in the config - gspec.Config.TerminalTotalDifficulty = big.NewInt(0) - } - genDb, blocks, _ := GenerateChainWithGenesis(gspec, engine, 2*TriesInMemory, func(i int, gen *BlockGen) { - tx, err := types.SignTx(types.NewTransaction(nonce, common.HexToAddress("deadbeef"), big.NewInt(100), 21000, big.NewInt(int64(i+1)*params.GWei), nil), signer, key) - if err != nil { - t.Fatalf("failed to create tx: %v", err) - } - gen.AddTx(tx) - if int(gen.header.Number.Uint64()) >= mergeBlock { - gen.SetPoS() - } - nonce++ - }) - if n, err := chain.InsertChain(blocks); err != nil { - t.Fatalf("block %d: failed to insert into chain: %v", n, err) - } - - lastPrunedIndex := len(blocks) - TestTriesInMemory - 1 - lastPrunedBlock := blocks[lastPrunedIndex-1] - firstNonPrunedBlock := blocks[len(blocks)-TestTriesInMemory] - - // Verify pruning of lastPrunedBlock - if chain.HasBlockAndState(lastPrunedBlock.Hash(), lastPrunedBlock.NumberU64()) { - t.Errorf("Block %d not pruned", lastPrunedBlock.NumberU64()) - } - // Verify firstNonPrunedBlock is not pruned - if !chain.HasBlockAndState(firstNonPrunedBlock.Hash(), firstNonPrunedBlock.NumberU64()) { - t.Errorf("Block %d pruned", firstNonPrunedBlock.NumberU64()) - } - - // Activate the transition in the middle of the chain - if mergePoint == 1 { - merger.ReachTTD() - merger.FinalizePoS() - // Set the terminal total difficulty in the config - ttd := big.NewInt(int64(len(blocks))) - ttd.Mul(ttd, params.GenesisDifficulty) - gspec.Config.TerminalTotalDifficulty = ttd - mergeBlock = len(blocks) - } - - // Generate the sidechain - // First block should be a known block, block after should be a pruned block. So - // canon(pruned), side, side... - - // Generate fork chain, make it longer than canon - parentIndex := lastPrunedIndex + blocksBetweenCommonAncestorAndPruneblock - parent := blocks[parentIndex] - fork, _ := GenerateChain(gspec.Config, parent, engine, genDb, 2*TriesInMemory, func(i int, b *BlockGen) { - b.SetCoinbase(common.Address{2}) - if int(b.header.Number.Uint64()) >= mergeBlock { - b.SetPoS() - } - }) - // Prepend the parent(s) - var sidechain []*types.Block - for i := numCanonBlocksInSidechain; i > 0; i-- { - sidechain = append(sidechain, blocks[parentIndex+1-i]) - } - sidechain = append(sidechain, fork...) - n, err := chain.InsertChain(sidechain) - if err != nil { - t.Errorf("Got error, %v number %d - %d", err, sidechain[n].NumberU64(), n) - } - head := chain.CurrentBlock() - if got := fork[len(fork)-1].Hash(); got != head.Hash() { - t.Fatalf("head wrong, expected %x got %x", head.Hash(), got) - } -} - -// Tests that importing a sidechain (S), where -// - S is sidechain, containing blocks [Sn...Sm] -// - C is canon chain, containing blocks [G..Cn..Cm] -// - The common ancestor Cc is pruned -// - The first block in S: Sn, is == Cn -// -// That is: the sidechain for import contains some blocks already present in canon chain. -// So the blocks are: -// -// [ Cn, Cn+1, Cc, Sn+3 ... 
Sm] -// ^ ^ ^ pruned -func TestPrunedImportSide(t *testing.T) { - //glogger := log.NewGlogHandler(log.StreamHandler(os.Stdout, log.TerminalFormat(false))) - //glogger.Verbosity(3) - //log.Root().SetHandler(log.Handler(glogger)) - testSideImport(t, 3, 3, -1) - testSideImport(t, 3, -3, -1) - testSideImport(t, 10, 0, -1) - testSideImport(t, 1, 10, -1) - testSideImport(t, 1, -10, -1) -} - -func TestPrunedImportSideWithMerging(t *testing.T) { - //glogger := log.NewGlogHandler(log.StreamHandler(os.Stdout, log.TerminalFormat(false))) - //glogger.Verbosity(3) - //log.Root().SetHandler(log.Handler(glogger)) - testSideImport(t, 3, 3, 0) - testSideImport(t, 3, -3, 0) - testSideImport(t, 10, 0, 0) - testSideImport(t, 1, 10, 0) - testSideImport(t, 1, -10, 0) - - testSideImport(t, 3, 3, 1) - testSideImport(t, 3, -3, 1) - testSideImport(t, 10, 0, 1) - testSideImport(t, 1, 10, 1) - testSideImport(t, 1, -10, 1) -} - -func TestInsertKnownHeaders(t *testing.T) { - testInsertKnownChainData(t, "headers", rawdb.HashScheme) - testInsertKnownChainData(t, "headers", rawdb.PathScheme) -} -func TestInsertKnownReceiptChain(t *testing.T) { - testInsertKnownChainData(t, "receipts", rawdb.HashScheme) - testInsertKnownChainData(t, "receipts", rawdb.PathScheme) -} -func TestInsertKnownBlocks(t *testing.T) { - testInsertKnownChainData(t, "blocks", rawdb.HashScheme) - testInsertKnownChainData(t, "blocks", rawdb.PathScheme) -} - -func testInsertKnownChainData(t *testing.T, typ string, scheme string) { - engine := ethash.NewFaker() - genesis := &Genesis{ - Config: params.TestChainConfig, - BaseFee: big.NewInt(params.InitialBaseFee), - } - genDb, blocks, receipts := GenerateChainWithGenesis(genesis, engine, 32, func(i int, b *BlockGen) { b.SetCoinbase(common.Address{1}) }) - - // A longer chain but total difficulty is lower. - blocks2, receipts2 := GenerateChain(genesis.Config, blocks[len(blocks)-1], engine, genDb, 65, func(i int, b *BlockGen) { b.SetCoinbase(common.Address{1}) }) - - // A shorter chain but total difficulty is higher. 
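The b.OffsetTime(-9) call in the generator just below compresses the child/parent time gap (roughly 1s instead of the generator's default 10s spacing), which under the pre-merge ethash rules raises each block's difficulty, so the 64-block chain ends up heavier than the 65-block one. A rough sketch of that adjustment (hedged: Homestead-style rule only, ignoring the difficulty bomb and the minimum-difficulty floor; assumes math/big):

    func childDifficulty(parentDiff *big.Int, parentTime, childTime uint64) *big.Int {
        // adjust = max(1 - (childTime-parentTime)/10, -99); smaller gaps push difficulty up
        adjust := int64(1) - int64(childTime-parentTime)/10
        if adjust < -99 {
            adjust = -99
        }
        delta := new(big.Int).Div(parentDiff, big.NewInt(2048))
        delta.Mul(delta, big.NewInt(adjust))
        return new(big.Int).Add(parentDiff, delta)
    }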
- blocks3, receipts3 := GenerateChain(genesis.Config, blocks[len(blocks)-1], engine, genDb, 64, func(i int, b *BlockGen) { - b.SetCoinbase(common.Address{1}) - b.OffsetTime(-9) // A higher difficulty - }) - // Import the shared chain and the original canonical one - chaindb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false, false, false, false, false) - if err != nil { - t.Fatalf("failed to create temp freezer db: %v", err) - } - defer chaindb.Close() - - chain, err := NewBlockChain(chaindb, DefaultCacheConfigWithScheme(scheme), genesis, nil, engine, vm.Config{}, nil, nil) - if err != nil { - t.Fatalf("failed to create tester chain: %v", err) - } - defer chain.Stop() - - var ( - inserter func(blocks []*types.Block, receipts []types.Receipts) error - asserter func(t *testing.T, block *types.Block) - ) - if typ == "headers" { - inserter = func(blocks []*types.Block, receipts []types.Receipts) error { - headers := make([]*types.Header, 0, len(blocks)) - for _, block := range blocks { - headers = append(headers, block.Header()) - } - _, err := chain.InsertHeaderChain(headers) - return err - } - asserter = func(t *testing.T, block *types.Block) { - if chain.CurrentHeader().Hash() != block.Hash() { - t.Fatalf("current head header mismatch, have %v, want %v", chain.CurrentHeader().Hash().Hex(), block.Hash().Hex()) - } - } - } else if typ == "receipts" { - inserter = func(blocks []*types.Block, receipts []types.Receipts) error { - headers := make([]*types.Header, 0, len(blocks)) - for _, block := range blocks { - headers = append(headers, block.Header()) - } - _, err := chain.InsertHeaderChain(headers) - if err != nil { - return err - } - _, err = chain.InsertReceiptChain(blocks, receipts, 0) - return err - } - asserter = func(t *testing.T, block *types.Block) { - if chain.CurrentSnapBlock().Hash() != block.Hash() { - t.Fatalf("current head fast block mismatch, have %v, want %v", chain.CurrentSnapBlock().Hash().Hex(), block.Hash().Hex()) - } - } - } else { - inserter = func(blocks []*types.Block, receipts []types.Receipts) error { - _, err := chain.InsertChain(blocks) - return err - } - asserter = func(t *testing.T, block *types.Block) { - if chain.CurrentBlock().Hash() != block.Hash() { - t.Fatalf("current head block mismatch, have %v, want %v", chain.CurrentBlock().Hash().Hex(), block.Hash().Hex()) - } - } - } - - if err := inserter(blocks, receipts); err != nil { - t.Fatalf("failed to insert chain data: %v", err) - } - - // Reimport the chain data again. All the imported - // chain data are regarded "known" data. - if err := inserter(blocks, receipts); err != nil { - t.Fatalf("failed to insert chain data: %v", err) - } - asserter(t, blocks[len(blocks)-1]) - - // Import a long canonical chain with some known data as prefix. - rollback := blocks[len(blocks)/2].NumberU64() - - chain.SetHead(rollback - 1) - if err := inserter(append(blocks, blocks2...), append(receipts, receipts2...)); err != nil { - t.Fatalf("failed to insert chain data: %v", err) - } - asserter(t, blocks2[len(blocks2)-1]) - - // Import a heavier shorter but higher total difficulty chain with some known data as prefix. - if err := inserter(append(blocks, blocks3...), append(receipts, receipts3...)); err != nil { - t.Fatalf("failed to insert chain data: %v", err) - } - asserter(t, blocks3[len(blocks3)-1]) - - // Import a longer but lower total difficulty chain with some known data as prefix. 
- if err := inserter(append(blocks, blocks2...), append(receipts, receipts2...)); err != nil { - t.Fatalf("failed to insert chain data: %v", err) - } - // The head shouldn't change. - asserter(t, blocks3[len(blocks3)-1]) - - // Rollback the heavier chain and re-insert the longer chain again - chain.SetHead(rollback - 1) - if err := inserter(append(blocks, blocks2...), append(receipts, receipts2...)); err != nil { - t.Fatalf("failed to insert chain data: %v", err) - } - asserter(t, blocks2[len(blocks2)-1]) -} - -func TestInsertKnownHeadersWithMerging(t *testing.T) { - testInsertKnownChainDataWithMerging(t, "headers", 0) -} -func TestInsertKnownReceiptChainWithMerging(t *testing.T) { - testInsertKnownChainDataWithMerging(t, "receipts", 0) -} -func TestInsertKnownBlocksWithMerging(t *testing.T) { - testInsertKnownChainDataWithMerging(t, "blocks", 0) -} -func TestInsertKnownHeadersAfterMerging(t *testing.T) { - testInsertKnownChainDataWithMerging(t, "headers", 1) -} -func TestInsertKnownReceiptChainAfterMerging(t *testing.T) { - testInsertKnownChainDataWithMerging(t, "receipts", 1) -} -func TestInsertKnownBlocksAfterMerging(t *testing.T) { - testInsertKnownChainDataWithMerging(t, "blocks", 1) -} - -// mergeHeight can be assigned in these values: -// 0: means the merging is applied since genesis -// 1: means the merging is applied after the first segment -func testInsertKnownChainDataWithMerging(t *testing.T, typ string, mergeHeight int) { - // Copy the TestChainConfig so we can modify it during tests - chainConfig := *params.TestChainConfig - var ( - genesis = &Genesis{ - BaseFee: big.NewInt(params.InitialBaseFee), - Config: &chainConfig, - } - engine = beacon.New(ethash.NewFaker()) - mergeBlock = uint64(math.MaxUint64) - ) - // Apply merging since genesis - if mergeHeight == 0 { - genesis.Config.TerminalTotalDifficulty = big.NewInt(0) - mergeBlock = uint64(0) - } - - genDb, blocks, receipts := GenerateChainWithGenesis(genesis, engine, 32, - func(i int, b *BlockGen) { - if b.header.Number.Uint64() >= mergeBlock { - b.SetPoS() - } - b.SetCoinbase(common.Address{1}) - }) - - // Apply merging after the first segment - if mergeHeight == 1 { - // TTD is genesis diff + blocks - ttd := big.NewInt(1 + int64(len(blocks))) - ttd.Mul(ttd, params.GenesisDifficulty) - genesis.Config.TerminalTotalDifficulty = ttd - mergeBlock = uint64(len(blocks)) - } - // Longer chain and shorter chain - blocks2, receipts2 := GenerateChain(genesis.Config, blocks[len(blocks)-1], engine, genDb, 65, func(i int, b *BlockGen) { - b.SetCoinbase(common.Address{1}) - if b.header.Number.Uint64() >= mergeBlock { - b.SetPoS() - } - }) - blocks3, receipts3 := GenerateChain(genesis.Config, blocks[len(blocks)-1], engine, genDb, 64, func(i int, b *BlockGen) { - b.SetCoinbase(common.Address{1}) - b.OffsetTime(-9) // Time shifted, difficulty shouldn't be changed - if b.header.Number.Uint64() >= mergeBlock { - b.SetPoS() - } - }) - // Import the shared chain and the original canonical one - chaindb, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false, false, false, false, false) - if err != nil { - t.Fatalf("failed to create temp freezer db: %v", err) - } - defer chaindb.Close() - - chain, err := NewBlockChain(chaindb, nil, genesis, nil, engine, vm.Config{}, nil, nil) - if err != nil { - t.Fatalf("failed to create tester chain: %v", err) - } - defer chain.Stop() - - var ( - inserter func(blocks []*types.Block, receipts []types.Receipts) error - asserter func(t *testing.T, block *types.Block) - ) - if typ == 
"headers" { - inserter = func(blocks []*types.Block, receipts []types.Receipts) error { - headers := make([]*types.Header, 0, len(blocks)) - for _, block := range blocks { - headers = append(headers, block.Header()) - } - i, err := chain.InsertHeaderChain(headers) - if err != nil { - return fmt.Errorf("index %d, number %d: %w", i, headers[i].Number, err) - } - return err - } - asserter = func(t *testing.T, block *types.Block) { - if chain.CurrentHeader().Hash() != block.Hash() { - t.Fatalf("current head header mismatch, have %v, want %v", chain.CurrentHeader().Hash().Hex(), block.Hash().Hex()) - } - } - } else if typ == "receipts" { - inserter = func(blocks []*types.Block, receipts []types.Receipts) error { - headers := make([]*types.Header, 0, len(blocks)) - for _, block := range blocks { - headers = append(headers, block.Header()) - } - i, err := chain.InsertHeaderChain(headers) - if err != nil { - return fmt.Errorf("index %d: %w", i, err) - } - _, err = chain.InsertReceiptChain(blocks, receipts, 0) - return err - } - asserter = func(t *testing.T, block *types.Block) { - if chain.CurrentSnapBlock().Hash() != block.Hash() { - t.Fatalf("current head fast block mismatch, have %v, want %v", chain.CurrentSnapBlock().Hash().Hex(), block.Hash().Hex()) - } - } - } else { - inserter = func(blocks []*types.Block, receipts []types.Receipts) error { - i, err := chain.InsertChain(blocks) - if err != nil { - return fmt.Errorf("index %d: %w", i, err) - } - return nil - } - asserter = func(t *testing.T, block *types.Block) { - if chain.CurrentBlock().Hash() != block.Hash() { - t.Fatalf("current head block mismatch, have %v, want %v", chain.CurrentBlock().Hash().Hex(), block.Hash().Hex()) - } - } - } - if err := inserter(blocks, receipts); err != nil { - t.Fatalf("failed to insert chain data: %v", err) - } - - // Reimport the chain data again. All the imported - // chain data are regarded "known" data. - if err := inserter(blocks, receipts); err != nil { - t.Fatalf("failed to insert chain data: %v", err) - } - asserter(t, blocks[len(blocks)-1]) - - // Import a long canonical chain with some known data as prefix. - rollback := blocks[len(blocks)/2].NumberU64() - chain.SetHead(rollback - 1) - if err := inserter(blocks, receipts); err != nil { - t.Fatalf("failed to insert chain data: %v", err) - } - asserter(t, blocks[len(blocks)-1]) - - // Import a longer chain with some known data as prefix. - if err := inserter(append(blocks, blocks2...), append(receipts, receipts2...)); err != nil { - t.Fatalf("failed to insert chain data: %v", err) - } - asserter(t, blocks2[len(blocks2)-1]) - - // Import a shorter chain with some known data as prefix. - // The reorg is expected since the fork choice rule is - // already changed. - if err := inserter(append(blocks, blocks3...), append(receipts, receipts3...)); err != nil { - t.Fatalf("failed to insert chain data: %v", err) - } - // The head shouldn't change. - asserter(t, blocks3[len(blocks3)-1]) - - // Reimport the longer chain again, the reorg is still expected - chain.SetHead(rollback - 1) - if err := inserter(append(blocks, blocks2...), append(receipts, receipts2...)); err != nil { - t.Fatalf("failed to insert chain data: %v", err) - } - asserter(t, blocks2[len(blocks2)-1]) -} - -// getLongAndShortChains returns two chains: A is longer, B is heavier. 
-func getLongAndShortChains(scheme string) (*BlockChain, []*types.Block, []*types.Block, *Genesis, error) { - // Generate a canonical chain to act as the main dataset - engine := ethash.NewFaker() - genesis := &Genesis{ - Config: params.TestChainConfig, - BaseFee: big.NewInt(params.InitialBaseFee), - } - // Generate and import the canonical chain, - // Offset the time, to keep the difficulty low - genDb, longChain, _ := GenerateChainWithGenesis(genesis, engine, 80, func(i int, b *BlockGen) { - b.SetCoinbase(common.Address{1}) - }) - chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), genesis, nil, engine, vm.Config{}, nil, nil) - if err != nil { - return nil, nil, nil, nil, fmt.Errorf("failed to create tester chain: %v", err) - } - // Generate fork chain, make it shorter than canon, with common ancestor pretty early - parentIndex := 3 - parent := longChain[parentIndex] - heavyChainExt, _ := GenerateChain(genesis.Config, parent, engine, genDb, 75, func(i int, b *BlockGen) { - b.SetCoinbase(common.Address{2}) - b.OffsetTime(-9) - }) - var heavyChain []*types.Block - heavyChain = append(heavyChain, longChain[:parentIndex+1]...) - heavyChain = append(heavyChain, heavyChainExt...) - - // Verify that the test is sane - var ( - longerTd = new(big.Int) - shorterTd = new(big.Int) - ) - for index, b := range longChain { - longerTd.Add(longerTd, b.Difficulty()) - if index <= parentIndex { - shorterTd.Add(shorterTd, b.Difficulty()) - } - } - for _, b := range heavyChain { - shorterTd.Add(shorterTd, b.Difficulty()) - } - if shorterTd.Cmp(longerTd) <= 0 { - return nil, nil, nil, nil, fmt.Errorf("test is moot, heavyChain td (%v) must be larger than canon td (%v)", shorterTd, longerTd) - } - longerNum := longChain[len(longChain)-1].NumberU64() - shorterNum := heavyChain[len(heavyChain)-1].NumberU64() - if shorterNum >= longerNum { - return nil, nil, nil, nil, fmt.Errorf("test is moot, heavyChain num (%v) must be lower than canon num (%v)", shorterNum, longerNum) - } - return chain, longChain, heavyChain, genesis, nil -} - -// TestReorgToShorterRemovesCanonMapping tests that if we -// 1. Have a chain [0 ... N .. X] -// 2. Reorg to shorter but heavier chain [0 ... N ... Y] -// 3. Then there should be no canon mapping for the block at height X -// 4. The forked block should still be retrievable by hash -func TestReorgToShorterRemovesCanonMapping(t *testing.T) { - testReorgToShorterRemovesCanonMapping(t, rawdb.HashScheme) - testReorgToShorterRemovesCanonMapping(t, rawdb.PathScheme) -} - -func testReorgToShorterRemovesCanonMapping(t *testing.T, scheme string) { - chain, canonblocks, sideblocks, _, err := getLongAndShortChains(scheme) - if err != nil { - t.Fatal(err) - } - defer chain.Stop() - - if n, err := chain.InsertChain(canonblocks); err != nil { - t.Fatalf("block %d: failed to insert into chain: %v", n, err) - } - canonNum := chain.CurrentBlock().Number.Uint64() - canonHash := chain.CurrentBlock().Hash() - _, err = chain.InsertChain(sideblocks) - if err != nil { - t.Errorf("Got error, %v", err) - } - head := chain.CurrentBlock() - if got := sideblocks[len(sideblocks)-1].Hash(); got != head.Hash() { - t.Fatalf("head wrong, expected %x got %x", head.Hash(), got) - } - // We have now inserted a sidechain. 
- if blockByNum := chain.GetBlockByNumber(canonNum); blockByNum != nil { - t.Errorf("expected block to be gone: %v", blockByNum.NumberU64()) - } - if headerByNum := chain.GetHeaderByNumber(canonNum); headerByNum != nil { - t.Errorf("expected header to be gone: %v", headerByNum.Number) - } - if blockByHash := chain.GetBlockByHash(canonHash); blockByHash == nil { - t.Errorf("expected block to be present: %x", blockByHash.Hash()) - } - if headerByHash := chain.GetHeaderByHash(canonHash); headerByHash == nil { - t.Errorf("expected header to be present: %x", headerByHash.Hash()) - } -} - -// TestReorgToShorterRemovesCanonMappingHeaderChain is the same scenario -// as TestReorgToShorterRemovesCanonMapping, but applied on headerchain -// imports -- that is, for fast sync -func TestReorgToShorterRemovesCanonMappingHeaderChain(t *testing.T) { - testReorgToShorterRemovesCanonMappingHeaderChain(t, rawdb.HashScheme) - testReorgToShorterRemovesCanonMappingHeaderChain(t, rawdb.PathScheme) -} - -func testReorgToShorterRemovesCanonMappingHeaderChain(t *testing.T, scheme string) { - chain, canonblocks, sideblocks, _, err := getLongAndShortChains(scheme) - if err != nil { - t.Fatal(err) - } - defer chain.Stop() - - // Convert into headers - canonHeaders := make([]*types.Header, len(canonblocks)) - for i, block := range canonblocks { - canonHeaders[i] = block.Header() - } - if n, err := chain.InsertHeaderChain(canonHeaders); err != nil { - t.Fatalf("header %d: failed to insert into chain: %v", n, err) - } - canonNum := chain.CurrentHeader().Number.Uint64() - canonHash := chain.CurrentBlock().Hash() - sideHeaders := make([]*types.Header, len(sideblocks)) - for i, block := range sideblocks { - sideHeaders[i] = block.Header() - } - if n, err := chain.InsertHeaderChain(sideHeaders); err != nil { - t.Fatalf("header %d: failed to insert into chain: %v", n, err) - } - head := chain.CurrentHeader() - if got := sideblocks[len(sideblocks)-1].Hash(); got != head.Hash() { - t.Fatalf("head wrong, expected %x got %x", head.Hash(), got) - } - // We have now inserted a sidechain. 
- if blockByNum := chain.GetBlockByNumber(canonNum); blockByNum != nil { - t.Errorf("expected block to be gone: %v", blockByNum.NumberU64()) - } - if headerByNum := chain.GetHeaderByNumber(canonNum); headerByNum != nil { - t.Errorf("expected header to be gone: %v", headerByNum.Number.Uint64()) - } - if blockByHash := chain.GetBlockByHash(canonHash); blockByHash == nil { - t.Errorf("expected block to be present: %x", blockByHash.Hash()) - } - if headerByHash := chain.GetHeaderByHash(canonHash); headerByHash == nil { - t.Errorf("expected header to be present: %x", headerByHash.Hash()) - } -} - -// Benchmarks large blocks with value transfers to non-existing accounts -func benchmarkLargeNumberOfValueToNonexisting(b *testing.B, numTxs, numBlocks int, recipientFn func(uint64) common.Address, dataFn func(uint64) []byte) { - var ( - signer = types.HomesteadSigner{} - testBankKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") - testBankAddress = crypto.PubkeyToAddress(testBankKey.PublicKey) - bankFunds = big.NewInt(100000000000000000) - gspec = &Genesis{ - Config: params.TestChainConfig, - Alloc: types.GenesisAlloc{ - testBankAddress: {Balance: bankFunds}, - common.HexToAddress("0xc0de"): { - Code: []byte{0x60, 0x01, 0x50}, - Balance: big.NewInt(0), - }, // push 1, pop - }, - GasLimit: 100e6, // 100 M - } - ) - // Generate the original common chain segment and the two competing forks - engine := ethash.NewFaker() - - blockGenerator := func(i int, block *BlockGen) { - block.SetCoinbase(common.Address{1}) - for txi := 0; txi < numTxs; txi++ { - uniq := uint64(i*numTxs + txi) - recipient := recipientFn(uniq) - tx, err := types.SignTx(types.NewTransaction(uniq, recipient, big.NewInt(1), params.TxGas, block.header.BaseFee, nil), signer, testBankKey) - if err != nil { - b.Error(err) - } - block.AddTx(tx) - } - } - - _, shared, _ := GenerateChainWithGenesis(gspec, engine, numBlocks, blockGenerator) - b.StopTimer() - b.ResetTimer() - for i := 0; i < b.N; i++ { - // Import the shared chain and the original canonical one - chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, engine, vm.Config{}, nil, nil) - if err != nil { - b.Fatalf("failed to create tester chain: %v", err) - } - b.StartTimer() - if _, err := chain.InsertChain(shared); err != nil { - b.Fatalf("failed to insert shared chain: %v", err) - } - b.StopTimer() - block := chain.GetBlockByHash(chain.CurrentBlock().Hash()) - if got := block.Transactions().Len(); got != numTxs*numBlocks { - b.Fatalf("Transactions were not included, expected %d, got %d", numTxs*numBlocks, got) - } - } -} - -func BenchmarkBlockChain_1x1000ValueTransferToNonexisting(b *testing.B) { - var ( - numTxs = 1000 - numBlocks = 1 - ) - recipientFn := func(nonce uint64) common.Address { - return common.BigToAddress(new(big.Int).SetUint64(1337 + nonce)) - } - dataFn := func(nonce uint64) []byte { - return nil - } - benchmarkLargeNumberOfValueToNonexisting(b, numTxs, numBlocks, recipientFn, dataFn) -} - -func BenchmarkBlockChain_1x1000ValueTransferToExisting(b *testing.B) { - var ( - numTxs = 1000 - numBlocks = 1 - ) - b.StopTimer() - b.ResetTimer() - - recipientFn := func(nonce uint64) common.Address { - return common.BigToAddress(new(big.Int).SetUint64(1337)) - } - dataFn := func(nonce uint64) []byte { - return nil - } - benchmarkLargeNumberOfValueToNonexisting(b, numTxs, numBlocks, recipientFn, dataFn) -} - -func BenchmarkBlockChain_1x1000Executions(b *testing.B) { - var ( - numTxs = 1000 - numBlocks = 1 - ) - 
b.StopTimer() - b.ResetTimer() - - recipientFn := func(nonce uint64) common.Address { - return common.BigToAddress(new(big.Int).SetUint64(0xc0de)) - } - dataFn := func(nonce uint64) []byte { - return nil - } - benchmarkLargeNumberOfValueToNonexisting(b, numTxs, numBlocks, recipientFn, dataFn) -} - -// Tests that importing a some old blocks, where all blocks are before the -// pruning point. -// This internally leads to a sidechain import, since the blocks trigger an -// ErrPrunedAncestor error. -// This may e.g. happen if -// 1. Downloader rollbacks a batch of inserted blocks and exits -// 2. Downloader starts to sync again -// 3. The blocks fetched are all known and canonical blocks -func TestSideImportPrunedBlocks(t *testing.T) { - testSideImportPrunedBlocks(t, rawdb.HashScheme) - testSideImportPrunedBlocks(t, rawdb.PathScheme) -} - -func testSideImportPrunedBlocks(t *testing.T, scheme string) { - // Generate a canonical chain to act as the main dataset - engine := ethash.NewFaker() - genesis := &Genesis{ - Config: params.TestChainConfig, - BaseFee: big.NewInt(params.InitialBaseFee), - } - // Generate and import the canonical chain - _, blocks, _ := GenerateChainWithGenesis(genesis, engine, 2*TriesInMemory, nil) - - chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), genesis, nil, engine, vm.Config{}, nil, nil) - if err != nil { - t.Fatalf("failed to create tester chain: %v", err) - } - defer chain.Stop() - - if n, err := chain.InsertChain(blocks); err != nil { - t.Fatalf("block %d: failed to insert into chain: %v", n, err) - } - // In path-based trie database implementation, it will keep 128 diff + 1 disk - // layers, totally 129 latest states available. In hash-based it's 128. - states := TestTriesInMemory - if scheme == rawdb.PathScheme { - states = TestTriesInMemory + 1 - } - lastPrunedIndex := len(blocks) - states - 1 - lastPrunedBlock := blocks[lastPrunedIndex] - - // Verify pruning of lastPrunedBlock - if chain.HasBlockAndState(lastPrunedBlock.Hash(), lastPrunedBlock.NumberU64()) { - t.Errorf("Block %d not pruned", lastPrunedBlock.NumberU64()) - } - firstNonPrunedBlock := blocks[len(blocks)-states] - // Verify firstNonPrunedBlock is not pruned - if !chain.HasBlockAndState(firstNonPrunedBlock.Hash(), firstNonPrunedBlock.NumberU64()) { - t.Errorf("Block %d pruned", firstNonPrunedBlock.NumberU64()) - } - // Now re-import some old blocks - blockToReimport := blocks[5:8] - _, err = chain.InsertChain(blockToReimport) - if err != nil { - t.Errorf("Got error, %v", err) - } -} - -// TestDeleteCreateRevert tests a weird state transition corner case that we hit -// while changing the internals of statedb. The workflow is that a contract is -// self destructed, then in a followup transaction (but same block) it's created -// again and the transaction reverted. -// -// The original statedb implementation flushed dirty objects to the tries after -// each transaction, so this works ok. The rework accumulated writes in memory -// first, but the journal wiped the entire state object on create-revert. 
-func TestDeleteCreateRevert(t *testing.T) { - testDeleteCreateRevert(t, rawdb.HashScheme) - testDeleteCreateRevert(t, rawdb.PathScheme) -} - -func testDeleteCreateRevert(t *testing.T, scheme string) { - var ( - aa = common.HexToAddress("0x000000000000000000000000000000000000aaaa") - bb = common.HexToAddress("0x000000000000000000000000000000000000bbbb") - engine = ethash.NewFaker() - - // A sender who makes transactions, has some funds - key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") - address = crypto.PubkeyToAddress(key.PublicKey) - funds = big.NewInt(100000000000000000) - gspec = &Genesis{ - Config: params.TestChainConfig, - Alloc: types.GenesisAlloc{ - address: {Balance: funds}, - // The address 0xAAAAA selfdestructs if called - aa: { - // Code needs to just selfdestruct - Code: []byte{byte(vm.PC), byte(vm.SELFDESTRUCT)}, - Nonce: 1, - Balance: big.NewInt(0), - }, - // The address 0xBBBB send 1 wei to 0xAAAA, then reverts - bb: { - Code: []byte{ - byte(vm.PC), // [0] - byte(vm.DUP1), // [0,0] - byte(vm.DUP1), // [0,0,0] - byte(vm.DUP1), // [0,0,0,0] - byte(vm.PUSH1), 0x01, // [0,0,0,0,1] (value) - byte(vm.PUSH2), 0xaa, 0xaa, // [0,0,0,0,1, 0xaaaa] - byte(vm.GAS), - byte(vm.CALL), - byte(vm.REVERT), - }, - Balance: big.NewInt(1), - }, - }, - } - ) - - _, blocks, _ := GenerateChainWithGenesis(gspec, engine, 1, func(i int, b *BlockGen) { - b.SetCoinbase(common.Address{1}) - // One transaction to AAAA - tx, _ := types.SignTx(types.NewTransaction(0, aa, - big.NewInt(0), 50000, b.header.BaseFee, nil), types.HomesteadSigner{}, key) - b.AddTx(tx) - // One transaction to BBBB - tx, _ = types.SignTx(types.NewTransaction(1, bb, - big.NewInt(0), 100000, b.header.BaseFee, nil), types.HomesteadSigner{}, key) - b.AddTx(tx) - }) - // Import the canonical chain - chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), gspec, nil, engine, vm.Config{}, nil, nil) - if err != nil { - t.Fatalf("failed to create tester chain: %v", err) - } - defer chain.Stop() - - if n, err := chain.InsertChain(blocks); err != nil { - t.Fatalf("block %d: failed to insert into chain: %v", n, err) - } -} - -// TestDeleteRecreateSlots tests a state-transition that contains both deletion -// and recreation of contract state. -// Contract A exists, has slots 1 and 2 set -// Tx 1: Selfdestruct A -// Tx 2: Re-create A, set slots 3 and 4 -// Expected outcome is that _all_ slots are cleared from A, due to the selfdestruct, -// and then the new slots exist -func TestDeleteRecreateSlots(t *testing.T) { - testDeleteRecreateSlots(t, rawdb.HashScheme) - testDeleteRecreateSlots(t, rawdb.PathScheme) -} - -func testDeleteRecreateSlots(t *testing.T, scheme string) { - var ( - engine = ethash.NewFaker() - - // A sender who makes transactions, has some funds - key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") - address = crypto.PubkeyToAddress(key.PublicKey) - funds = big.NewInt(1000000000000000) - bb = common.HexToAddress("0x000000000000000000000000000000000000bbbb") - aaStorage = make(map[common.Hash]common.Hash) // Initial storage in AA - aaCode = []byte{byte(vm.PC), byte(vm.SELFDESTRUCT)} // Code for AA (simple selfdestruct) - ) - // Populate two slots - aaStorage[common.HexToHash("01")] = common.HexToHash("01") - aaStorage[common.HexToHash("02")] = common.HexToHash("02") - - // The bb-code needs to CREATE2 the aa contract. It consists of - // both initcode and deployment code - // initcode: - // 1. 
Set slots 3=3, 4=4, - // 2. Return aaCode - - initCode := []byte{ - byte(vm.PUSH1), 0x3, // value - byte(vm.PUSH1), 0x3, // location - byte(vm.SSTORE), // Set slot[3] = 3 - byte(vm.PUSH1), 0x4, // value - byte(vm.PUSH1), 0x4, // location - byte(vm.SSTORE), // Set slot[4] = 4 - // Slots are set, now return the code - byte(vm.PUSH2), byte(vm.PC), byte(vm.SELFDESTRUCT), // Push code on stack - byte(vm.PUSH1), 0x0, // memory start on stack - byte(vm.MSTORE), - // Code is now in memory. - byte(vm.PUSH1), 0x2, // size - byte(vm.PUSH1), byte(32 - 2), // offset - byte(vm.RETURN), - } - if l := len(initCode); l > 32 { - t.Fatalf("init code is too long for a pushx, need a more elaborate deployer") - } - bbCode := []byte{ - // Push initcode onto stack - byte(vm.PUSH1) + byte(len(initCode)-1)} - bbCode = append(bbCode, initCode...) - bbCode = append(bbCode, []byte{ - byte(vm.PUSH1), 0x0, // memory start on stack - byte(vm.MSTORE), - byte(vm.PUSH1), 0x00, // salt - byte(vm.PUSH1), byte(len(initCode)), // size - byte(vm.PUSH1), byte(32 - len(initCode)), // offset - byte(vm.PUSH1), 0x00, // endowment - byte(vm.CREATE2), - }...) - - initHash := crypto.Keccak256Hash(initCode) - aa := crypto.CreateAddress2(bb, [32]byte{}, initHash[:]) - t.Logf("Destination address: %x\n", aa) - - gspec := &Genesis{ - Config: params.TestChainConfig, - Alloc: types.GenesisAlloc{ - address: {Balance: funds}, - // The address 0xAAAAA selfdestructs if called - aa: { - // Code needs to just selfdestruct - Code: aaCode, - Nonce: 1, - Balance: big.NewInt(0), - Storage: aaStorage, - }, - // The contract BB recreates AA - bb: { - Code: bbCode, - Balance: big.NewInt(1), - }, - }, - } - _, blocks, _ := GenerateChainWithGenesis(gspec, engine, 1, func(i int, b *BlockGen) { - b.SetCoinbase(common.Address{1}) - // One transaction to AA, to kill it - tx, _ := types.SignTx(types.NewTransaction(0, aa, - big.NewInt(0), 50000, b.header.BaseFee, nil), types.HomesteadSigner{}, key) - b.AddTx(tx) - // One transaction to BB, to recreate AA - tx, _ = types.SignTx(types.NewTransaction(1, bb, - big.NewInt(0), 100000, b.header.BaseFee, nil), types.HomesteadSigner{}, key) - b.AddTx(tx) - }) - // Import the canonical chain - chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), gspec, nil, engine, vm.Config{ - Tracer: logger.NewJSONLogger(nil, os.Stdout), - }, nil, nil) - if err != nil { - t.Fatalf("failed to create tester chain: %v", err) - } - defer chain.Stop() - - if n, err := chain.InsertChain(blocks); err != nil { - t.Fatalf("block %d: failed to insert into chain: %v", n, err) - } - statedb, _ := chain.State() - - // If all is correct, then slot 1 and 2 are zero - if got, exp := statedb.GetState(aa, common.HexToHash("01")), (common.Hash{}); got != exp { - t.Errorf("got %x exp %x", got, exp) - } - if got, exp := statedb.GetState(aa, common.HexToHash("02")), (common.Hash{}); got != exp { - t.Errorf("got %x exp %x", got, exp) - } - // Also, 3 and 4 should be set - if got, exp := statedb.GetState(aa, common.HexToHash("03")), common.HexToHash("03"); got != exp { - t.Fatalf("got %x exp %x", got, exp) - } - if got, exp := statedb.GetState(aa, common.HexToHash("04")), common.HexToHash("04"); got != exp { - t.Fatalf("got %x exp %x", got, exp) - } -} - -// TestDeleteRecreateAccount tests a state-transition that contains deletion of a -// contract with storage, and a recreate of the same contract via a -// regular value-transfer -// Expected outcome is that _all_ slots are cleared from A -func TestDeleteRecreateAccount(t 
*testing.T) { - testDeleteRecreateAccount(t, rawdb.HashScheme) - testDeleteRecreateAccount(t, rawdb.PathScheme) -} - -func testDeleteRecreateAccount(t *testing.T, scheme string) { - var ( - engine = ethash.NewFaker() - - // A sender who makes transactions, has some funds - key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") - address = crypto.PubkeyToAddress(key.PublicKey) - funds = big.NewInt(1000000000000000) - - aa = common.HexToAddress("0x7217d81b76bdd8707601e959454e3d776aee5f43") - aaStorage = make(map[common.Hash]common.Hash) // Initial storage in AA - aaCode = []byte{byte(vm.PC), byte(vm.SELFDESTRUCT)} // Code for AA (simple selfdestruct) - ) - // Populate two slots - aaStorage[common.HexToHash("01")] = common.HexToHash("01") - aaStorage[common.HexToHash("02")] = common.HexToHash("02") - - gspec := &Genesis{ - Config: params.TestChainConfig, - Alloc: types.GenesisAlloc{ - address: {Balance: funds}, - // The address 0xAAAAA selfdestructs if called - aa: { - // Code needs to just selfdestruct - Code: aaCode, - Nonce: 1, - Balance: big.NewInt(0), - Storage: aaStorage, - }, - }, - } - - _, blocks, _ := GenerateChainWithGenesis(gspec, engine, 1, func(i int, b *BlockGen) { - b.SetCoinbase(common.Address{1}) - // One transaction to AA, to kill it - tx, _ := types.SignTx(types.NewTransaction(0, aa, - big.NewInt(0), 50000, b.header.BaseFee, nil), types.HomesteadSigner{}, key) - b.AddTx(tx) - // One transaction to AA, to recreate it (but without storage - tx, _ = types.SignTx(types.NewTransaction(1, aa, - big.NewInt(1), 100000, b.header.BaseFee, nil), types.HomesteadSigner{}, key) - b.AddTx(tx) - }) - // Import the canonical chain - chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), gspec, nil, engine, vm.Config{ - Tracer: logger.NewJSONLogger(nil, os.Stdout), - }, nil, nil) - if err != nil { - t.Fatalf("failed to create tester chain: %v", err) - } - defer chain.Stop() - - if n, err := chain.InsertChain(blocks); err != nil { - t.Fatalf("block %d: failed to insert into chain: %v", n, err) - } - statedb, _ := chain.State() - - // If all is correct, then both slots are zero - if got, exp := statedb.GetState(aa, common.HexToHash("01")), (common.Hash{}); got != exp { - t.Errorf("got %x exp %x", got, exp) - } - if got, exp := statedb.GetState(aa, common.HexToHash("02")), (common.Hash{}); got != exp { - t.Errorf("got %x exp %x", got, exp) - } -} - -// TestDeleteRecreateSlotsAcrossManyBlocks tests multiple state-transition that contains both deletion -// and recreation of contract state. 
-// Contract A exists, has slots 1 and 2 set -// Tx 1: Selfdestruct A -// Tx 2: Re-create A, set slots 3 and 4 -// Expected outcome is that _all_ slots are cleared from A, due to the selfdestruct, -// and then the new slots exist -func TestDeleteRecreateSlotsAcrossManyBlocks(t *testing.T) { - testDeleteRecreateSlotsAcrossManyBlocks(t, rawdb.HashScheme) - testDeleteRecreateSlotsAcrossManyBlocks(t, rawdb.PathScheme) -} - -func testDeleteRecreateSlotsAcrossManyBlocks(t *testing.T, scheme string) { - var ( - engine = ethash.NewFaker() - - // A sender who makes transactions, has some funds - key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") - address = crypto.PubkeyToAddress(key.PublicKey) - funds = big.NewInt(1000000000000000) - bb = common.HexToAddress("0x000000000000000000000000000000000000bbbb") - aaStorage = make(map[common.Hash]common.Hash) // Initial storage in AA - aaCode = []byte{byte(vm.PC), byte(vm.SELFDESTRUCT)} // Code for AA (simple selfdestruct) - ) - // Populate two slots - aaStorage[common.HexToHash("01")] = common.HexToHash("01") - aaStorage[common.HexToHash("02")] = common.HexToHash("02") - - // The bb-code needs to CREATE2 the aa contract. It consists of - // both initcode and deployment code - // initcode: - // 1. Set slots 3=blocknum+1, 4=4, - // 2. Return aaCode - - initCode := []byte{ - byte(vm.PUSH1), 0x1, // - byte(vm.NUMBER), // value = number + 1 - byte(vm.ADD), // - byte(vm.PUSH1), 0x3, // location - byte(vm.SSTORE), // Set slot[3] = number + 1 - byte(vm.PUSH1), 0x4, // value - byte(vm.PUSH1), 0x4, // location - byte(vm.SSTORE), // Set slot[4] = 4 - // Slots are set, now return the code - byte(vm.PUSH2), byte(vm.PC), byte(vm.SELFDESTRUCT), // Push code on stack - byte(vm.PUSH1), 0x0, // memory start on stack - byte(vm.MSTORE), - // Code is now in memory. - byte(vm.PUSH1), 0x2, // size - byte(vm.PUSH1), byte(32 - 2), // offset - byte(vm.RETURN), - } - if l := len(initCode); l > 32 { - t.Fatalf("init code is too long for a pushx, need a more elaborate deployer") - } - bbCode := []byte{ - // Push initcode onto stack - byte(vm.PUSH1) + byte(len(initCode)-1)} - bbCode = append(bbCode, initCode...) - bbCode = append(bbCode, []byte{ - byte(vm.PUSH1), 0x0, // memory start on stack - byte(vm.MSTORE), - byte(vm.PUSH1), 0x00, // salt - byte(vm.PUSH1), byte(len(initCode)), // size - byte(vm.PUSH1), byte(32 - len(initCode)), // offset - byte(vm.PUSH1), 0x00, // endowment - byte(vm.CREATE2), - }...) 
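The aa address derived just below via crypto.CreateAddress2 is the EIP-1014 CREATE2 address; as a point of reference, an equivalent hand-rolled sketch (assuming the usual go-ethereum common and crypto packages) looks like:

    // addr = keccak256(0xff ++ deployer ++ salt ++ keccak256(initCode))[12:]
    func create2Address(deployer common.Address, salt [32]byte, initCodeHash []byte) common.Address {
        data := make([]byte, 0, 1+20+32+32)
        data = append(data, 0xff)
        data = append(data, deployer.Bytes()...)
        data = append(data, salt[:]...)
        data = append(data, initCodeHash...)
        return common.BytesToAddress(crypto.Keccak256(data)[12:])
    }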
- - initHash := crypto.Keccak256Hash(initCode) - aa := crypto.CreateAddress2(bb, [32]byte{}, initHash[:]) - t.Logf("Destination address: %x\n", aa) - gspec := &Genesis{ - Config: params.TestChainConfig, - Alloc: types.GenesisAlloc{ - address: {Balance: funds}, - // The address 0xAAAAA selfdestructs if called - aa: { - // Code needs to just selfdestruct - Code: aaCode, - Nonce: 1, - Balance: big.NewInt(0), - Storage: aaStorage, - }, - // The contract BB recreates AA - bb: { - Code: bbCode, - Balance: big.NewInt(1), - }, - }, - } - var nonce uint64 - - type expectation struct { - exist bool - blocknum int - values map[int]int - } - var current = &expectation{ - exist: true, // exists in genesis - blocknum: 0, - values: map[int]int{1: 1, 2: 2}, - } - var expectations []*expectation - var newDestruct = func(e *expectation, b *BlockGen) *types.Transaction { - tx, _ := types.SignTx(types.NewTransaction(nonce, aa, - big.NewInt(0), 50000, b.header.BaseFee, nil), types.HomesteadSigner{}, key) - nonce++ - if e.exist { - e.exist = false - e.values = nil - } - //t.Logf("block %d; adding destruct\n", e.blocknum) - return tx - } - var newResurrect = func(e *expectation, b *BlockGen) *types.Transaction { - tx, _ := types.SignTx(types.NewTransaction(nonce, bb, - big.NewInt(0), 100000, b.header.BaseFee, nil), types.HomesteadSigner{}, key) - nonce++ - if !e.exist { - e.exist = true - e.values = map[int]int{3: e.blocknum + 1, 4: 4} - } - //t.Logf("block %d; adding resurrect\n", e.blocknum) - return tx - } - - _, blocks, _ := GenerateChainWithGenesis(gspec, engine, 150, func(i int, b *BlockGen) { - var exp = new(expectation) - exp.blocknum = i + 1 - exp.values = make(map[int]int) - for k, v := range current.values { - exp.values[k] = v - } - exp.exist = current.exist - - b.SetCoinbase(common.Address{1}) - if i%2 == 0 { - b.AddTx(newDestruct(exp, b)) - } - if i%3 == 0 { - b.AddTx(newResurrect(exp, b)) - } - if i%5 == 0 { - b.AddTx(newDestruct(exp, b)) - } - if i%7 == 0 { - b.AddTx(newResurrect(exp, b)) - } - expectations = append(expectations, exp) - current = exp - }) - // Import the canonical chain - chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), gspec, nil, engine, vm.Config{ - //Debug: true, - //Tracer: vm.NewJSONLogger(nil, os.Stdout), - }, nil, nil) - if err != nil { - t.Fatalf("failed to create tester chain: %v", err) - } - defer chain.Stop() - - var asHash = func(num int) common.Hash { - return common.BytesToHash([]byte{byte(num)}) - } - for i, block := range blocks { - blockNum := i + 1 - if n, err := chain.InsertChain([]*types.Block{block}); err != nil { - t.Fatalf("block %d: failed to insert into chain: %v", n, err) - } - statedb, _ := chain.State() - // If all is correct, then slot 1 and 2 are zero - if got, exp := statedb.GetState(aa, common.HexToHash("01")), (common.Hash{}); got != exp { - t.Errorf("block %d, got %x exp %x", blockNum, got, exp) - } - if got, exp := statedb.GetState(aa, common.HexToHash("02")), (common.Hash{}); got != exp { - t.Errorf("block %d, got %x exp %x", blockNum, got, exp) - } - exp := expectations[i] - if exp.exist { - if !statedb.Exist(aa) { - t.Fatalf("block %d, expected %v to exist, it did not", blockNum, aa) - } - for slot, val := range exp.values { - if gotValue, expValue := statedb.GetState(aa, asHash(slot)), asHash(val); gotValue != expValue { - t.Fatalf("block %d, slot %d, got %x exp %x", blockNum, slot, gotValue, expValue) - } - } - } else { - if statedb.Exist(aa) { - t.Fatalf("block %d, expected %v to not exist, it 
did", blockNum, aa) - } - } - } -} - -// TestInitThenFailCreateContract tests a pretty notorious case that happened -// on mainnet over blocks 7338108, 7338110 and 7338115. -// - Block 7338108: address e771789f5cccac282f23bb7add5690e1f6ca467c is initiated -// with 0.001 ether (thus created but no code) -// - Block 7338110: a CREATE2 is attempted. The CREATE2 would deploy code on -// the same address e771789f5cccac282f23bb7add5690e1f6ca467c. However, the -// deployment fails due to OOG during initcode execution -// - Block 7338115: another tx checks the balance of -// e771789f5cccac282f23bb7add5690e1f6ca467c, and the snapshotter returned it as -// zero. -// -// The problem being that the snapshotter maintains a destructset, and adds items -// to the destructset in case something is created "onto" an existing item. -// We need to either roll back the snapDestructs, or not place it into snapDestructs -// in the first place. -// - -func TestInitThenFailCreateContract(t *testing.T) { - testInitThenFailCreateContract(t, rawdb.HashScheme) - testInitThenFailCreateContract(t, rawdb.PathScheme) -} - -func testInitThenFailCreateContract(t *testing.T, scheme string) { - var ( - engine = ethash.NewFaker() - - // A sender who makes transactions, has some funds - key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") - address = crypto.PubkeyToAddress(key.PublicKey) - funds = big.NewInt(1000000000000000) - bb = common.HexToAddress("0x000000000000000000000000000000000000bbbb") - ) - - // The bb-code needs to CREATE2 the aa contract. It consists of - // both initcode and deployment code - // initcode: - // 1. If blocknum < 1, error out (e.g invalid opcode) - // 2. else, return a snippet of code - initCode := []byte{ - byte(vm.PUSH1), 0x1, // y (2) - byte(vm.NUMBER), // x (number) - byte(vm.GT), // x > y? - byte(vm.PUSH1), byte(0x8), - byte(vm.JUMPI), // jump to label if number > 2 - byte(0xFE), // illegal opcode - byte(vm.JUMPDEST), - byte(vm.PUSH1), 0x2, // size - byte(vm.PUSH1), 0x0, // offset - byte(vm.RETURN), // return 2 bytes of zero-code - } - if l := len(initCode); l > 32 { - t.Fatalf("init code is too long for a pushx, need a more elaborate deployer") - } - bbCode := []byte{ - // Push initcode onto stack - byte(vm.PUSH1) + byte(len(initCode)-1)} - bbCode = append(bbCode, initCode...) - bbCode = append(bbCode, []byte{ - byte(vm.PUSH1), 0x0, // memory start on stack - byte(vm.MSTORE), - byte(vm.PUSH1), 0x00, // salt - byte(vm.PUSH1), byte(len(initCode)), // size - byte(vm.PUSH1), byte(32 - len(initCode)), // offset - byte(vm.PUSH1), 0x00, // endowment - byte(vm.CREATE2), - }...) 
- - initHash := crypto.Keccak256Hash(initCode) - aa := crypto.CreateAddress2(bb, [32]byte{}, initHash[:]) - t.Logf("Destination address: %x\n", aa) - - gspec := &Genesis{ - Config: params.TestChainConfig, - Alloc: types.GenesisAlloc{ - address: {Balance: funds}, - // The address aa has some funds - aa: {Balance: big.NewInt(100000)}, - // The contract BB tries to create code onto AA - bb: { - Code: bbCode, - Balance: big.NewInt(1), - }, - }, - } - nonce := uint64(0) - _, blocks, _ := GenerateChainWithGenesis(gspec, engine, 4, func(i int, b *BlockGen) { - b.SetCoinbase(common.Address{1}) - // One transaction to BB - tx, _ := types.SignTx(types.NewTransaction(nonce, bb, - big.NewInt(0), 100000, b.header.BaseFee, nil), types.HomesteadSigner{}, key) - b.AddTx(tx) - nonce++ - }) - - // Import the canonical chain - chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), gspec, nil, engine, vm.Config{ - //Debug: true, - //Tracer: vm.NewJSONLogger(nil, os.Stdout), - }, nil, nil) - if err != nil { - t.Fatalf("failed to create tester chain: %v", err) - } - defer chain.Stop() - - statedb, _ := chain.State() - if got, exp := statedb.GetBalance(aa), uint256.NewInt(100000); got.Cmp(exp) != 0 { - t.Fatalf("Genesis err, got %v exp %v", got, exp) - } - // First block tries to create, but fails - { - block := blocks[0] - if _, err := chain.InsertChain([]*types.Block{blocks[0]}); err != nil { - t.Fatalf("block %d: failed to insert into chain: %v", block.NumberU64(), err) - } - statedb, _ = chain.State() - if got, exp := statedb.GetBalance(aa), uint256.NewInt(100000); got.Cmp(exp) != 0 { - t.Fatalf("block %d: got %v exp %v", block.NumberU64(), got, exp) - } - } - // Import the rest of the blocks - for _, block := range blocks[1:] { - if _, err := chain.InsertChain([]*types.Block{block}); err != nil { - t.Fatalf("block %d: failed to insert into chain: %v", block.NumberU64(), err) - } - } -} - -// TestEIP2718Transition* tests that an EIP-2718 transaction will be accepted -// after the fork block has passed. This is verified by sending an EIP-2930 -// access list transaction, which specifies a single slot access, and then -// checking that the gas usage of a hot SLOAD and a cold SLOAD are calculated -// correctly. - -// TestEIP2718TransitionWithTestChainConfig tests EIP-2718 with TestChainConfig. -func TestEIP2718TransitionWithTestChainConfig(t *testing.T) { - testEIP2718TransitionWithConfig(t, rawdb.HashScheme, params.TestChainConfig) - testEIP2718TransitionWithConfig(t, rawdb.HashScheme, params.TestChainConfig) -} - -func preShanghaiConfig() *params.ChainConfig { - config := *params.ParliaTestChainConfig - config.ShanghaiTime = nil - config.KeplerTime = nil - config.FeynmanTime = nil - config.FeynmanFixTime = nil - config.CancunTime = nil - return &config -} - -// TestEIP2718TransitionWithParliaConfig tests EIP-2718 with Parlia Config. -func TestEIP2718TransitionWithParliaConfig(t *testing.T) { - testEIP2718TransitionWithConfig(t, rawdb.HashScheme, preShanghaiConfig()) - testEIP2718TransitionWithConfig(t, rawdb.PathScheme, preShanghaiConfig()) -} - -// testEIP2718TransitionWithConfig tests EIP02718 with given ChainConfig. 
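With the usual EIP-2929/EIP-2930 constants from params and vm (a back-of-the-envelope check, hedged in case those values differ for the configs exercised here), the expected gas computed in the function below comes out to a single concrete number:

    // 21000 (TxGas) + 2400 (access-list address) + 1900 (access-list storage key)
    //   + 2*2 (two PC ops) + 100 (warm SLOAD) + 2100 (cold SLOAD)
    const expectedAccessListTxGas = 21000 + 2400 + 1900 + 2*2 + 100 + 2100 // = 27504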
-func testEIP2718TransitionWithConfig(t *testing.T, scheme string, config *params.ChainConfig) { - var ( - aa = common.HexToAddress("0x000000000000000000000000000000000000aaaa") - engine = ethash.NewFaker() - - // A sender who makes transactions, has some funds - key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") - address = crypto.PubkeyToAddress(key.PublicKey) - funds = big.NewInt(1000000000000000) - gspec = &Genesis{ - Config: config, - Alloc: types.GenesisAlloc{ - address: {Balance: funds}, - // The address 0xAAAA sloads 0x00 and 0x01 - aa: { - Code: []byte{ - byte(vm.PC), - byte(vm.PC), - byte(vm.SLOAD), - byte(vm.SLOAD), - }, - Nonce: 0, - Balance: big.NewInt(0), - }, - }, - } - ) - // Generate blocks - _, blocks, _ := GenerateChainWithGenesis(gspec, engine, 1, func(i int, b *BlockGen) { - b.SetCoinbase(common.Address{1}) - - // One transaction to 0xAAAA - signer := types.LatestSigner(gspec.Config) - tx, _ := types.SignNewTx(key, signer, &types.AccessListTx{ - ChainID: gspec.Config.ChainID, - Nonce: 0, - To: &aa, - Gas: 30000, - GasPrice: b.header.BaseFee, - AccessList: types.AccessList{{ - Address: aa, - StorageKeys: []common.Hash{{0}}, - }}, - }) - b.AddTx(tx) - }) - - // Import the canonical chain - chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), gspec, nil, engine, vm.Config{}, nil, nil) - if err != nil { - t.Fatalf("failed to create tester chain: %v", err) - } - defer chain.Stop() - - if n, err := chain.InsertChain(blocks); err != nil { - t.Fatalf("block %d: failed to insert into chain: %v", n, err) - } - - block := chain.GetBlockByNumber(1) - - // Expected gas is intrinsic + 2 * pc + hot load + cold load, since only one load is in the access list - expected := params.TxGas + params.TxAccessListAddressGas + params.TxAccessListStorageKeyGas + - vm.GasQuickStep*2 + params.WarmStorageReadCostEIP2929 + params.ColdSloadCostEIP2929 - if block.GasUsed() != expected { - t.Fatalf("incorrect amount of gas spent: expected %d, got %d", expected, block.GasUsed()) - } -} - -// TestEIP1559Transition tests the following: -// -// 1. A transaction whose gasFeeCap is greater than the baseFee is valid. -// 2. Gas accounting for access lists on EIP-1559 transactions is correct. -// 3. Only the transaction's tip will be received by the coinbase. -// 4. The transaction sender pays for both the tip and baseFee. -// 5. The coinbase receives only the partially realized tip when -// gasFeeCap - gasTipCap < baseFee. -// 6. Legacy transaction behave as expected (e.g. gasPrice = gasFeeCap = gasTipCap). 
-func TestEIP1559Transition(t *testing.T) { - testEIP1559Transition(t, rawdb.HashScheme) - testEIP1559Transition(t, rawdb.PathScheme) -} - -func testEIP1559Transition(t *testing.T, scheme string) { - var ( - aa = common.HexToAddress("0x000000000000000000000000000000000000aaaa") - engine = ethash.NewFaker() - - // A sender who makes transactions, has some funds - key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") - key2, _ = crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a") - addr1 = crypto.PubkeyToAddress(key1.PublicKey) - addr2 = crypto.PubkeyToAddress(key2.PublicKey) - funds = new(big.Int).Mul(common.Big1, big.NewInt(params.Ether)) - config = *params.AllEthashProtocolChanges - gspec = &Genesis{ - Config: &config, - Alloc: types.GenesisAlloc{ - addr1: {Balance: funds}, - addr2: {Balance: funds}, - // The address 0xAAAA sloads 0x00 and 0x01 - aa: { - Code: []byte{ - byte(vm.PC), - byte(vm.PC), - byte(vm.SLOAD), - byte(vm.SLOAD), - }, - Nonce: 0, - Balance: big.NewInt(0), - }, - }, - } - ) - - gspec.Config.BerlinBlock = common.Big0 - gspec.Config.LondonBlock = common.Big0 - signer := types.LatestSigner(gspec.Config) - - genDb, blocks, _ := GenerateChainWithGenesis(gspec, engine, 1, func(i int, b *BlockGen) { - b.SetCoinbase(common.Address{1}) - - // One transaction to 0xAAAA - accesses := types.AccessList{types.AccessTuple{ - Address: aa, - StorageKeys: []common.Hash{{0}}, - }} - - txdata := &types.DynamicFeeTx{ - ChainID: gspec.Config.ChainID, - Nonce: 0, - To: &aa, - Gas: 30000, - GasFeeCap: newGwei(5), - GasTipCap: big.NewInt(2), - AccessList: accesses, - Data: []byte{}, - } - tx := types.NewTx(txdata) - tx, _ = types.SignTx(tx, signer, key1) - - b.AddTx(tx) - }) - chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), gspec, nil, engine, vm.Config{}, nil, nil) - if err != nil { - t.Fatalf("failed to create tester chain: %v", err) - } - defer chain.Stop() - - if n, err := chain.InsertChain(blocks); err != nil { - t.Fatalf("block %d: failed to insert into chain: %v", n, err) - } - - block := chain.GetBlockByNumber(1) - - // 1+2: Ensure EIP-1559 access lists are accounted for via gas usage. - expectedGas := params.TxGas + params.TxAccessListAddressGas + params.TxAccessListStorageKeyGas + - vm.GasQuickStep*2 + params.WarmStorageReadCostEIP2929 + params.ColdSloadCostEIP2929 - if block.GasUsed() != expectedGas { - t.Fatalf("incorrect amount of gas spent: expected %d, got %d", expectedGas, block.GasUsed()) - } - - state, _ := chain.State() - - // 3: Ensure that miner received only the tx's tip. - actual := state.GetBalance(block.Coinbase()).ToBig() - expected := new(big.Int).Add( - new(big.Int).SetUint64(block.GasUsed()*block.Transactions()[0].GasTipCap().Uint64()), - ethash.ConstantinopleBlockReward.ToBig(), - ) - if actual.Cmp(expected) != 0 { - t.Fatalf("miner balance incorrect: expected %d, got %d", expected, actual) - } - - // 4: Ensure the tx sender paid for the gasUsed * (tip + block baseFee). 
- actual = new(big.Int).Sub(funds, state.GetBalance(addr1).ToBig()) - expected = new(big.Int).SetUint64(block.GasUsed() * (block.Transactions()[0].GasTipCap().Uint64() + block.BaseFee().Uint64())) - if actual.Cmp(expected) != 0 { - t.Fatalf("sender balance incorrect: expected %d, got %d", expected, actual) - } - - blocks, _ = GenerateChain(gspec.Config, block, engine, genDb, 1, func(i int, b *BlockGen) { - b.SetCoinbase(common.Address{2}) - - txdata := &types.LegacyTx{ - Nonce: 0, - To: &aa, - Gas: 30000, - GasPrice: newGwei(5), - } - tx := types.NewTx(txdata) - tx, _ = types.SignTx(tx, signer, key2) - - b.AddTx(tx) - }) - - if n, err := chain.InsertChain(blocks); err != nil { - t.Fatalf("block %d: failed to insert into chain: %v", n, err) - } - - block = chain.GetBlockByNumber(2) - state, _ = chain.State() - effectiveTip := block.Transactions()[0].GasTipCap().Uint64() - block.BaseFee().Uint64() - - // 6+5: Ensure that miner received only the tx's effective tip. - actual = state.GetBalance(block.Coinbase()).ToBig() - expected = new(big.Int).Add( - new(big.Int).SetUint64(block.GasUsed()*effectiveTip), - ethash.ConstantinopleBlockReward.ToBig(), - ) - if actual.Cmp(expected) != 0 { - t.Fatalf("miner balance incorrect: expected %d, got %d", expected, actual) - } - - // 4: Ensure the tx sender paid for the gasUsed * (effectiveTip + block baseFee). - actual = new(big.Int).Sub(funds, state.GetBalance(addr2).ToBig()) - expected = new(big.Int).SetUint64(block.GasUsed() * (effectiveTip + block.BaseFee().Uint64())) - if actual.Cmp(expected) != 0 { - t.Fatalf("sender balance incorrect: expected %d, got %d", expected, actual) - } -} - -// Tests the scenario the chain is requested to another point with the missing state. -// It expects the state is recovered and all relevant chain markers are set correctly. 
-func TestSetCanonical(t *testing.T) { - testSetCanonical(t, rawdb.HashScheme) - testSetCanonical(t, rawdb.PathScheme) -} - -func testSetCanonical(t *testing.T, scheme string) { - //log.Root().SetHandler(log.LvlFilterHandler(log.LvlDebug, log.StreamHandler(os.Stderr, log.TerminalFormat(true)))) - - var ( - key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") - address = crypto.PubkeyToAddress(key.PublicKey) - funds = big.NewInt(100000000000000000) - gspec = &Genesis{ - Config: params.TestChainConfig, - Alloc: types.GenesisAlloc{address: {Balance: funds}}, - BaseFee: big.NewInt(params.InitialBaseFee), - } - signer = types.LatestSigner(gspec.Config) - engine = ethash.NewFaker() - ) - // Generate and import the canonical chain - _, canon, _ := GenerateChainWithGenesis(gspec, engine, 2*TriesInMemory, func(i int, gen *BlockGen) { - tx, err := types.SignTx(types.NewTransaction(gen.TxNonce(address), common.Address{0x00}, big.NewInt(1000), params.TxGas, gen.header.BaseFee, nil), signer, key) - if err != nil { - panic(err) - } - gen.AddTx(tx) - }) - diskdb, _ := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false, false, false, false, false) - defer diskdb.Close() - - chain, err := NewBlockChain(diskdb, DefaultCacheConfigWithScheme(scheme), gspec, nil, engine, vm.Config{}, nil, nil) - if err != nil { - t.Fatalf("failed to create tester chain: %v", err) - } - defer chain.Stop() - - if n, err := chain.InsertChain(canon); err != nil { - t.Fatalf("block %d: failed to insert into chain: %v", n, err) - } - - // Generate the side chain and import them - _, side, _ := GenerateChainWithGenesis(gspec, engine, 2*TriesInMemory, func(i int, gen *BlockGen) { - tx, err := types.SignTx(types.NewTransaction(gen.TxNonce(address), common.Address{0x00}, big.NewInt(1), params.TxGas, gen.header.BaseFee, nil), signer, key) - if err != nil { - panic(err) - } - gen.AddTx(tx) - }) - for _, block := range side { - err := chain.InsertBlockWithoutSetHead(block) - if err != nil { - t.Fatalf("Failed to insert into chain: %v", err) - } - } - for _, block := range side { - got := chain.GetBlockByHash(block.Hash()) - if got == nil { - t.Fatalf("Lost the inserted block") - } - } - - // Set the chain head to the side chain, ensure all the relevant markers are updated. - verify := func(head *types.Block) { - if chain.CurrentBlock().Hash() != head.Hash() { - t.Fatalf("Unexpected block hash, want %x, got %x", head.Hash(), chain.CurrentBlock().Hash()) - } - if chain.CurrentSnapBlock().Hash() != head.Hash() { - t.Fatalf("Unexpected fast block hash, want %x, got %x", head.Hash(), chain.CurrentSnapBlock().Hash()) - } - if chain.CurrentHeader().Hash() != head.Hash() { - t.Fatalf("Unexpected head header, want %x, got %x", head.Hash(), chain.CurrentHeader().Hash()) - } - if !chain.HasState(head.Root()) { - t.Fatalf("Lost block state %v %x", head.Number(), head.Hash()) - } - } - chain.SetCanonical(side[len(side)-1]) - verify(side[len(side)-1]) - - // Reset the chain head to original chain - chain.SetCanonical(canon[TriesInMemory-1]) - verify(canon[TriesInMemory-1]) -} - -// TestCanonicalHashMarker tests all the canonical hash markers are updated/deleted -// correctly in case reorg is called. 
-func TestCanonicalHashMarker(t *testing.T) { - testCanonicalHashMarker(t, rawdb.HashScheme) - testCanonicalHashMarker(t, rawdb.PathScheme) -} - -func testCanonicalHashMarker(t *testing.T, scheme string) { - var cases = []struct { - forkA int - forkB int - }{ - // ForkA: 10 blocks - // ForkB: 1 blocks - // - // reorged: - // markers [2, 10] should be deleted - // markers [1] should be updated - {10, 1}, - - // ForkA: 10 blocks - // ForkB: 2 blocks - // - // reorged: - // markers [3, 10] should be deleted - // markers [1, 2] should be updated - {10, 2}, - - // ForkA: 10 blocks - // ForkB: 10 blocks - // - // reorged: - // markers [1, 10] should be updated - {10, 10}, - - // ForkA: 10 blocks - // ForkB: 11 blocks - // - // reorged: - // markers [1, 11] should be updated - {10, 11}, - } - for _, c := range cases { - var ( - gspec = &Genesis{ - Config: params.TestChainConfig, - Alloc: types.GenesisAlloc{}, - BaseFee: big.NewInt(params.InitialBaseFee), - } - engine = ethash.NewFaker() - ) - _, forkA, _ := GenerateChainWithGenesis(gspec, engine, c.forkA, func(i int, gen *BlockGen) {}) - _, forkB, _ := GenerateChainWithGenesis(gspec, engine, c.forkB, func(i int, gen *BlockGen) {}) - - // Initialize test chain - chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), DefaultCacheConfigWithScheme(scheme), gspec, nil, engine, vm.Config{}, nil, nil) - if err != nil { - t.Fatalf("failed to create tester chain: %v", err) - } - // Insert forkA and forkB, the canonical should on forkA still - if n, err := chain.InsertChain(forkA); err != nil { - t.Fatalf("block %d: failed to insert into chain: %v", n, err) - } - if n, err := chain.InsertChain(forkB); err != nil { - t.Fatalf("block %d: failed to insert into chain: %v", n, err) - } - - verify := func(head *types.Block) { - if chain.CurrentBlock().Hash() != head.Hash() { - t.Fatalf("Unexpected block hash, want %x, got %x", head.Hash(), chain.CurrentBlock().Hash()) - } - if chain.CurrentSnapBlock().Hash() != head.Hash() { - t.Fatalf("Unexpected fast block hash, want %x, got %x", head.Hash(), chain.CurrentSnapBlock().Hash()) - } - if chain.CurrentHeader().Hash() != head.Hash() { - t.Fatalf("Unexpected head header, want %x, got %x", head.Hash(), chain.CurrentHeader().Hash()) - } - if !chain.HasState(head.Root()) { - t.Fatalf("Lost block state %v %x", head.Number(), head.Hash()) - } - } - - // Switch canonical chain to forkB if necessary - if len(forkA) < len(forkB) { - verify(forkB[len(forkB)-1]) - } else { - verify(forkA[len(forkA)-1]) - chain.SetCanonical(forkB[len(forkB)-1]) - verify(forkB[len(forkB)-1]) - } - - // Ensure all hash markers are updated correctly - for i := 0; i < len(forkB); i++ { - block := forkB[i] - hash := chain.GetCanonicalHash(block.NumberU64()) - if hash != block.Hash() { - t.Fatalf("Unexpected canonical hash %d", block.NumberU64()) - } - } - if c.forkA > c.forkB { - for i := uint64(c.forkB) + 1; i <= uint64(c.forkA); i++ { - hash := chain.GetCanonicalHash(i) - if hash != (common.Hash{}) { - t.Fatalf("Unexpected canonical hash %d", i) - } - } - } - chain.Stop() - } -} - -func TestCreateThenDeletePreByzantium(t *testing.T) { - // We use Ropsten chain config instead of Testchain config, this is - // deliberate: we want to use pre-byz rules where we have intermediate state roots - // between transactions. 
- testCreateThenDelete(t, ¶ms.ChainConfig{ - ChainID: big.NewInt(3), - HomesteadBlock: big.NewInt(0), - EIP150Block: big.NewInt(0), - EIP155Block: big.NewInt(10), - EIP158Block: big.NewInt(10), - ByzantiumBlock: big.NewInt(1_700_000), - }) -} -func TestCreateThenDeletePostByzantium(t *testing.T) { - testCreateThenDelete(t, params.TestChainConfig) -} - -// testCreateThenDelete tests a creation and subsequent deletion of a contract, happening -// within the same block. -func testCreateThenDelete(t *testing.T, config *params.ChainConfig) { - var ( - engine = ethash.NewFaker() - // A sender who makes transactions, has some funds - key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") - address = crypto.PubkeyToAddress(key.PublicKey) - destAddress = crypto.CreateAddress(address, 0) - funds = big.NewInt(1000000000000000) - ) - - // runtime code is 0x60ffff : PUSH1 0xFF SELFDESTRUCT, a.k.a SELFDESTRUCT(0xFF) - code := append([]byte{0x60, 0xff, 0xff}, make([]byte, 32-3)...) - initCode := []byte{ - // SSTORE 1:1 - byte(vm.PUSH1), 0x1, - byte(vm.PUSH1), 0x1, - byte(vm.SSTORE), - // Get the runtime-code on the stack - byte(vm.PUSH32)} - initCode = append(initCode, code...) - initCode = append(initCode, []byte{ - byte(vm.PUSH1), 0x0, // offset - byte(vm.MSTORE), - byte(vm.PUSH1), 0x3, // size - byte(vm.PUSH1), 0x0, // offset - byte(vm.RETURN), // return 3 bytes of zero-code - }...) - gspec := &Genesis{ - Config: config, - Alloc: types.GenesisAlloc{ - address: {Balance: funds}, - }, - } - nonce := uint64(0) - signer := types.HomesteadSigner{} - _, blocks, _ := GenerateChainWithGenesis(gspec, engine, 2, func(i int, b *BlockGen) { - fee := big.NewInt(1) - if b.header.BaseFee != nil { - fee = b.header.BaseFee - } - b.SetCoinbase(common.Address{1}) - tx, _ := types.SignNewTx(key, signer, &types.LegacyTx{ - Nonce: nonce, - GasPrice: new(big.Int).Set(fee), - Gas: 100000, - Data: initCode, - }) - nonce++ - b.AddTx(tx) - tx, _ = types.SignNewTx(key, signer, &types.LegacyTx{ - Nonce: nonce, - GasPrice: new(big.Int).Set(fee), - Gas: 100000, - To: &destAddress, - }) - b.AddTx(tx) - nonce++ - }) - // Import the canonical chain - chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, engine, vm.Config{ - //Debug: true, - //Tracer: logger.NewJSONLogger(nil, os.Stdout), - }, nil, nil) - if err != nil { - t.Fatalf("failed to create tester chain: %v", err) - } - defer chain.Stop() - // Import the blocks - for _, block := range blocks { - if _, err := chain.InsertChain([]*types.Block{block}); err != nil { - t.Fatalf("block %d: failed to insert into chain: %v", block.NumberU64(), err) - } - } -} - -func TestDeleteThenCreate(t *testing.T) { - var ( - engine = ethash.NewFaker() - key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") - address = crypto.PubkeyToAddress(key.PublicKey) - factoryAddr = crypto.CreateAddress(address, 0) - funds = big.NewInt(1000000000000000) - ) - /* - contract Factory { - function deploy(bytes memory code) public { - address addr; - assembly { - addr := create2(0, add(code, 0x20), mload(code), 0) - if iszero(extcodesize(addr)) { - revert(0, 0) - } - } - } - } - */ - factoryBIN := 
common.Hex2Bytes("608060405234801561001057600080fd5b50610241806100206000396000f3fe608060405234801561001057600080fd5b506004361061002a5760003560e01c80627743601461002f575b600080fd5b610049600480360381019061004491906100d8565b61004b565b005b6000808251602084016000f59050803b61006457600080fd5b5050565b600061007b61007684610146565b610121565b905082815260208101848484011115610097576100966101eb565b5b6100a2848285610177565b509392505050565b600082601f8301126100bf576100be6101e6565b5b81356100cf848260208601610068565b91505092915050565b6000602082840312156100ee576100ed6101f5565b5b600082013567ffffffffffffffff81111561010c5761010b6101f0565b5b610118848285016100aa565b91505092915050565b600061012b61013c565b90506101378282610186565b919050565b6000604051905090565b600067ffffffffffffffff821115610161576101606101b7565b5b61016a826101fa565b9050602081019050919050565b82818337600083830152505050565b61018f826101fa565b810181811067ffffffffffffffff821117156101ae576101ad6101b7565b5b80604052505050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b600080fd5b600080fd5b600080fd5b600080fd5b6000601f19601f830116905091905056fea2646970667358221220ea8b35ed310d03b6b3deef166941140b4d9e90ea2c92f6b41eb441daf49a59c364736f6c63430008070033") - - /* - contract C { - uint256 value; - constructor() { - value = 100; - } - function destruct() public payable { - selfdestruct(payable(msg.sender)); - } - receive() payable external {} - } - */ - contractABI := common.Hex2Bytes("6080604052348015600f57600080fd5b5060646000819055506081806100266000396000f3fe608060405260043610601f5760003560e01c80632b68b9c614602a576025565b36602557005b600080fd5b60306032565b005b3373ffffffffffffffffffffffffffffffffffffffff16fffea2646970667358221220ab749f5ed1fcb87bda03a74d476af3f074bba24d57cb5a355e8162062ad9a4e664736f6c63430008070033") - contractAddr := crypto.CreateAddress2(factoryAddr, [32]byte{}, crypto.Keccak256(contractABI)) - - gspec := &Genesis{ - Config: params.TestChainConfig, - Alloc: types.GenesisAlloc{ - address: {Balance: funds}, - }, - } - nonce := uint64(0) - signer := types.HomesteadSigner{} - _, blocks, _ := GenerateChainWithGenesis(gspec, engine, 2, func(i int, b *BlockGen) { - fee := big.NewInt(1) - if b.header.BaseFee != nil { - fee = b.header.BaseFee - } - b.SetCoinbase(common.Address{1}) - - // Block 1 - if i == 0 { - tx, _ := types.SignNewTx(key, signer, &types.LegacyTx{ - Nonce: nonce, - GasPrice: new(big.Int).Set(fee), - Gas: 500000, - Data: factoryBIN, - }) - nonce++ - b.AddTx(tx) - - data := common.Hex2Bytes("00774360000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000a76080604052348015600f57600080fd5b5060646000819055506081806100266000396000f3fe608060405260043610601f5760003560e01c80632b68b9c614602a576025565b36602557005b600080fd5b60306032565b005b3373ffffffffffffffffffffffffffffffffffffffff16fffea2646970667358221220ab749f5ed1fcb87bda03a74d476af3f074bba24d57cb5a355e8162062ad9a4e664736f6c6343000807003300000000000000000000000000000000000000000000000000") - tx, _ = types.SignNewTx(key, signer, &types.LegacyTx{ - Nonce: nonce, - GasPrice: new(big.Int).Set(fee), - Gas: 500000, - To: &factoryAddr, - Data: data, - }) - b.AddTx(tx) - nonce++ - } else { - // Block 2 - tx, _ := types.SignNewTx(key, signer, &types.LegacyTx{ - Nonce: nonce, - GasPrice: new(big.Int).Set(fee), - Gas: 500000, - To: &contractAddr, - Data: common.Hex2Bytes("2b68b9c6"), // destruct - }) - nonce++ - b.AddTx(tx) - - data := 
common.Hex2Bytes("00774360000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000a76080604052348015600f57600080fd5b5060646000819055506081806100266000396000f3fe608060405260043610601f5760003560e01c80632b68b9c614602a576025565b36602557005b600080fd5b60306032565b005b3373ffffffffffffffffffffffffffffffffffffffff16fffea2646970667358221220ab749f5ed1fcb87bda03a74d476af3f074bba24d57cb5a355e8162062ad9a4e664736f6c6343000807003300000000000000000000000000000000000000000000000000") - tx, _ = types.SignNewTx(key, signer, &types.LegacyTx{ - Nonce: nonce, - GasPrice: new(big.Int).Set(fee), - Gas: 500000, - To: &factoryAddr, // re-creation - Data: data, - }) - b.AddTx(tx) - nonce++ - } - }) - // Import the canonical chain - chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, engine, vm.Config{}, nil, nil) - if err != nil { - t.Fatalf("failed to create tester chain: %v", err) - } - for _, block := range blocks { - if _, err := chain.InsertChain([]*types.Block{block}); err != nil { - t.Fatalf("block %d: failed to insert into chain: %v", block.NumberU64(), err) - } - } -} - -// TestTransientStorageReset ensures the transient storage is wiped correctly -// between transactions. -func TestTransientStorageReset(t *testing.T) { - var ( - engine = ethash.NewFaker() - key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") - address = crypto.PubkeyToAddress(key.PublicKey) - destAddress = crypto.CreateAddress(address, 0) - funds = big.NewInt(1000000000000000) - vmConfig = vm.Config{ - ExtraEips: []int{1153}, // Enable transient storage EIP - } - ) - code := append([]byte{ - // TLoad value with location 1 - byte(vm.PUSH1), 0x1, - byte(vm.TLOAD), - - // PUSH location - byte(vm.PUSH1), 0x1, - - // SStore location:value - byte(vm.SSTORE), - }, make([]byte, 32-6)...) - initCode := []byte{ - // TSTORE 1:1 - byte(vm.PUSH1), 0x1, - byte(vm.PUSH1), 0x1, - byte(vm.TSTORE), - - // Get the runtime-code on the stack - byte(vm.PUSH32)} - initCode = append(initCode, code...) - initCode = append(initCode, []byte{ - byte(vm.PUSH1), 0x0, // offset - byte(vm.MSTORE), - byte(vm.PUSH1), 0x6, // size - byte(vm.PUSH1), 0x0, // offset - byte(vm.RETURN), // return 6 bytes of zero-code - }...) - gspec := &Genesis{ - Config: params.TestChainConfig, - Alloc: types.GenesisAlloc{ - address: {Balance: funds}, - }, - } - nonce := uint64(0) - signer := types.HomesteadSigner{} - _, blocks, _ := GenerateChainWithGenesis(gspec, engine, 1, func(i int, b *BlockGen) { - fee := big.NewInt(1) - if b.header.BaseFee != nil { - fee = b.header.BaseFee - } - b.SetCoinbase(common.Address{1}) - tx, _ := types.SignNewTx(key, signer, &types.LegacyTx{ - Nonce: nonce, - GasPrice: new(big.Int).Set(fee), - Gas: 100000, - Data: initCode, - }) - nonce++ - b.AddTxWithVMConfig(tx, vmConfig) - - tx, _ = types.SignNewTx(key, signer, &types.LegacyTx{ - Nonce: nonce, - GasPrice: new(big.Int).Set(fee), - Gas: 100000, - To: &destAddress, - }) - b.AddTxWithVMConfig(tx, vmConfig) - nonce++ - }) - - // Initialize the blockchain with 1153 enabled. 
- chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, engine, vmConfig, nil, nil) - if err != nil { - t.Fatalf("failed to create tester chain: %v", err) - } - defer chain.Stop() - // Import the blocks - if _, err := chain.InsertChain(blocks); err != nil { - t.Fatalf("failed to insert into chain: %v", err) - } - // Check the storage - state, err := chain.StateAt(chain.CurrentHeader().Root) - if err != nil { - t.Fatalf("Failed to load state %v", err) - } - loc := common.BytesToHash([]byte{1}) - slot := state.GetState(destAddress, loc) - if slot != (common.Hash{}) { - t.Fatalf("Unexpected dirty storage slot") - } -} - -func TestEIP3651(t *testing.T) { - var ( - aa = common.HexToAddress("0x000000000000000000000000000000000000aaaa") - bb = common.HexToAddress("0x000000000000000000000000000000000000bbbb") - engine = beacon.NewFaker() - - // A sender who makes transactions, has some funds - key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") - key2, _ = crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a") - addr1 = crypto.PubkeyToAddress(key1.PublicKey) - addr2 = crypto.PubkeyToAddress(key2.PublicKey) - funds = new(big.Int).Mul(common.Big1, big.NewInt(params.Ether)) - config = *params.AllEthashProtocolChanges - gspec = &Genesis{ - Config: &config, - Alloc: types.GenesisAlloc{ - addr1: {Balance: funds}, - addr2: {Balance: funds}, - // The address 0xAAAA sloads 0x00 and 0x01 - aa: { - Code: []byte{ - byte(vm.PC), - byte(vm.PC), - byte(vm.SLOAD), - byte(vm.SLOAD), - }, - Nonce: 0, - Balance: big.NewInt(0), - }, - // The address 0xBBBB calls 0xAAAA - bb: { - Code: []byte{ - byte(vm.PUSH1), 0, // out size - byte(vm.DUP1), // out offset - byte(vm.DUP1), // out insize - byte(vm.DUP1), // in offset - byte(vm.PUSH2), // address - byte(0xaa), - byte(0xaa), - byte(vm.GAS), // gas - byte(vm.DELEGATECALL), - }, - Nonce: 0, - Balance: big.NewInt(0), - }, - }, - } - ) - - gspec.Config.BerlinBlock = common.Big0 - gspec.Config.LondonBlock = common.Big0 - gspec.Config.TerminalTotalDifficulty = common.Big0 - gspec.Config.TerminalTotalDifficultyPassed = true - gspec.Config.ShanghaiTime = u64(0) - signer := types.LatestSigner(gspec.Config) - - _, blocks, _ := GenerateChainWithGenesis(gspec, engine, 1, func(i int, b *BlockGen) { - b.SetCoinbase(aa) - // One transaction to Coinbase - txdata := &types.DynamicFeeTx{ - ChainID: gspec.Config.ChainID, - Nonce: 0, - To: &bb, - Gas: 500000, - GasFeeCap: newGwei(5), - GasTipCap: big.NewInt(2), - AccessList: nil, - Data: []byte{}, - } - tx := types.NewTx(txdata) - tx, _ = types.SignTx(tx, signer, key1) - - b.AddTx(tx) - }) - chain, err := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, engine, vm.Config{Tracer: logger.NewMarkdownLogger(&logger.Config{}, os.Stderr)}, nil, nil) - if err != nil { - t.Fatalf("failed to create tester chain: %v", err) - } - defer chain.Stop() - if n, err := chain.InsertChain(blocks); err != nil { - t.Fatalf("block %d: failed to insert into chain: %v", n, err) - } - - block := chain.GetBlockByNumber(1) - - // 1+2: Ensure EIP-1559 access lists are accounted for via gas usage. 
- innerGas := vm.GasQuickStep*2 + params.ColdSloadCostEIP2929*2 - expectedGas := params.TxGas + 5*vm.GasFastestStep + vm.GasQuickStep + 100 + innerGas // 100 because 0xaaaa is in access list - if block.GasUsed() != expectedGas { - t.Fatalf("incorrect amount of gas spent: expected %d, got %d", expectedGas, block.GasUsed()) - } - - state, _ := chain.State() - - // 3: Ensure that miner received only the tx's tip. - actual := state.GetBalance(block.Coinbase()).ToBig() - expected := new(big.Int).SetUint64(block.GasUsed() * block.Transactions()[0].GasTipCap().Uint64()) - if actual.Cmp(expected) != 0 { - t.Fatalf("miner balance incorrect: expected %d, got %d", expected, actual) - } - - // 4: Ensure the tx sender paid for the gasUsed * (tip + block baseFee). - actual = new(big.Int).Sub(funds, state.GetBalance(addr1).ToBig()) - expected = new(big.Int).SetUint64(block.GasUsed() * (block.Transactions()[0].GasTipCap().Uint64() + block.BaseFee().Uint64())) - if actual.Cmp(expected) != 0 { - t.Fatalf("sender balance incorrect: expected %d, got %d", expected, actual) - } -} - -type mockParlia struct { - consensus.Engine -} - -func (c *mockParlia) Author(header *types.Header) (common.Address, error) { - return header.Coinbase, nil -} - -func (c *mockParlia) VerifyUncles(chain consensus.ChainReader, block *types.Block) error { - return nil -} - -func (c *mockParlia) VerifyHeader(chain consensus.ChainHeaderReader, header *types.Header) error { - return nil -} - -func (c *mockParlia) VerifyHeaders(chain consensus.ChainHeaderReader, headers []*types.Header) (chan<- struct{}, <-chan error) { - abort := make(chan<- struct{}) - results := make(chan error, len(headers)) - for i := 0; i < len(headers); i++ { - results <- nil - } - return abort, results -} - -func (c *mockParlia) Finalize(chain consensus.ChainHeaderReader, header *types.Header, state *state.StateDB, _ *[]*types.Transaction, uncles []*types.Header, withdrawals []*types.Withdrawal, - _ *[]*types.Receipt, _ *[]*types.Transaction, _ *uint64) (err error) { - return -} - -func (c *mockParlia) FinalizeAndAssemble(chain consensus.ChainHeaderReader, header *types.Header, state *state.StateDB, txs []*types.Transaction, - uncles []*types.Header, receipts []*types.Receipt, withdrawals []*types.Withdrawal) (*types.Block, []*types.Receipt, error) { - // Finalize block - c.Finalize(chain, header, state, &txs, uncles, nil, nil, nil, nil) - - // Assign the final state root to header. 
- header.Root = state.IntermediateRoot(chain.Config().IsEIP158(header.Number)) - - // Header seems complete, assemble into a block and return - return types.NewBlock(header, txs, uncles, receipts, trie.NewStackTrie(nil)), receipts, nil -} - -func (c *mockParlia) CalcDifficulty(chain consensus.ChainHeaderReader, time uint64, parent *types.Header) *big.Int { - return big.NewInt(1) -} - -func TestParliaBlobFeeReward(t *testing.T) { - // Have N headers in the freezer - frdir := t.TempDir() - db, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), frdir, "", false, false, false, false, false) - if err != nil { - t.Fatalf("failed to create database with ancient backend") - } - config := params.ParliaTestChainConfig - gspec := &Genesis{ - Config: config, - Alloc: types.GenesisAlloc{testAddr: {Balance: new(big.Int).SetUint64(10 * params.Ether)}}, - } - engine := &mockParlia{} - chain, _ := NewBlockChain(db, nil, gspec, nil, engine, vm.Config{}, nil, nil) - signer := types.LatestSigner(config) - - _, bs, _ := GenerateChainWithGenesis(gspec, engine, 1, func(i int, gen *BlockGen) { - tx, _ := makeMockTx(config, signer, testKey, gen.TxNonce(testAddr), gen.BaseFee().Uint64(), eip4844.CalcBlobFee(gen.ExcessBlobGas()).Uint64(), false) - gen.AddTxWithChain(chain, tx) - tx, sidecar := makeMockTx(config, signer, testKey, gen.TxNonce(testAddr), gen.BaseFee().Uint64(), eip4844.CalcBlobFee(gen.ExcessBlobGas()).Uint64(), true) - gen.AddTxWithChain(chain, tx) - gen.AddBlobSidecar(&types.BlobSidecar{ - BlobTxSidecar: *sidecar, - TxIndex: 1, - TxHash: tx.Hash(), - }) - }) - if _, err := chain.InsertChain(bs); err != nil { - panic(err) - } - - stateDB, err := chain.State() - if err != nil { - panic(err) - } - expect := new(big.Int) - for _, block := range bs { - receipts := chain.GetReceiptsByHash(block.Hash()) - for _, receipt := range receipts { - if receipt.BlobGasPrice != nil { - blob := receipt.BlobGasPrice.Mul(receipt.BlobGasPrice, new(big.Int).SetUint64(receipt.BlobGasUsed)) - expect.Add(expect, blob) - } - plain := receipt.EffectiveGasPrice.Mul(receipt.EffectiveGasPrice, new(big.Int).SetUint64(receipt.GasUsed)) - expect.Add(expect, plain) - } - } - actual := stateDB.GetBalance(params.SystemAddress) - require.Equal(t, expect.Uint64(), actual.Uint64()) -} - -func makeMockTx(config *params.ChainConfig, signer types.Signer, key *ecdsa.PrivateKey, nonce uint64, baseFee uint64, blobBaseFee uint64, isBlobTx bool) (*types.Transaction, *types.BlobTxSidecar) { - if !isBlobTx { - raw := &types.DynamicFeeTx{ - ChainID: config.ChainID, - Nonce: nonce, - GasTipCap: big.NewInt(10), - GasFeeCap: new(big.Int).SetUint64(baseFee + 10), - Gas: params.TxGas, - To: &common.Address{0x00}, - Value: big.NewInt(0), - } - tx, _ := types.SignTx(types.NewTx(raw), signer, key) - return tx, nil - } - sidecar := &types.BlobTxSidecar{ - Blobs: []kzg4844.Blob{emptyBlob, emptyBlob}, - Commitments: []kzg4844.Commitment{emptyBlobCommit, emptyBlobCommit}, - Proofs: []kzg4844.Proof{emptyBlobProof, emptyBlobProof}, - } - raw := &types.BlobTx{ - ChainID: uint256.MustFromBig(config.ChainID), - Nonce: nonce, - GasTipCap: uint256.NewInt(10), - GasFeeCap: uint256.NewInt(baseFee + 10), - Gas: params.TxGas, - To: common.Address{0x00}, - Value: uint256.NewInt(0), - BlobFeeCap: uint256.NewInt(blobBaseFee), - BlobHashes: sidecar.BlobHashes(), - } - tx, _ := types.SignTx(types.NewTx(raw), signer, key) - return tx, sidecar -} diff --git a/core/chain_indexer_test.go b/core/chain_indexer_test.go deleted file mode 100644 index f099609015..0000000000 
--- a/core/chain_indexer_test.go +++ /dev/null @@ -1,246 +0,0 @@ -// Copyright 2017 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package core - -import ( - "context" - "errors" - "fmt" - "math/big" - "math/rand" - "testing" - "time" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/rawdb" - "github.com/ethereum/go-ethereum/core/types" -) - -// Runs multiple tests with randomized parameters. -func TestChainIndexerSingle(t *testing.T) { - for i := 0; i < 10; i++ { - testChainIndexer(t, 1) - } -} - -// Runs multiple tests with randomized parameters and different number of -// chain backends. -func TestChainIndexerWithChildren(t *testing.T) { - for i := 2; i < 8; i++ { - testChainIndexer(t, i) - } -} - -// testChainIndexer runs a test with either a single chain indexer or a chain of -// multiple backends. The section size and required confirmation count parameters -// are randomized. -func testChainIndexer(t *testing.T, count int) { - db := rawdb.NewMemoryDatabase() - defer db.Close() - - // Create a chain of indexers and ensure they all report empty - backends := make([]*testChainIndexBackend, count) - for i := 0; i < count; i++ { - var ( - sectionSize = uint64(rand.Intn(100) + 1) - confirmsReq = uint64(rand.Intn(10)) - ) - backends[i] = &testChainIndexBackend{t: t, processCh: make(chan uint64)} - backends[i].indexer = NewChainIndexer(db, rawdb.NewTable(db, string([]byte{byte(i)})), backends[i], sectionSize, confirmsReq, 0, fmt.Sprintf("indexer-%d", i)) - - if sections, _, _ := backends[i].indexer.Sections(); sections != 0 { - t.Fatalf("Canonical section count mismatch: have %v, want %v", sections, 0) - } - if i > 0 { - backends[i-1].indexer.AddChildIndexer(backends[i].indexer) - } - } - defer backends[0].indexer.Close() // parent indexer shuts down children - // notify pings the root indexer about a new head or reorg, then expect - // processed blocks if a section is processable - notify := func(headNum, failNum uint64, reorg bool) { - backends[0].indexer.newHead(headNum, reorg) - if reorg { - for _, backend := range backends { - headNum = backend.reorg(headNum) - backend.assertSections() - } - return - } - var cascade bool - for _, backend := range backends { - headNum, cascade = backend.assertBlocks(headNum, failNum) - if !cascade { - break - } - backend.assertSections() - } - } - // inject inserts a new random canonical header into the database directly - inject := func(number uint64) { - header := &types.Header{Number: big.NewInt(int64(number)), Extra: big.NewInt(rand.Int63()).Bytes()} - if number > 0 { - header.ParentHash = rawdb.ReadCanonicalHash(db, number-1) - } - rawdb.WriteHeader(db, header) - rawdb.WriteCanonicalHash(db, header.Hash(), number) - } - // Start indexer with an already existing chain - for i := uint64(0); i <= 100; i++ { - 
inject(i) - } - notify(100, 100, false) - - // Add new blocks one by one - for i := uint64(101); i <= 1000; i++ { - inject(i) - notify(i, i, false) - } - // Do a reorg - notify(500, 500, true) - - // Create new fork - for i := uint64(501); i <= 1000; i++ { - inject(i) - notify(i, i, false) - } - for i := uint64(1001); i <= 1500; i++ { - inject(i) - } - // Failed processing scenario where less blocks are available than notified - notify(2000, 1500, false) - - // Notify about a reorg (which could have caused the missing blocks if happened during processing) - notify(1500, 1500, true) - - // Create new fork - for i := uint64(1501); i <= 2000; i++ { - inject(i) - notify(i, i, false) - } -} - -// testChainIndexBackend implements ChainIndexerBackend -type testChainIndexBackend struct { - t *testing.T - indexer *ChainIndexer - section, headerCnt, stored uint64 - processCh chan uint64 -} - -// assertSections verifies if a chain indexer has the correct number of section. -func (b *testChainIndexBackend) assertSections() { - // Keep trying for 3 seconds if it does not match - var sections uint64 - for i := 0; i < 300; i++ { - sections, _, _ = b.indexer.Sections() - if sections == b.stored { - return - } - time.Sleep(10 * time.Millisecond) - } - b.t.Fatalf("Canonical section count mismatch: have %v, want %v", sections, b.stored) -} - -// assertBlocks expects processing calls after new blocks have arrived. If the -// failNum < headNum then we are simulating a scenario where a reorg has happened -// after the processing has started and the processing of a section fails. -func (b *testChainIndexBackend) assertBlocks(headNum, failNum uint64) (uint64, bool) { - var sections uint64 - if headNum >= b.indexer.confirmsReq { - sections = (headNum + 1 - b.indexer.confirmsReq) / b.indexer.sectionSize - if sections > b.stored { - // expect processed blocks - for expectd := b.stored * b.indexer.sectionSize; expectd < sections*b.indexer.sectionSize; expectd++ { - if expectd > failNum { - // rolled back after processing started, no more process calls expected - // wait until updating is done to make sure that processing actually fails - var updating bool - for i := 0; i < 300; i++ { - b.indexer.lock.Lock() - updating = b.indexer.knownSections > b.indexer.storedSections - b.indexer.lock.Unlock() - if !updating { - break - } - time.Sleep(10 * time.Millisecond) - } - if updating { - b.t.Fatalf("update did not finish") - } - sections = expectd / b.indexer.sectionSize - break - } - select { - case <-time.After(10 * time.Second): - b.t.Fatalf("Expected processed block #%d, got nothing", expectd) - case processed := <-b.processCh: - if processed != expectd { - b.t.Errorf("Expected processed block #%d, got #%d", expectd, processed) - } - } - } - b.stored = sections - } - } - if b.stored == 0 { - return 0, false - } - return b.stored*b.indexer.sectionSize - 1, true -} - -func (b *testChainIndexBackend) reorg(headNum uint64) uint64 { - firstChanged := (headNum + 1) / b.indexer.sectionSize - if firstChanged < b.stored { - b.stored = firstChanged - } - return b.stored * b.indexer.sectionSize -} - -func (b *testChainIndexBackend) Reset(ctx context.Context, section uint64, prevHead common.Hash) error { - b.section = section - b.headerCnt = 0 - return nil -} - -func (b *testChainIndexBackend) Process(ctx context.Context, header *types.Header) error { - b.headerCnt++ - if b.headerCnt > b.indexer.sectionSize { - b.t.Error("Processing too many headers") - } - //t.processCh <- header.Number.Uint64() - select { - case <-time.After(10 * 
time.Second): - b.t.Error("Unexpected call to Process") - // Can't use Fatal since this is not the test's goroutine. - // Returning error stops the chainIndexer's updateLoop - return errors.New("Unexpected call to Process") - case b.processCh <- header.Number.Uint64(): - } - return nil -} - -func (b *testChainIndexBackend) Commit() error { - if b.headerCnt != b.indexer.sectionSize { - b.t.Error("Not enough headers processed") - } - return nil -} - -func (b *testChainIndexBackend) Prune(threshold uint64) error { - return nil -} diff --git a/core/chain_makers_test.go b/core/chain_makers_test.go deleted file mode 100644 index a2ec9e6507..0000000000 --- a/core/chain_makers_test.go +++ /dev/null @@ -1,260 +0,0 @@ -// Copyright 2015 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package core - -import ( - "fmt" - "math/big" - "reflect" - "testing" - - "github.com/davecgh/go-spew/spew" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/consensus/beacon" - "github.com/ethereum/go-ethereum/consensus/ethash" - "github.com/ethereum/go-ethereum/core/rawdb" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/core/vm" - "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/params" - "github.com/ethereum/go-ethereum/triedb" -) - -func TestGeneratePOSChain(t *testing.T) { - var ( - keyHex = "9c647b8b7c4e7c3490668fb6c11473619db80c93704c70893d3813af4090c39c" - key, _ = crypto.HexToECDSA(keyHex) - address = crypto.PubkeyToAddress(key.PublicKey) // 658bdf435d810c91414ec09147daa6db62406379 - aa = common.Address{0xaa} - bb = common.Address{0xbb} - funds = big.NewInt(0).Mul(big.NewInt(1337), big.NewInt(params.Ether)) - config = *params.AllEthashProtocolChanges - asm4788 = common.Hex2Bytes("3373fffffffffffffffffffffffffffffffffffffffe14604d57602036146024575f5ffd5b5f35801560495762001fff810690815414603c575f5ffd5b62001fff01545f5260205ff35b5f5ffd5b62001fff42064281555f359062001fff015500") - gspec = &Genesis{ - Config: &config, - Alloc: types.GenesisAlloc{ - address: {Balance: funds}, - params.BeaconRootsAddress: {Balance: common.Big0, Code: asm4788}, - }, - BaseFee: big.NewInt(params.InitialBaseFee), - Difficulty: common.Big1, - GasLimit: 5_000_000, - } - gendb = rawdb.NewMemoryDatabase() - db = rawdb.NewMemoryDatabase() - ) - - config.TerminalTotalDifficultyPassed = true - config.TerminalTotalDifficulty = common.Big0 - config.ShanghaiTime = u64(0) - config.CancunTime = u64(0) - - // init 0xaa with some storage elements - storage := make(map[common.Hash]common.Hash) - storage[common.Hash{0x00}] = common.Hash{0x00} - storage[common.Hash{0x01}] = common.Hash{0x01} - storage[common.Hash{0x02}] = common.Hash{0x02} - storage[common.Hash{0x03}] = common.HexToHash("0303") - gspec.Alloc[aa] = types.Account{ - Balance: common.Big1, - Nonce: 1, - 
Storage: storage, - Code: common.Hex2Bytes("6042"), - } - gspec.Alloc[bb] = types.Account{ - Balance: common.Big2, - Nonce: 1, - Storage: storage, - Code: common.Hex2Bytes("600154600354"), - } - genesis := gspec.MustCommit(gendb, triedb.NewDatabase(gendb, triedb.HashDefaults)) - - genchain, genreceipts := GenerateChain(gspec.Config, genesis, beacon.NewFaker(), gendb, 4, func(i int, gen *BlockGen) { - gen.SetParentBeaconRoot(common.Hash{byte(i + 1)}) - - // Add value transfer tx. - tx := types.MustSignNewTx(key, gen.Signer(), &types.LegacyTx{ - Nonce: gen.TxNonce(address), - To: &address, - Value: big.NewInt(1000), - Gas: params.TxGas, - GasPrice: new(big.Int).Add(gen.BaseFee(), common.Big1), - }) - gen.AddTx(tx) - - // Add withdrawals. - if i == 1 { - gen.AddWithdrawal(&types.Withdrawal{ - Validator: 42, - Address: common.Address{0xee}, - Amount: 1337, - }) - gen.AddWithdrawal(&types.Withdrawal{ - Validator: 13, - Address: common.Address{0xee}, - Amount: 1, - }) - } - if i == 3 { - gen.AddWithdrawal(&types.Withdrawal{ - Validator: 42, - Address: common.Address{0xee}, - Amount: 1337, - }) - gen.AddWithdrawal(&types.Withdrawal{ - Validator: 13, - Address: common.Address{0xee}, - Amount: 1, - }) - } - }) - - // Import the chain. This runs all block validation rules. - blockchain, _ := NewBlockChain(db, nil, gspec, nil, beacon.NewFaker(), vm.Config{}, nil, nil) - defer blockchain.Stop() - - if i, err := blockchain.InsertChain(genchain); err != nil { - t.Fatalf("insert error (block %d): %v\n", genchain[i].NumberU64(), err) - } - - // enforce that withdrawal indexes are monotonically increasing from 0 - var ( - withdrawalIndex uint64 - ) - for i := range genchain { - blocknum := genchain[i].NumberU64() - block := blockchain.GetBlockByNumber(blocknum) - if block == nil { - t.Fatalf("block %d not found", blocknum) - } - - // Verify receipts. - genBlockReceipts := genreceipts[i] - for _, r := range genBlockReceipts { - if r.BlockNumber.Cmp(block.Number()) != 0 { - t.Errorf("receipt has wrong block number %d, want %d", r.BlockNumber, block.Number()) - } - if r.BlockHash != block.Hash() { - t.Errorf("receipt has wrong block hash %v, want %v", r.BlockHash, block.Hash()) - } - - // patch up empty logs list to make DeepEqual below work - if r.Logs == nil { - r.Logs = []*types.Log{} - } - } - blockchainReceipts := blockchain.GetReceiptsByHash(block.Hash()) - if !reflect.DeepEqual(genBlockReceipts, blockchainReceipts) { - t.Fatalf("receipts mismatch\ngenerated: %s\nblockchain: %s", spew.Sdump(genBlockReceipts), spew.Sdump(blockchainReceipts)) - } - - // Verify withdrawals. - if len(block.Withdrawals()) == 0 { - continue - } - for j := 0; j < len(block.Withdrawals()); j++ { - if block.Withdrawals()[j].Index != withdrawalIndex { - t.Fatalf("withdrawal index %d does not equal expected index %d", block.Withdrawals()[j].Index, withdrawalIndex) - } - withdrawalIndex += 1 - } - - // Verify parent beacon root. 
- want := common.Hash{byte(blocknum)} - if got := block.BeaconRoot(); *got != want { - t.Fatalf("block %d, wrong parent beacon root: got %s, want %s", i, got, want) - } - state, _ := blockchain.State() - idx := block.Time()%8191 + 8191 - got := state.GetState(params.BeaconRootsAddress, common.BigToHash(new(big.Int).SetUint64(idx))) - if got != want { - t.Fatalf("block %d, wrong parent beacon root in state: got %s, want %s", i, got, want) - } - } -} - -func ExampleGenerateChain() { - var ( - key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") - key2, _ = crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a") - key3, _ = crypto.HexToECDSA("49a7b37aa6f6645917e7b807e9d1c00d4fa71f18343b0d4122a4d2df64dd6fee") - addr1 = crypto.PubkeyToAddress(key1.PublicKey) - addr2 = crypto.PubkeyToAddress(key2.PublicKey) - addr3 = crypto.PubkeyToAddress(key3.PublicKey) - db = rawdb.NewMemoryDatabase() - genDb = rawdb.NewMemoryDatabase() - ) - - // Ensure that key1 has some funds in the genesis block. - gspec := &Genesis{ - Config: ¶ms.ChainConfig{HomesteadBlock: new(big.Int)}, - Alloc: types.GenesisAlloc{addr1: {Balance: big.NewInt(1000000)}}, - } - genesis := gspec.MustCommit(genDb, triedb.NewDatabase(genDb, triedb.HashDefaults)) - - // This call generates a chain of 5 blocks. The function runs for - // each block and adds different features to gen based on the - // block index. - signer := types.HomesteadSigner{} - chain, _ := GenerateChain(gspec.Config, genesis, ethash.NewFaker(), genDb, 5, func(i int, gen *BlockGen) { - switch i { - case 0: - // In block 1, addr1 sends addr2 some ether. - tx, _ := types.SignTx(types.NewTransaction(gen.TxNonce(addr1), addr2, big.NewInt(10000), params.TxGas, nil, nil), signer, key1) - gen.AddTx(tx) - case 1: - // In block 2, addr1 sends some more ether to addr2. - // addr2 passes it on to addr3. - tx1, _ := types.SignTx(types.NewTransaction(gen.TxNonce(addr1), addr2, big.NewInt(1000), params.TxGas, nil, nil), signer, key1) - tx2, _ := types.SignTx(types.NewTransaction(gen.TxNonce(addr2), addr3, big.NewInt(1000), params.TxGas, nil, nil), signer, key2) - gen.AddTx(tx1) - gen.AddTx(tx2) - case 2: - // Block 3 is empty but was mined by addr3. - gen.SetCoinbase(addr3) - gen.SetExtra([]byte("yeehaw")) - case 3: - // Block 4 includes blocks 2 and 3 as uncle headers (with modified extra data). - b2 := gen.PrevBlock(1).Header() - b2.Extra = []byte("foo") - gen.AddUncle(b2) - b3 := gen.PrevBlock(2).Header() - b3.Extra = []byte("foo") - gen.AddUncle(b3) - } - }) - - // Import the chain. This runs all block validation rules. 
- blockchain, _ := NewBlockChain(db, DefaultCacheConfigWithScheme(rawdb.HashScheme), gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) - defer blockchain.Stop() - - if i, err := blockchain.InsertChain(chain); err != nil { - fmt.Printf("insert error (block %d): %v\n", chain[i].NumberU64(), err) - return - } - - state, _ := blockchain.State() - fmt.Printf("last block: #%d\n", blockchain.CurrentBlock().Number) - fmt.Println("balance of addr1:", state.GetBalance(addr1)) - fmt.Println("balance of addr2:", state.GetBalance(addr2)) - fmt.Println("balance of addr3:", state.GetBalance(addr3)) - // Output: - // last block: #5 - // balance of addr1: 989000 - // balance of addr2: 10000 - // balance of addr3: 19687500000000001000 -} diff --git a/core/dao_test.go b/core/dao_test.go deleted file mode 100644 index b9a899ef2f..0000000000 --- a/core/dao_test.go +++ /dev/null @@ -1,159 +0,0 @@ -// Copyright 2016 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package core - -import ( - "math/big" - "testing" - - "github.com/ethereum/go-ethereum/consensus/ethash" - "github.com/ethereum/go-ethereum/core/rawdb" - "github.com/ethereum/go-ethereum/core/vm" - "github.com/ethereum/go-ethereum/params" -) - -// Tests that DAO-fork enabled clients can properly filter out fork-commencing -// blocks based on their extradata fields. 
-func TestDAOForkRangeExtradata(t *testing.T) { - forkBlock := big.NewInt(32) - chainConfig := *params.NonActivatedConfig - chainConfig.HomesteadBlock = big.NewInt(0) - - // Generate a common prefix for both pro-forkers and non-forkers - gspec := &Genesis{ - BaseFee: big.NewInt(params.InitialBaseFee), - Config: &chainConfig, - } - genDb, prefix, _ := GenerateChainWithGenesis(gspec, ethash.NewFaker(), int(forkBlock.Int64()-1), func(i int, gen *BlockGen) {}) - - // Create the concurrent, conflicting two nodes - proDb := rawdb.NewMemoryDatabase() - proConf := *params.NonActivatedConfig - proConf.HomesteadBlock = big.NewInt(0) - proConf.DAOForkBlock = forkBlock - proConf.DAOForkSupport = true - progspec := &Genesis{ - BaseFee: big.NewInt(params.InitialBaseFee), - Config: &proConf, - } - proBc, _ := NewBlockChain(proDb, nil, progspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) - defer proBc.Stop() - - conDb := rawdb.NewMemoryDatabase() - conConf := *params.NonActivatedConfig - conConf.HomesteadBlock = big.NewInt(0) - conConf.DAOForkBlock = forkBlock - conConf.DAOForkSupport = false - congspec := &Genesis{ - BaseFee: big.NewInt(params.InitialBaseFee), - Config: &conConf, - } - conBc, _ := NewBlockChain(conDb, nil, congspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) - defer conBc.Stop() - - if _, err := proBc.InsertChain(prefix); err != nil { - t.Fatalf("pro-fork: failed to import chain prefix: %v", err) - } - if _, err := conBc.InsertChain(prefix); err != nil { - t.Fatalf("con-fork: failed to import chain prefix: %v", err) - } - // Try to expand both pro-fork and non-fork chains iteratively with other camp's blocks - for i := int64(0); i < params.DAOForkExtraRange.Int64(); i++ { - // Create a pro-fork block, and try to feed into the no-fork chain - bc, _ := NewBlockChain(rawdb.NewMemoryDatabase(), nil, congspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) - - blocks := conBc.GetBlocksFromHash(conBc.CurrentBlock().Hash(), int(conBc.CurrentBlock().Number.Uint64())) - for j := 0; j < len(blocks)/2; j++ { - blocks[j], blocks[len(blocks)-1-j] = blocks[len(blocks)-1-j], blocks[j] - } - if _, err := bc.InsertChain(blocks); err != nil { - t.Fatalf("failed to import contra-fork chain for expansion: %v", err) - } - if err := bc.triedb.Commit(bc.CurrentHeader().Root, false); err != nil { - t.Fatalf("failed to commit contra-fork head for expansion: %v", err) - } - bc.Stop() - blocks, _ = GenerateChain(&proConf, conBc.GetBlockByHash(conBc.CurrentBlock().Hash()), ethash.NewFaker(), genDb, 1, func(i int, gen *BlockGen) {}) - if _, err := conBc.InsertChain(blocks); err == nil { - t.Fatalf("contra-fork chain accepted pro-fork block: %v", blocks[0]) - } - // Create a proper no-fork block for the contra-forker - blocks, _ = GenerateChain(&conConf, conBc.GetBlockByHash(conBc.CurrentBlock().Hash()), ethash.NewFaker(), genDb, 1, func(i int, gen *BlockGen) {}) - if _, err := conBc.InsertChain(blocks); err != nil { - t.Fatalf("contra-fork chain didn't accepted no-fork block: %v", err) - } - // Create a no-fork block, and try to feed into the pro-fork chain - bc, _ = NewBlockChain(rawdb.NewMemoryDatabase(), nil, progspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) - - blocks = proBc.GetBlocksFromHash(proBc.CurrentBlock().Hash(), int(proBc.CurrentBlock().Number.Uint64())) - for j := 0; j < len(blocks)/2; j++ { - blocks[j], blocks[len(blocks)-1-j] = blocks[len(blocks)-1-j], blocks[j] - } - if _, err := bc.InsertChain(blocks); err != nil { - t.Fatalf("failed to import pro-fork chain for expansion: %v", err) 
- } - if err := bc.triedb.Commit(bc.CurrentHeader().Root, false); err != nil { - t.Fatalf("failed to commit pro-fork head for expansion: %v", err) - } - bc.Stop() - blocks, _ = GenerateChain(&conConf, proBc.GetBlockByHash(proBc.CurrentBlock().Hash()), ethash.NewFaker(), genDb, 1, func(i int, gen *BlockGen) {}) - if _, err := proBc.InsertChain(blocks); err == nil { - t.Fatalf("pro-fork chain accepted contra-fork block: %v", blocks[0]) - } - // Create a proper pro-fork block for the pro-forker - blocks, _ = GenerateChain(&proConf, proBc.GetBlockByHash(proBc.CurrentBlock().Hash()), ethash.NewFaker(), genDb, 1, func(i int, gen *BlockGen) {}) - if _, err := proBc.InsertChain(blocks); err != nil { - t.Fatalf("pro-fork chain didn't accepted pro-fork block: %v", err) - } - } - // Verify that contra-forkers accept pro-fork extra-datas after forking finishes - bc, _ := NewBlockChain(rawdb.NewMemoryDatabase(), nil, congspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) - defer bc.Stop() - - blocks := conBc.GetBlocksFromHash(conBc.CurrentBlock().Hash(), int(conBc.CurrentBlock().Number.Uint64())) - for j := 0; j < len(blocks)/2; j++ { - blocks[j], blocks[len(blocks)-1-j] = blocks[len(blocks)-1-j], blocks[j] - } - if _, err := bc.InsertChain(blocks); err != nil { - t.Fatalf("failed to import contra-fork chain for expansion: %v", err) - } - if err := bc.triedb.Commit(bc.CurrentHeader().Root, false); err != nil { - t.Fatalf("failed to commit contra-fork head for expansion: %v", err) - } - blocks, _ = GenerateChain(&proConf, conBc.GetBlockByHash(conBc.CurrentBlock().Hash()), ethash.NewFaker(), genDb, 1, func(i int, gen *BlockGen) {}) - if _, err := conBc.InsertChain(blocks); err != nil { - t.Fatalf("contra-fork chain didn't accept pro-fork block post-fork: %v", err) - } - // Verify that pro-forkers accept contra-fork extra-datas after forking finishes - bc, _ = NewBlockChain(rawdb.NewMemoryDatabase(), nil, progspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) - defer bc.Stop() - - blocks = proBc.GetBlocksFromHash(proBc.CurrentBlock().Hash(), int(proBc.CurrentBlock().Number.Uint64())) - for j := 0; j < len(blocks)/2; j++ { - blocks[j], blocks[len(blocks)-1-j] = blocks[len(blocks)-1-j], blocks[j] - } - if _, err := bc.InsertChain(blocks); err != nil { - t.Fatalf("failed to import pro-fork chain for expansion: %v", err) - } - if err := bc.triedb.Commit(bc.CurrentHeader().Root, false); err != nil { - t.Fatalf("failed to commit pro-fork head for expansion: %v", err) - } - blocks, _ = GenerateChain(&conConf, proBc.GetBlockByHash(proBc.CurrentBlock().Hash()), ethash.NewFaker(), genDb, 1, func(i int, gen *BlockGen) {}) - if _, err := proBc.InsertChain(blocks); err != nil { - t.Fatalf("pro-fork chain didn't accept contra-fork block post-fork: %v", err) - } -} diff --git a/core/data_availability_test.go b/core/data_availability_test.go deleted file mode 100644 index 2269aee232..0000000000 --- a/core/data_availability_test.go +++ /dev/null @@ -1,436 +0,0 @@ -package core - -import ( - "crypto/rand" - "math/big" - "testing" - - "github.com/consensys/gnark-crypto/ecc/bls12-381/fr" - gokzg4844 "github.com/crate-crypto/go-kzg-4844" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/crypto/kzg4844" - "github.com/ethereum/go-ethereum/params" - "github.com/holiman/uint256" - "github.com/stretchr/testify/require" -) - -var ( - emptyBlob = kzg4844.Blob{} - emptyBlobCommit, _ = kzg4844.BlobToCommitment(emptyBlob) - emptyBlobProof, _ = 
kzg4844.ComputeBlobProof(emptyBlob, emptyBlobCommit) -) - -func TestIsDataAvailable(t *testing.T) { - hr := NewMockDAHeaderReader(params.ParliaTestChainConfig) - tests := []struct { - block *types.Block - chasingHead uint64 - withSidecar bool - err bool - }{ - { - block: types.NewBlockWithHeader(&types.Header{ - Number: big.NewInt(1), - }).WithBody(types.Transactions{ - createMockDATx(hr.Config(), nil), - createMockDATx(hr.Config(), &types.BlobTxSidecar{ - Blobs: []kzg4844.Blob{emptyBlob}, - Commitments: []kzg4844.Commitment{emptyBlobCommit}, - Proofs: []kzg4844.Proof{emptyBlobProof}, - }), - }, nil), - chasingHead: 1, - withSidecar: true, - err: false, - }, - { - block: types.NewBlockWithHeader(&types.Header{ - Number: big.NewInt(1), - }).WithBody(types.Transactions{ - createMockDATx(hr.Config(), nil), - createMockDATx(hr.Config(), nil), - }, nil), - chasingHead: 1, - withSidecar: true, - err: false, - }, - { - block: types.NewBlockWithHeader(&types.Header{ - Number: big.NewInt(1), - }).WithBody(types.Transactions{ - createMockDATx(hr.Config(), nil), - createMockDATx(hr.Config(), &types.BlobTxSidecar{ - Blobs: []kzg4844.Blob{emptyBlob, emptyBlob, emptyBlob}, - Commitments: []kzg4844.Commitment{emptyBlobCommit, emptyBlobCommit, emptyBlobCommit}, - Proofs: []kzg4844.Proof{emptyBlobProof}, - }), - }, nil), - chasingHead: 1, - withSidecar: false, - err: true, - }, - { - block: types.NewBlockWithHeader(&types.Header{ - Number: big.NewInt(1), - }).WithBody(types.Transactions{ - createMockDATx(hr.Config(), nil), - createMockDATx(hr.Config(), &types.BlobTxSidecar{ - Blobs: []kzg4844.Blob{emptyBlob}, - Commitments: []kzg4844.Commitment{emptyBlobCommit}, - Proofs: []kzg4844.Proof{emptyBlobProof}, - }), - createMockDATx(hr.Config(), &types.BlobTxSidecar{ - Blobs: []kzg4844.Blob{emptyBlob, emptyBlob}, - Commitments: []kzg4844.Commitment{emptyBlobCommit, emptyBlobCommit}, - Proofs: []kzg4844.Proof{emptyBlobProof, emptyBlobProof}, - }), - }, nil), - chasingHead: 1, - withSidecar: true, - err: false, - }, - - { - block: types.NewBlockWithHeader(&types.Header{ - Number: big.NewInt(1), - }).WithBody(types.Transactions{ - createMockDATx(hr.Config(), nil), - createMockDATx(hr.Config(), &types.BlobTxSidecar{ - Blobs: []kzg4844.Blob{emptyBlob, emptyBlob, emptyBlob, emptyBlob}, - Commitments: []kzg4844.Commitment{emptyBlobCommit, emptyBlobCommit, emptyBlobCommit, emptyBlobCommit}, - Proofs: []kzg4844.Proof{emptyBlobProof, emptyBlobProof, emptyBlobProof, emptyBlobProof}, - }), - createMockDATx(hr.Config(), &types.BlobTxSidecar{ - Blobs: []kzg4844.Blob{emptyBlob, emptyBlob, emptyBlob, emptyBlob}, - Commitments: []kzg4844.Commitment{emptyBlobCommit, emptyBlobCommit, emptyBlobCommit, emptyBlobCommit}, - Proofs: []kzg4844.Proof{emptyBlobProof, emptyBlobProof, emptyBlobProof, emptyBlobProof}, - }), - }, nil), - chasingHead: params.MinBlocksForBlobRequests + 1, - withSidecar: true, - err: true, - }, - { - block: types.NewBlockWithHeader(&types.Header{ - Number: big.NewInt(0), - }).WithBody(types.Transactions{ - createMockDATx(hr.Config(), nil), - createMockDATx(hr.Config(), &types.BlobTxSidecar{ - Blobs: []kzg4844.Blob{emptyBlob}, - Commitments: []kzg4844.Commitment{emptyBlobCommit}, - Proofs: []kzg4844.Proof{emptyBlobProof}, - }), - }, nil), - chasingHead: params.MinBlocksForBlobRequests + 1, - withSidecar: false, - err: false, - }, - } - - for i, item := range tests { - if item.withSidecar { - item.block = item.block.WithSidecars(collectBlobsFromTxs(item.block.Header(), item.block.Transactions())) - } - 
hr.setChasingHead(item.chasingHead) - err := IsDataAvailable(hr, item.block) - if item.err { - require.Error(t, err, i) - t.Log(err) - continue - } - require.NoError(t, err, i) - } -} - -func TestCheckDataAvailableInBatch(t *testing.T) { - hr := NewMockDAHeaderReader(params.ParliaTestChainConfig) - tests := []struct { - chain types.Blocks - err bool - index int - }{ - { - chain: types.Blocks{ - types.NewBlockWithHeader(&types.Header{ - Number: big.NewInt(1), - }).WithBody(types.Transactions{ - createMockDATx(hr.Config(), nil), - createMockDATx(hr.Config(), &types.BlobTxSidecar{ - Blobs: []kzg4844.Blob{emptyBlob}, - Commitments: []kzg4844.Commitment{emptyBlobCommit}, - Proofs: []kzg4844.Proof{emptyBlobProof}, - }), - createMockDATx(hr.Config(), &types.BlobTxSidecar{ - Blobs: []kzg4844.Blob{emptyBlob, emptyBlob}, - Commitments: []kzg4844.Commitment{emptyBlobCommit, emptyBlobCommit}, - Proofs: []kzg4844.Proof{emptyBlobProof, emptyBlobProof}, - }), - }, nil), - types.NewBlockWithHeader(&types.Header{ - Number: big.NewInt(2), - }).WithBody(types.Transactions{ - createMockDATx(hr.Config(), &types.BlobTxSidecar{ - Blobs: []kzg4844.Blob{emptyBlob, emptyBlob}, - Commitments: []kzg4844.Commitment{emptyBlobCommit, emptyBlobCommit}, - Proofs: []kzg4844.Proof{emptyBlobProof, emptyBlobProof}, - }), - }, nil), - }, - err: false, - }, - { - chain: types.Blocks{ - types.NewBlockWithHeader(&types.Header{ - Number: big.NewInt(1), - }).WithBody(types.Transactions{ - createMockDATx(hr.Config(), &types.BlobTxSidecar{ - Blobs: []kzg4844.Blob{emptyBlob}, - Commitments: []kzg4844.Commitment{emptyBlobCommit}, - Proofs: []kzg4844.Proof{emptyBlobProof}, - }), - }, nil), - types.NewBlockWithHeader(&types.Header{ - Number: big.NewInt(2), - }).WithBody(types.Transactions{ - createMockDATx(hr.Config(), &types.BlobTxSidecar{ - Blobs: []kzg4844.Blob{emptyBlob, emptyBlob, emptyBlob}, - Commitments: []kzg4844.Commitment{emptyBlobCommit, emptyBlobCommit, emptyBlobCommit}, - Proofs: []kzg4844.Proof{emptyBlobProof}, - }), - }, nil), - types.NewBlockWithHeader(&types.Header{ - Number: big.NewInt(3), - }).WithBody(types.Transactions{ - createMockDATx(hr.Config(), &types.BlobTxSidecar{ - Blobs: []kzg4844.Blob{emptyBlob}, - Commitments: []kzg4844.Commitment{emptyBlobCommit}, - Proofs: []kzg4844.Proof{emptyBlobProof}, - }), - }, nil), - }, - err: true, - index: 1, - }, - { - chain: types.Blocks{ - types.NewBlockWithHeader(&types.Header{ - Number: big.NewInt(1), - }).WithBody(types.Transactions{ - createMockDATx(hr.Config(), nil), - createMockDATx(hr.Config(), &types.BlobTxSidecar{ - Blobs: []kzg4844.Blob{emptyBlob, emptyBlob, emptyBlob, emptyBlob}, - Commitments: []kzg4844.Commitment{emptyBlobCommit, emptyBlobCommit, emptyBlobCommit, emptyBlobCommit}, - Proofs: []kzg4844.Proof{emptyBlobProof, emptyBlobProof, emptyBlobProof, emptyBlobProof}, - }), - createMockDATx(hr.Config(), &types.BlobTxSidecar{ - Blobs: []kzg4844.Blob{emptyBlob, emptyBlob, emptyBlob, emptyBlob}, - Commitments: []kzg4844.Commitment{emptyBlobCommit, emptyBlobCommit, emptyBlobCommit, emptyBlobCommit}, - Proofs: []kzg4844.Proof{emptyBlobProof, emptyBlobProof, emptyBlobProof, emptyBlobProof}, - }), - }, nil), - }, - err: true, - index: 0, - }, - } - - for i, item := range tests { - for j, block := range item.chain { - item.chain[j] = block.WithSidecars(collectBlobsFromTxs(block.Header(), block.Transactions())) - } - index, err := CheckDataAvailableInBatch(hr, item.chain) - if item.err { - t.Log(index, err) - require.Error(t, err, i) - require.Equal(t, item.index, 
index, i) - continue - } - require.NoError(t, err, i) - } -} - -func BenchmarkEmptySidecarDAChecking(b *testing.B) { - hr := NewMockDAHeaderReader(params.ParliaTestChainConfig) - block := types.NewBlockWithHeader(&types.Header{ - Number: big.NewInt(1), - }).WithBody(types.Transactions{ - createMockDATx(hr.Config(), emptySidecar()), - createMockDATx(hr.Config(), emptySidecar()), - createMockDATx(hr.Config(), emptySidecar()), - createMockDATx(hr.Config(), emptySidecar()), - createMockDATx(hr.Config(), emptySidecar()), - createMockDATx(hr.Config(), emptySidecar()), - }, nil) - block = block.WithSidecars(collectBlobsFromTxs(block.Header(), block.Transactions())) - b.ResetTimer() - for i := 0; i < b.N; i++ { - IsDataAvailable(hr, block) - } -} - -func BenchmarkRandomSidecarDAChecking(b *testing.B) { - hr := NewMockDAHeaderReader(params.ParliaTestChainConfig) - const count = 10 - blocks := make([]*types.Block, count) - for i := 0; i < len(blocks); i++ { - block := types.NewBlockWithHeader(&types.Header{ - Number: big.NewInt(1), - }).WithBody(types.Transactions{ - createMockDATx(hr.Config(), randomSidecar()), - createMockDATx(hr.Config(), randomSidecar()), - createMockDATx(hr.Config(), randomSidecar()), - createMockDATx(hr.Config(), randomSidecar()), - createMockDATx(hr.Config(), randomSidecar()), - createMockDATx(hr.Config(), randomSidecar()), - }, nil) - block = block.WithSidecars(collectBlobsFromTxs(block.Header(), block.Transactions())) - blocks[i] = block - } - b.ResetTimer() - for i := 0; i < b.N; i++ { - IsDataAvailable(hr, blocks[i%count]) - } -} - -func collectBlobsFromTxs(header *types.Header, txs types.Transactions) types.BlobSidecars { - sidecars := make(types.BlobSidecars, 0, len(txs)) - for i, tx := range txs { - sidecar := types.NewBlobSidecarFromTx(tx) - if sidecar == nil { - continue - } - sidecar.TxIndex = uint64(i) - sidecar.TxHash = tx.Hash() - sidecar.BlockNumber = header.Number - sidecar.BlockHash = header.Hash() - sidecars = append(sidecars, sidecar) - } - return sidecars -} - -type mockDAHeaderReader struct { - config *params.ChainConfig - chasingHead uint64 -} - -func NewMockDAHeaderReader(config *params.ChainConfig) *mockDAHeaderReader { - return &mockDAHeaderReader{ - config: config, - chasingHead: 0, - } -} - -func (r *mockDAHeaderReader) setChasingHead(h uint64) { - r.chasingHead = h -} - -func (r *mockDAHeaderReader) Config() *params.ChainConfig { - return r.config -} - -func (r *mockDAHeaderReader) CurrentHeader() *types.Header { - return &types.Header{ - Number: new(big.Int).SetUint64(r.chasingHead), - } -} - -func (r *mockDAHeaderReader) ChasingHead() *types.Header { - return &types.Header{ - Number: new(big.Int).SetUint64(r.chasingHead), - } -} - -func (r *mockDAHeaderReader) GenesisHeader() *types.Header { - panic("not supported") -} - -func (r *mockDAHeaderReader) GetHeader(hash common.Hash, number uint64) *types.Header { - panic("not supported") -} - -func (r *mockDAHeaderReader) GetHeaderByNumber(number uint64) *types.Header { - panic("not supported") -} - -func (r *mockDAHeaderReader) GetHeaderByHash(hash common.Hash) *types.Header { - panic("not supported") -} - -func (r *mockDAHeaderReader) GetTd(hash common.Hash, number uint64) *big.Int { - panic("not supported") -} - -func (r *mockDAHeaderReader) GetHighestVerifiedHeader() *types.Header { - panic("not supported") -} - -func createMockDATx(config *params.ChainConfig, sidecar *types.BlobTxSidecar) *types.Transaction { - if sidecar == nil { - tx := &types.DynamicFeeTx{ - ChainID: config.ChainID, - Nonce: 0, 
- GasTipCap: big.NewInt(22), - GasFeeCap: big.NewInt(5), - Gas: 25000, - To: &common.Address{0x03, 0x04, 0x05}, - Value: big.NewInt(99), - Data: make([]byte, 50), - } - return types.NewTx(tx) - } - tx := &types.BlobTx{ - ChainID: uint256.MustFromBig(config.ChainID), - Nonce: 5, - GasTipCap: uint256.NewInt(22), - GasFeeCap: uint256.NewInt(5), - Gas: 25000, - To: common.Address{0x03, 0x04, 0x05}, - Value: uint256.NewInt(99), - Data: make([]byte, 50), - BlobFeeCap: uint256.NewInt(15), - BlobHashes: sidecar.BlobHashes(), - Sidecar: sidecar, - } - return types.NewTx(tx) -} - -func randFieldElement() [32]byte { - bytes := make([]byte, 32) - _, err := rand.Read(bytes) - if err != nil { - panic("failed to get random field element") - } - var r fr.Element - r.SetBytes(bytes) - - return gokzg4844.SerializeScalar(r) -} - -func randBlob() kzg4844.Blob { - var blob kzg4844.Blob - for i := 0; i < len(blob); i += gokzg4844.SerializedScalarSize { - fieldElementBytes := randFieldElement() - copy(blob[i:i+gokzg4844.SerializedScalarSize], fieldElementBytes[:]) - } - return blob -} - -func randomSidecar() *types.BlobTxSidecar { - blob := randBlob() - commitment, _ := kzg4844.BlobToCommitment(blob) - proof, _ := kzg4844.ComputeBlobProof(blob, commitment) - return &types.BlobTxSidecar{ - Blobs: []kzg4844.Blob{blob}, - Commitments: []kzg4844.Commitment{commitment}, - Proofs: []kzg4844.Proof{proof}, - } -} - -func emptySidecar() *types.BlobTxSidecar { - return &types.BlobTxSidecar{ - Blobs: []kzg4844.Blob{emptyBlob}, - Commitments: []kzg4844.Commitment{emptyBlobCommit}, - Proofs: []kzg4844.Proof{emptyBlobProof}, - } -} diff --git a/core/eip3529tests/eip3529_ethash_test.go b/core/eip3529tests/eip3529_ethash_test.go deleted file mode 100644 index f58afad1f4..0000000000 --- a/core/eip3529tests/eip3529_ethash_test.go +++ /dev/null @@ -1,140 +0,0 @@ -package eip3529tests - -import ( - "math/big" - "testing" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/consensus/ethash" - "github.com/ethereum/go-ethereum/core/vm" - "github.com/ethereum/go-ethereum/params" -) - -func postLondonConfig() *params.ChainConfig { - config := *params.TestChainConfig - config.LondonBlock = big.NewInt(0) - return &config -} - -func preLondonConfig() *params.ChainConfig { - config := *params.TestChainConfig - config.LondonBlock = nil - return &config -} - -func TestSelfDestructGasPreLondon(t *testing.T) { - bytecode := []byte{ - byte(vm.PC), - byte(vm.SELFDESTRUCT), - } - - // Expected gas is (intrinsic + pc + cold load (due to legacy tx) + selfdestruct cost ) / 2 - // The refund of 24000 gas (i.e. params.SelfdestructRefundGas) is not applied since refunds pre-EIP3529 are - // capped to half of the transaction's gas. - expectedGasUsed := (params.TxGas + vm.GasQuickStep + params.ColdAccountAccessCostEIP2929 + params.SelfdestructGasEIP150) / 2 - TestGasUsage(t, preLondonConfig(), ethash.NewFaker(), bytecode, nil, 60_000, expectedGasUsed) -} - -func TestSstoreModifyGasPreLondon(t *testing.T) { - bytecode := []byte{ - byte(vm.PUSH1), 0x3, // value - byte(vm.PUSH1), 0x1, // location - byte(vm.SSTORE), // Set slot[1] = 3 - } - // initialize contract storage - initialStorage := make(map[common.Hash]common.Hash) - // Populate two slots - initialStorage[common.HexToHash("01")] = common.HexToHash("01") - initialStorage[common.HexToHash("02")] = common.HexToHash("02") - // Expected gas is intrinsic + 2*pushGas + cold load (due to legacy tx) + SstoreReset (a->b such that a!=0) - // i.e. 
no refund - expectedGasUsed := params.TxGas + 2*vm.GasFastestStep + params.ColdSloadCostEIP2929 + (params.SstoreResetGasEIP2200 - params.ColdSloadCostEIP2929) - TestGasUsage(t, preLondonConfig(), ethash.NewFaker(), bytecode, initialStorage, 60_000, expectedGasUsed) -} - -func TestSstoreClearGasPreLondon(t *testing.T) { - bytecode := []byte{ - byte(vm.PUSH1), 0x0, // value - byte(vm.PUSH1), 0x1, // location - byte(vm.SSTORE), // Set slot[1] = 0 - } - // initialize contract storage - initialStorage := make(map[common.Hash]common.Hash) - // Populate two slots - initialStorage[common.HexToHash("01")] = common.HexToHash("01") - initialStorage[common.HexToHash("02")] = common.HexToHash("02") - - // Expected gas is (intrinsic + 2*pushGas + cold load (due to legacy tx) + SstoreReset (a->b such that a!=0) ) / 2 - // The refund of params.SstoreClearsScheduleRefundEIP2200 is not applied because of the refund cap to half the gas cost. - expectedGasUsage := (params.TxGas + 2*vm.GasFastestStep + params.ColdSloadCostEIP2929 + (params.SstoreResetGasEIP2200 - params.ColdSloadCostEIP2929)) / 2 - TestGasUsage(t, preLondonConfig(), ethash.NewFaker(), bytecode, initialStorage, 60_000, expectedGasUsage) -} - -func TestSstoreGasPreLondon(t *testing.T) { - bytecode := []byte{ - byte(vm.PUSH1), 0x3, // value - byte(vm.PUSH1), 0x3, // location - byte(vm.SSTORE), // Set slot[3] = 3 - } - // Expected gas is intrinsic + 2*pushGas + cold load (due to legacy tx) + SstoreGas - // i.e. No refund - expectedGasUsed := params.TxGas + 2*vm.GasFastestStep + params.ColdSloadCostEIP2929 + params.SstoreSetGasEIP2200 - TestGasUsage(t, preLondonConfig(), ethash.NewFaker(), bytecode, nil, 60_000, expectedGasUsed) -} - -func TestSelfDestructGasPostLondon(t *testing.T) { - bytecode := []byte{ - byte(vm.PC), - byte(vm.SELFDESTRUCT), - } - // Expected gas is intrinsic + pc + cold load (due to legacy tx) + SelfDestructGas - // i.e. No refund - expectedGasUsed := params.TxGas + vm.GasQuickStep + params.ColdAccountAccessCostEIP2929 + params.SelfdestructGasEIP150 - TestGasUsage(t, postLondonConfig(), ethash.NewFaker(), bytecode, nil, 60_000, expectedGasUsed) -} - -func TestSstoreGasPostLondon(t *testing.T) { - bytecode := []byte{ - byte(vm.PUSH1), 0x3, // value - byte(vm.PUSH1), 0x3, // location - byte(vm.SSTORE), // Set slot[3] = 3 - } - // Expected gas is intrinsic + 2*pushGas + cold load (due to legacy tx) + SstoreGas - // i.e. No refund - expectedGasUsed := params.TxGas + 2*vm.GasFastestStep + params.ColdSloadCostEIP2929 + params.SstoreSetGasEIP2200 - TestGasUsage(t, postLondonConfig(), ethash.NewFaker(), bytecode, nil, 60_000, expectedGasUsed) -} - -func TestSstoreModifyGasPostLondon(t *testing.T) { - bytecode := []byte{ - byte(vm.PUSH1), 0x3, // value - byte(vm.PUSH1), 0x1, // location - byte(vm.SSTORE), // Set slot[1] = 3 - } - // initialize contract storage - initialStorage := make(map[common.Hash]common.Hash) - // Populate two slots - initialStorage[common.HexToHash("01")] = common.HexToHash("01") - initialStorage[common.HexToHash("02")] = common.HexToHash("02") - // Expected gas is intrinsic + 2*pushGas + cold load (due to legacy tx) + SstoreReset (a->b such that a!=0) - // i.e. 
No refund - expectedGasUsed := params.TxGas + 2*vm.GasFastestStep + params.ColdSloadCostEIP2929 + (params.SstoreResetGasEIP2200 - params.ColdSloadCostEIP2929) - TestGasUsage(t, postLondonConfig(), ethash.NewFaker(), bytecode, initialStorage, 60_000, expectedGasUsed) -} - -func TestSstoreClearGasPostLondon(t *testing.T) { - bytecode := []byte{ - byte(vm.PUSH1), 0x0, // value - byte(vm.PUSH1), 0x1, // location - byte(vm.SSTORE), // Set slot[1] = 0 - } - // initialize contract storage - initialStorage := make(map[common.Hash]common.Hash) - // Populate two slots - initialStorage[common.HexToHash("01")] = common.HexToHash("01") - initialStorage[common.HexToHash("02")] = common.HexToHash("02") - - // Expected gas is intrinsic + 2*pushGas + cold load (due to legacy tx) + SstoreReset (a->b such that a!=0) - sstoreClearGasRefund - expectedGasUsage := params.TxGas + 2*vm.GasFastestStep + params.ColdSloadCostEIP2929 + (params.SstoreResetGasEIP2200 - params.ColdSloadCostEIP2929) - params.SstoreClearsScheduleRefundEIP3529 - TestGasUsage(t, postLondonConfig(), ethash.NewFaker(), bytecode, initialStorage, 60_000, expectedGasUsage) -} diff --git a/core/eip3529tests/eip3529_parlia_test.go b/core/eip3529tests/eip3529_parlia_test.go deleted file mode 100644 index a5b86e162d..0000000000 --- a/core/eip3529tests/eip3529_parlia_test.go +++ /dev/null @@ -1,151 +0,0 @@ -package eip3529tests - -import ( - "testing" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/consensus/ethash" - "github.com/ethereum/go-ethereum/core/vm" - "github.com/ethereum/go-ethereum/params" -) - -func postHertzPreShanghaiConfig() *params.ChainConfig { - config := *params.ParliaTestChainConfig - config.ShanghaiTime = nil - config.KeplerTime = nil - config.FeynmanTime = nil - config.FeynmanFixTime = nil - config.CancunTime = nil - return &config -} - -func preHertzConfig() *params.ChainConfig { - config := *params.ParliaTestChainConfig - config.LondonBlock = nil - config.BerlinBlock = nil - config.HertzBlock = nil - config.HertzfixBlock = nil - config.ShanghaiTime = nil - config.KeplerTime = nil - config.FeynmanTime = nil - config.FeynmanFixTime = nil - config.CancunTime = nil - return &config -} - -func TestSelfDestructGasPreHertz(t *testing.T) { - bytecode := []byte{ - byte(vm.PC), - byte(vm.SELFDESTRUCT), - } - - // Expected gas is (intrinsic + selfdestruct cost ) / 2 - // The refund of 24000 gas (i.e. params.SelfdestructRefundGas) is not applied since refunds pre-EIP3529 are - // capped to half of the transaction's gas. - expectedGasUsed := (params.TxGas + vm.GasQuickStep + params.SelfdestructGasEIP150) / 2 - TestGasUsage(t, preHertzConfig(), ethash.NewFaker(), bytecode, nil, 60_000, expectedGasUsed) -} - -func TestSstoreClearGasPreHertz(t *testing.T) { - bytecode := []byte{ - byte(vm.PUSH1), 0x0, // value - byte(vm.PUSH1), 0x1, // location - byte(vm.SSTORE), // Set slot[1] = 0 - } - // initialize contract storage - initialStorage := make(map[common.Hash]common.Hash) - // Populate two slots - initialStorage[common.HexToHash("01")] = common.HexToHash("01") - initialStorage[common.HexToHash("02")] = common.HexToHash("02") - - // Expected gas is (intrinsic + 2*pushGas + SstoreReset (a->b such that a!=0) ) / 2 - // The refund of params.SstoreClearsScheduleRefundEIP2200 is not applied because of the refund cap to half the gas cost. 
- expectedGasUsage := (params.TxGas + 2*vm.GasFastestStep + params.SstoreResetGasEIP2200) / 2 - TestGasUsage(t, preHertzConfig(), ethash.NewFaker(), bytecode, initialStorage, 60_000, expectedGasUsage) -} - -func TestSstoreModifyGasPreHertz(t *testing.T) { - bytecode := []byte{ - byte(vm.PUSH1), 0x3, // value - byte(vm.PUSH1), 0x1, // location - byte(vm.SSTORE), // Set slot[1] = 3 - } - // initialize contract storage - initialStorage := make(map[common.Hash]common.Hash) - // Populate two slots - initialStorage[common.HexToHash("01")] = common.HexToHash("01") - initialStorage[common.HexToHash("02")] = common.HexToHash("02") - // Expected gas is intrinsic + 2*pushGas + SstoreReset (a->b such that a!=0) - // i.e. no refund - expectedGasUsed := params.TxGas + 2*vm.GasFastestStep + params.SstoreResetGasEIP2200 - TestGasUsage(t, preHertzConfig(), ethash.NewFaker(), bytecode, initialStorage, 60_000, expectedGasUsed) -} - -func TestSstoreGasPreHertz(t *testing.T) { - bytecode := []byte{ - byte(vm.PUSH1), 0x3, // value - byte(vm.PUSH1), 0x3, // location - byte(vm.SSTORE), // Set slot[3] = 3 - } - // Expected gas is intrinsic + 2*pushGas + SstoreGas - // i.e. No refund - expectedGasUsed := params.TxGas + 2*vm.GasFastestStep + params.SstoreSetGasEIP2200 - TestGasUsage(t, preHertzConfig(), ethash.NewFaker(), bytecode, nil, 60_000, expectedGasUsed) -} - -func TestSelfDestructGasPostHertz(t *testing.T) { - bytecode := []byte{ - byte(vm.PC), - byte(vm.SELFDESTRUCT), - } - // Expected gas is intrinsic + pc + cold load (due to legacy tx) + SelfDestructGas - // i.e. No refund - expectedGasUsed := params.TxGas + vm.GasQuickStep + params.ColdAccountAccessCostEIP2929 + params.SelfdestructGasEIP150 - TestGasUsage(t, postHertzPreShanghaiConfig(), ethash.NewFaker(), bytecode, nil, 60_000, expectedGasUsed) -} - -func TestSstoreGasPostHertz(t *testing.T) { - bytecode := []byte{ - byte(vm.PUSH1), 0x3, // value - byte(vm.PUSH1), 0x3, // location - byte(vm.SSTORE), // Set slot[3] = 3 - } - // Expected gas is intrinsic + 2*pushGas + cold load (due to legacy tx) + SstoreGas - // i.e. No refund - expectedGasUsed := params.TxGas + 2*vm.GasFastestStep + params.ColdSloadCostEIP2929 + params.SstoreSetGasEIP2200 - TestGasUsage(t, postHertzPreShanghaiConfig(), ethash.NewFaker(), bytecode, nil, 60_000, expectedGasUsed) -} - -func TestSstoreModifyGasPostHertz(t *testing.T) { - bytecode := []byte{ - byte(vm.PUSH1), 0x3, // value - byte(vm.PUSH1), 0x1, // location - byte(vm.SSTORE), // Set slot[1] = 3 - } - // initialize contract storage - initialStorage := make(map[common.Hash]common.Hash) - // Populate two slots - initialStorage[common.HexToHash("01")] = common.HexToHash("01") - initialStorage[common.HexToHash("02")] = common.HexToHash("02") - // Expected gas is intrinsic + 2*pushGas + cold load (due to legacy tx) + SstoreReset (a->b such that a!=0) - // i.e. 
No refund - expectedGasUsed := params.TxGas + 2*vm.GasFastestStep + params.SstoreResetGasEIP2200 - TestGasUsage(t, postHertzPreShanghaiConfig(), ethash.NewFaker(), bytecode, initialStorage, 60_000, expectedGasUsed) -} - -func TestSstoreClearGasPostHertz(t *testing.T) { - bytecode := []byte{ - byte(vm.PUSH1), 0x0, // value - byte(vm.PUSH1), 0x1, // location - byte(vm.SSTORE), // Set slot[1] = 0 - } - // initialize contract storage - initialStorage := make(map[common.Hash]common.Hash) - // Populate two slots - initialStorage[common.HexToHash("01")] = common.HexToHash("01") - initialStorage[common.HexToHash("02")] = common.HexToHash("02") - - // Expected gas is intrinsic + 2*pushGas + SstoreReset (a->b such that a!=0) - sstoreClearGasRefund - expectedGasUsage := params.TxGas + 2*vm.GasFastestStep + params.SstoreResetGasEIP2200 - params.SstoreClearsScheduleRefundEIP3529 - TestGasUsage(t, postHertzPreShanghaiConfig(), ethash.NewFaker(), bytecode, initialStorage, 60_000, expectedGasUsage) -} diff --git a/core/eip3529tests/eip3529_test_util.go b/core/eip3529tests/eip3529_test_util.go deleted file mode 100644 index d2448bb332..0000000000 --- a/core/eip3529tests/eip3529_test_util.go +++ /dev/null @@ -1,80 +0,0 @@ -package eip3529tests - -import ( - "math/big" - "testing" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/consensus" - "github.com/ethereum/go-ethereum/core" - "github.com/ethereum/go-ethereum/core/rawdb" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/core/vm" - "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/params" - "github.com/ethereum/go-ethereum/triedb" -) - -func newGwei(n int64) *big.Int { - return new(big.Int).Mul(big.NewInt(n), big.NewInt(params.GWei)) -} - -// Test the gas used by running a transaction sent to a smart contract with given bytecode and storage. 
-func TestGasUsage(t *testing.T, config *params.ChainConfig, engine consensus.Engine, bytecode []byte, initialStorage map[common.Hash]common.Hash, initialGas, expectedGasUsed uint64) { - var ( - aa = common.HexToAddress("0x000000000000000000000000000000000000aaaa") - - // Generate a canonical chain to act as the main dataset - db = rawdb.NewMemoryDatabase() - - // A sender who makes transactions, has some funds - key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") - address = crypto.PubkeyToAddress(key.PublicKey) - balanceBefore = big.NewInt(1000000000000000) - gspec = &core.Genesis{ - Config: config, - Alloc: types.GenesisAlloc{ - address: {Balance: balanceBefore}, - aa: { - Code: bytecode, - Storage: initialStorage, - Nonce: 0, - Balance: big.NewInt(0), - }, - }, - } - genesis = gspec.MustCommit(db, triedb.NewDatabase(db, nil)) - ) - - blocks, _ := core.GenerateChain(gspec.Config, genesis, engine, db, 1, func(i int, b *core.BlockGen) { - b.SetCoinbase(common.Address{1}) - - // One transaction to 0xAAAA - signer := types.LatestSigner(gspec.Config) - tx, _ := types.SignNewTx(key, signer, &types.LegacyTx{ - Nonce: 0, - To: &aa, - Gas: initialGas, - GasPrice: newGwei(5), - }) - b.AddTx(tx) - }) - - // Import the canonical chain - diskdb := rawdb.NewMemoryDatabase() - gspec.MustCommit(diskdb, triedb.NewDatabase(diskdb, nil)) - - chain, err := core.NewBlockChain(diskdb, nil, gspec, nil, engine, vm.Config{}, nil, nil) - if err != nil { - t.Fatalf("failed to create tester chain: %v", err) - } - if n, err := chain.InsertChain(blocks); err != nil { - t.Fatalf("block %d: failed to insert into chain: %v", n, err) - } - - block := chain.GetBlockByNumber(1) - - if block.GasUsed() != expectedGasUsed { - t.Fatalf("incorrect amount of gas spent: expected %d, got %d", expectedGasUsed, block.GasUsed()) - } -} diff --git a/core/genesis_test.go b/core/genesis_test.go deleted file mode 100644 index 4b280bcf13..0000000000 --- a/core/genesis_test.go +++ /dev/null @@ -1,342 +0,0 @@ -// Copyright 2017 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . 
- -package core - -import ( - "bytes" - "encoding/json" - "math/big" - "reflect" - "testing" - - "github.com/davecgh/go-spew/spew" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/consensus/ethash" - "github.com/ethereum/go-ethereum/core/rawdb" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/core/vm" - "github.com/ethereum/go-ethereum/ethdb" - "github.com/ethereum/go-ethereum/params" - "github.com/ethereum/go-ethereum/triedb" - "github.com/ethereum/go-ethereum/triedb/pathdb" -) - -func TestSetupGenesis(t *testing.T) { - testSetupGenesis(t, rawdb.HashScheme) - testSetupGenesis(t, rawdb.PathScheme) -} - -func testSetupGenesis(t *testing.T, scheme string) { - var ( - customghash = common.HexToHash("0x89c99d90b79719238d2645c7642f2c9295246e80775b38cfd162b696817fbd50") - customg = Genesis{ - Config: ¶ms.ChainConfig{HomesteadBlock: big.NewInt(3)}, - Alloc: types.GenesisAlloc{ - {1}: {Balance: big.NewInt(1), Storage: map[common.Hash]common.Hash{{1}: {1}}}, - }, - } - oldcustomg = customg - ) - oldcustomg.Config = ¶ms.ChainConfig{HomesteadBlock: big.NewInt(2)} - - tests := []struct { - name string - fn func(ethdb.Database) (*params.ChainConfig, common.Hash, error) - wantConfig *params.ChainConfig - wantHash common.Hash - wantErr error - }{ - { - name: "genesis without ChainConfig", - fn: func(db ethdb.Database) (*params.ChainConfig, common.Hash, error) { - return SetupGenesisBlock(db, triedb.NewDatabase(db, newDbConfig(scheme)), new(Genesis)) - }, - wantErr: errGenesisNoConfig, - wantConfig: params.AllEthashProtocolChanges, - }, - { - name: "no block in DB, genesis == nil", - fn: func(db ethdb.Database) (*params.ChainConfig, common.Hash, error) { - return SetupGenesisBlock(db, triedb.NewDatabase(db, newDbConfig(scheme)), nil) - }, - wantHash: params.BSCGenesisHash, - wantConfig: params.BSCChainConfig, - }, - { - name: "mainnet block in DB, genesis == nil", - fn: func(db ethdb.Database) (*params.ChainConfig, common.Hash, error) { - DefaultGenesisBlock().MustCommit(db, triedb.NewDatabase(db, newDbConfig(scheme))) - return SetupGenesisBlock(db, triedb.NewDatabase(db, newDbConfig(scheme)), nil) - }, - wantHash: params.MainnetGenesisHash, - wantConfig: params.MainnetChainConfig, - }, - { - name: "custom block in DB, genesis == nil", - fn: func(db ethdb.Database) (*params.ChainConfig, common.Hash, error) { - tdb := triedb.NewDatabase(db, newDbConfig(scheme)) - customg.Commit(db, tdb) - return SetupGenesisBlock(db, tdb, nil) - }, - wantHash: customghash, - wantConfig: customg.Config, - }, - { - name: "custom block in DB, genesis == chapel", - fn: func(db ethdb.Database) (*params.ChainConfig, common.Hash, error) { - tdb := triedb.NewDatabase(db, newDbConfig(scheme)) - customg.Commit(db, tdb) - return SetupGenesisBlock(db, tdb, DefaultChapelGenesisBlock()) - }, - wantErr: &GenesisMismatchError{Stored: customghash, New: params.ChapelGenesisHash}, - wantHash: params.ChapelGenesisHash, - wantConfig: params.ChapelChainConfig, - }, - { - name: "compatible config in DB", - fn: func(db ethdb.Database) (*params.ChainConfig, common.Hash, error) { - tdb := triedb.NewDatabase(db, newDbConfig(scheme)) - oldcustomg.Commit(db, tdb) - return SetupGenesisBlock(db, tdb, &customg) - }, - wantHash: customghash, - wantConfig: customg.Config, - }, - { - name: "incompatible config in DB", - fn: func(db ethdb.Database) (*params.ChainConfig, common.Hash, error) { - // Commit the 'old' genesis block with Homestead transition at #2. 
- // Advance to block #4, past the homestead transition block of customg. - tdb := triedb.NewDatabase(db, newDbConfig(scheme)) - oldcustomg.Commit(db, tdb) - - bc, _ := NewBlockChain(db, DefaultCacheConfigWithScheme(scheme), &oldcustomg, nil, ethash.NewFullFaker(), vm.Config{}, nil, nil) - defer bc.Stop() - - _, blocks, _ := GenerateChainWithGenesis(&oldcustomg, ethash.NewFaker(), 4, nil) - bc.InsertChain(blocks) - - // This should return a compatibility error. - return SetupGenesisBlock(db, tdb, &customg) - }, - wantHash: customghash, - wantConfig: customg.Config, - wantErr: ¶ms.ConfigCompatError{ - What: "Homestead fork block", - StoredBlock: big.NewInt(2), - NewBlock: big.NewInt(3), - RewindToBlock: 1, - }, - }, - } - - for _, test := range tests { - db := rawdb.NewMemoryDatabase() - config, hash, err := test.fn(db) - // Check the return values. - if !reflect.DeepEqual(err, test.wantErr) { - spew := spew.ConfigState{DisablePointerAddresses: true, DisableCapacities: true} - t.Errorf("%s: returned error %#v, want %#v", test.name, spew.NewFormatter(err), spew.NewFormatter(test.wantErr)) - } - if !reflect.DeepEqual(config, test.wantConfig) { - t.Errorf("%s:\nreturned %v\nwant %v", test.name, config, test.wantConfig) - } - if hash != test.wantHash { - t.Errorf("%s: returned hash %s, want %s", test.name, hash.Hex(), test.wantHash.Hex()) - } else if err == nil { - // Check database content. - stored := rawdb.ReadBlock(db, test.wantHash, 0) - if stored.Hash() != test.wantHash { - t.Errorf("%s: block in DB has hash %s, want %s", test.name, stored.Hash(), test.wantHash) - } - } - } -} - -// TestGenesisHashes checks the congruity of default genesis data to -// corresponding hardcoded genesis hash values. -func TestGenesisHashes(t *testing.T) { - for i, c := range []struct { - genesis *Genesis - want common.Hash - }{ - {DefaultGenesisBlock(), params.MainnetGenesisHash}, - } { - // Test via MustCommit - db := rawdb.NewMemoryDatabase() - if have := c.genesis.MustCommit(db, triedb.NewDatabase(db, triedb.HashDefaults)).Hash(); have != c.want { - t.Errorf("case: %d a), want: %s, got: %s", i, c.want.Hex(), have.Hex()) - } - // Test via ToBlock - if have := c.genesis.ToBlock().Hash(); have != c.want { - t.Errorf("case: %d a), want: %s, got: %s", i, c.want.Hex(), have.Hex()) - } - } -} - -func TestGenesis_Commit(t *testing.T) { - genesis := &Genesis{ - BaseFee: big.NewInt(params.InitialBaseFee), - Config: params.TestChainConfig, - // difficulty is nil - } - - db := rawdb.NewMemoryDatabase() - genesisBlock := genesis.MustCommit(db, triedb.NewDatabase(db, triedb.HashDefaults)) - - if genesis.Difficulty != nil { - t.Fatalf("assumption wrong") - } - - // This value should have been set as default in the ToBlock method. - if genesisBlock.Difficulty().Cmp(params.GenesisDifficulty) != 0 { - t.Errorf("assumption wrong: want: %d, got: %v", params.GenesisDifficulty, genesisBlock.Difficulty()) - } - - // Expect the stored total difficulty to be the difficulty of the genesis block. 
- stored := rawdb.ReadTd(db, genesisBlock.Hash(), genesisBlock.NumberU64()) - - if stored.Cmp(genesisBlock.Difficulty()) != 0 { - t.Errorf("inequal difficulty; stored: %v, genesisBlock: %v", stored, genesisBlock.Difficulty()) - } -} - -func TestReadWriteGenesisAlloc(t *testing.T) { - var ( - db = rawdb.NewMemoryDatabase() - alloc = &types.GenesisAlloc{ - {1}: {Balance: big.NewInt(1), Storage: map[common.Hash]common.Hash{{1}: {1}}}, - {2}: {Balance: big.NewInt(2), Storage: map[common.Hash]common.Hash{{2}: {2}}}, - } - hash, _ = hashAlloc(alloc, false) - ) - blob, _ := json.Marshal(alloc) - rawdb.WriteGenesisStateSpec(db, hash, blob) - - var reload types.GenesisAlloc - err := reload.UnmarshalJSON(rawdb.ReadGenesisStateSpec(db, hash)) - if err != nil { - t.Fatalf("Failed to load genesis state %v", err) - } - if len(reload) != len(*alloc) { - t.Fatal("Unexpected genesis allocation") - } - for addr, account := range reload { - want, ok := (*alloc)[addr] - if !ok { - t.Fatal("Account is not found") - } - if !reflect.DeepEqual(want, account) { - t.Fatal("Unexpected account") - } - } -} - -func TestConfigOrDefault(t *testing.T) { - defaultGenesis := DefaultGenesisBlock() - if defaultGenesis.Config.PlanckBlock != nil { - t.Errorf("initial config should have PlanckBlock = nil, but instead PlanckBlock = %v", defaultGenesis.Config.PlanckBlock) - } - gHash := params.BSCGenesisHash - config := defaultGenesis.configOrDefault(gHash) - - if config.ChainID.Cmp(params.BSCChainConfig.ChainID) != 0 { - t.Errorf("ChainID of resulting config should be %v, but is %v instead", params.BSCChainConfig.ChainID, config.ChainID) - } - - if config.HomesteadBlock.Cmp(params.BSCChainConfig.HomesteadBlock) != 0 { - t.Errorf("resulting config should have HomesteadBlock = %v, but instead is %v", params.BSCChainConfig, config.HomesteadBlock) - } - - if config.PlanckBlock == nil { - t.Errorf("resulting config should have PlanckBlock = %v , but instead is nil", params.BSCChainConfig.PlanckBlock) - } - - if config.PlanckBlock.Cmp(params.BSCChainConfig.PlanckBlock) != 0 { - t.Errorf("resulting config should have PlanckBlock = %v , but instead is %v", params.BSCChainConfig.PlanckBlock, config.PlanckBlock) - } -} - -func newDbConfig(scheme string) *triedb.Config { - if scheme == rawdb.HashScheme { - return triedb.HashDefaults - } - return &triedb.Config{PathDB: pathdb.Defaults} -} - -func TestVerkleGenesisCommit(t *testing.T) { - var verkleTime uint64 = 0 - verkleConfig := ¶ms.ChainConfig{ - ChainID: big.NewInt(1), - HomesteadBlock: big.NewInt(0), - DAOForkBlock: nil, - DAOForkSupport: false, - EIP150Block: big.NewInt(0), - EIP155Block: big.NewInt(0), - EIP158Block: big.NewInt(0), - ByzantiumBlock: big.NewInt(0), - ConstantinopleBlock: big.NewInt(0), - PetersburgBlock: big.NewInt(0), - IstanbulBlock: big.NewInt(0), - MuirGlacierBlock: big.NewInt(0), - BerlinBlock: big.NewInt(0), - LondonBlock: big.NewInt(0), - ArrowGlacierBlock: big.NewInt(0), - GrayGlacierBlock: big.NewInt(0), - MergeNetsplitBlock: nil, - ShanghaiTime: &verkleTime, - CancunTime: &verkleTime, - PragueTime: &verkleTime, - VerkleTime: &verkleTime, - TerminalTotalDifficulty: big.NewInt(0), - TerminalTotalDifficultyPassed: true, - Ethash: nil, - Clique: nil, - } - - genesis := &Genesis{ - BaseFee: big.NewInt(params.InitialBaseFee), - Config: verkleConfig, - Timestamp: verkleTime, - Difficulty: big.NewInt(0), - Alloc: types.GenesisAlloc{ - {1}: {Balance: big.NewInt(1), Storage: map[common.Hash]common.Hash{{1}: {1}}}, - }, - } - - expected := 
common.Hex2Bytes("14398d42be3394ff8d50681816a4b7bf8d8283306f577faba2d5bc57498de23b") - got := genesis.ToBlock().Root().Bytes() - if !bytes.Equal(got, expected) { - t.Fatalf("invalid genesis state root, expected %x, got %x", expected, got) - } - - db := rawdb.NewMemoryDatabase() - triedb := triedb.NewDatabase(db, &triedb.Config{IsVerkle: true, PathDB: pathdb.Defaults}) - block := genesis.MustCommit(db, triedb) - if !bytes.Equal(block.Root().Bytes(), expected) { - t.Fatalf("invalid genesis state root, expected %x, got %x", expected, got) - } - - // Test that the trie is verkle - if !triedb.IsVerkle() { - t.Fatalf("expected trie to be verkle") - } - - if !rawdb.ExistsAccountTrieNode(db, nil) { - t.Fatal("could not find node") - } -} diff --git a/core/headerchain.go b/core/headerchain.go index f09ab4347c..38d07d8265 100644 --- a/core/headerchain.go +++ b/core/headerchain.go @@ -29,6 +29,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/lru" "github.com/ethereum/go-ethereum/consensus" + "github.com/ethereum/go-ethereum/core/blockarchiver" "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/ethdb" @@ -72,6 +73,8 @@ type HeaderChain struct { rand *mrand.Rand engine consensus.Engine + + blockArchiverService blockarchiver.BlockArchiver } // NewHeaderChain creates a new HeaderChain structure. ProcInterrupt points @@ -85,7 +88,6 @@ func NewHeaderChain(chainDb ethdb.Database, config *params.ChainConfig, engine c hc := &HeaderChain{ config: config, chainDb: chainDb, - headerCache: lru.NewCache[common.Hash, *types.Header](headerCacheLimit), tdCache: lru.NewCache[common.Hash, *big.Int](tdCacheLimit), numberCache: lru.NewCache[common.Hash, uint64](numberCacheLimit), procInterrupt: procInterrupt, @@ -96,17 +98,6 @@ func NewHeaderChain(chainDb ethdb.Database, config *params.ChainConfig, engine c if hc.genesisHeader == nil { return nil, ErrNoGenesis } - hc.currentHeader.Store(hc.genesisHeader) - if head := rawdb.ReadHeadBlockHash(chainDb.BlockStore()); head != (common.Hash{}) { - if chead := hc.GetHeaderByHash(head); chead != nil { - hc.currentHeader.Store(chead) - } - } - hc.currentHeaderHash = hc.CurrentHeader().Hash() - headHeaderGauge.Update(hc.CurrentHeader().Number.Int64()) - justifiedBlockGauge.Update(int64(hc.GetJustifiedNumber(hc.CurrentHeader()))) - finalizedBlockGauge.Update(int64(hc.getFinalizedNumber(hc.CurrentHeader()))) - return hc, nil } @@ -139,16 +130,17 @@ func (hc *HeaderChain) GenesisHeader() *types.Header { } // GetBlockNumber retrieves the block number belonging to the given hash -// from the cache or database +// from the cache or the remote block archiver. func (hc *HeaderChain) GetBlockNumber(hash common.Hash) *uint64 { if cached, ok := hc.numberCache.Get(hash); ok { return &cached } - number := rawdb.ReadHeaderNumber(hc.chainDb.BlockStore(), hash) - if number != nil { - hc.numberCache.Add(hash, *number) + _, header, _ := hc.blockArchiverService.GetBlockByHash(hash) + if header == nil { + return nil } - return number + number := header.Number.Uint64() + return &number } type headerWriteResult struct { @@ -499,15 +491,24 @@ func (hc *HeaderChain) GetTd(hash common.Hash, number uint64) *big.Int { // caching it if found. 
func (hc *HeaderChain) GetHeader(hash common.Hash, number uint64) *types.Header { // Short circuit if the header's already in the cache, retrieve otherwise - if header, ok := hc.headerCache.Get(hash); ok { - return header + if hash != (common.Hash{}) { + if header, ok := hc.headerCache.Get(hash); ok { + return header + } } - header := rawdb.ReadHeader(hc.chainDb, hash, number) - if header == nil { - return nil + + // the genesis header is stored in DB + if number == 0 { + header := rawdb.ReadHeader(hc.chainDb, hash, number) + if header == nil { + return nil + } + hc.headerCache.Add(hash, header) + return header } - // Cache the found header for next time and return - hc.headerCache.Add(hash, header) + + // get header from block archiver, and the headerCache will be updated there + _, header, _ := hc.blockArchiverService.GetBlockByNumber(number) return header } @@ -535,9 +536,12 @@ func (hc *HeaderChain) HasHeader(hash common.Hash, number uint64) bool { // caching it (associated with its hash) if found. func (hc *HeaderChain) GetHeaderByNumber(number uint64) *types.Header { hash := rawdb.ReadCanonicalHash(hc.chainDb, number) - if hash == (common.Hash{}) { - return nil + // the genesis is stored in DB + if number == 0 { + return rawdb.ReadHeader(hc.chainDb, hash, number) } + + // get header from blockhub, and the headerCache will be updated there return hc.GetHeader(hash, number) } @@ -557,26 +561,17 @@ func (hc *HeaderChain) GetHeadersFrom(number, count uint64) []rlp.RawValue { } } var headers []rlp.RawValue - // If we have some of the headers in cache already, use that before going to db. - hash := rawdb.ReadCanonicalHash(hc.chainDb, number) - if hash == (common.Hash{}) { - return nil - } for count > 0 { - header, ok := hc.headerCache.Get(hash) - if !ok { + // GetBlockByNumber gets the header from block archiver service, cache is updated there + body, header, _ := hc.blockArchiverService.GetBlockByNumber(number) + if header == nil || body == nil { break } rlpData, _ := rlp.EncodeToBytes(header) headers = append(headers, rlpData) - hash = header.ParentHash count-- number-- } - // Read remaining from db - if count > 0 { - headers = append(headers, rawdb.ReadHeaderRange(hc.chainDb, number, count)...) - } return headers } @@ -596,8 +591,6 @@ func (hc *HeaderChain) SetCurrentHeader(head *types.Header) { hc.currentHeader.Store(head) hc.currentHeaderHash = head.Hash() headHeaderGauge.Update(head.Number.Int64()) - justifiedBlockGauge.Update(int64(hc.GetJustifiedNumber(head))) - finalizedBlockGauge.Update(int64(hc.getFinalizedNumber(head))) } type ( diff --git a/core/headerchain_test.go b/core/headerchain_test.go deleted file mode 100644 index 25d9bfffcb..0000000000 --- a/core/headerchain_test.go +++ /dev/null @@ -1,116 +0,0 @@ -// Copyright 2020 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . 
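// Illustrative sketch only, not part of the patch: it mirrors the retrieval flow
// introduced in the core/headerchain.go hunk above, where the genesis header stays
// in the local database and every other header is resolved through the block
// archiver service. The BlockArchiver method shape below is an assumption inferred
// from the call sites in that hunk (GetBlockByNumber returning body, header, error);
// the actual interface lives in core/blockarchiver and may differ.
package main

import (
	"fmt"
	"math/big"
)

// Minimal stand-ins for the go-ethereum types used by the sketch.
type Header struct{ Number *big.Int }
type Body struct{}

// Assumed archiver shape, modelled on the hc.blockArchiverService usage above.
type BlockArchiver interface {
	GetBlockByNumber(number uint64) (*Body, *Header, error)
}

// mockArchiver serves headers from an in-memory map, standing in for the
// remote Greenfield-backed block archiver.
type mockArchiver struct{ headers map[uint64]*Header }

func (m *mockArchiver) GetBlockByNumber(n uint64) (*Body, *Header, error) {
	h, ok := m.headers[n]
	if !ok {
		return nil, nil, fmt.Errorf("block %d not archived", n)
	}
	return &Body{}, h, nil
}

// headerByNumber follows the patched GetHeaderByNumber logic: number 0 is
// answered from the local store, anything else is fetched via the archiver,
// and archiver misses are tolerated by returning nil.
func headerByNumber(a BlockArchiver, genesis *Header, number uint64) *Header {
	if number == 0 {
		return genesis
	}
	_, header, err := a.GetBlockByNumber(number)
	if err != nil || header == nil {
		return nil
	}
	return header
}

func main() {
	genesis := &Header{Number: big.NewInt(0)}
	archiver := &mockArchiver{headers: map[uint64]*Header{1: {Number: big.NewInt(1)}}}
	fmt.Println(headerByNumber(archiver, genesis, 0).Number)  // 0, served locally
	fmt.Println(headerByNumber(archiver, genesis, 1).Number)  // 1, served by the archiver
	fmt.Println(headerByNumber(archiver, genesis, 2) == nil)  // true, miss tolerated
}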
- -package core - -import ( - "errors" - "fmt" - "math/big" - "testing" - "time" - - "github.com/ethereum/go-ethereum/consensus" - "github.com/ethereum/go-ethereum/consensus/ethash" - "github.com/ethereum/go-ethereum/core/rawdb" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/params" - "github.com/ethereum/go-ethereum/triedb" -) - -func verifyUnbrokenCanonchain(hc *HeaderChain) error { - h := hc.CurrentHeader() - for { - canonHash := rawdb.ReadCanonicalHash(hc.chainDb, h.Number.Uint64()) - if exp := h.Hash(); canonHash != exp { - return fmt.Errorf("Canon hash chain broken, block %d got %x, expected %x", - h.Number, canonHash[:8], exp[:8]) - } - // Verify that we have the TD - if td := rawdb.ReadTd(hc.chainDb, canonHash, h.Number.Uint64()); td == nil { - return fmt.Errorf("Canon TD missing at block %d", h.Number) - } - if h.Number.Uint64() == 0 { - break - } - h = hc.GetHeader(h.ParentHash, h.Number.Uint64()-1) - } - return nil -} - -func testInsert(t *testing.T, hc *HeaderChain, chain []*types.Header, wantStatus WriteStatus, wantErr error, forker *ForkChoice) { - t.Helper() - - status, err := hc.InsertHeaderChain(chain, time.Now(), forker) - if status != wantStatus { - t.Errorf("wrong write status from InsertHeaderChain: got %v, want %v", status, wantStatus) - } - // Always verify that the header chain is unbroken - if err := verifyUnbrokenCanonchain(hc); err != nil { - t.Fatal(err) - } - if !errors.Is(err, wantErr) { - t.Fatalf("unexpected error from InsertHeaderChain: %v", err) - } -} - -// This test checks status reporting of InsertHeaderChain. -func TestHeaderInsertion(t *testing.T) { - var ( - db = rawdb.NewMemoryDatabase() - gspec = &Genesis{BaseFee: big.NewInt(params.InitialBaseFee), Config: params.AllEthashProtocolChanges} - ) - gspec.Commit(db, triedb.NewDatabase(db, nil)) - hc, err := NewHeaderChain(db, gspec.Config, ethash.NewFaker(), func() bool { return false }) - if err != nil { - t.Fatal(err) - } - // chain A: G->A1->A2...A128 - genDb, chainA := makeHeaderChainWithGenesis(gspec, 128, ethash.NewFaker(), 10) - // chain B: G->A1->B1...B128 - chainB := makeHeaderChain(gspec.Config, chainA[0], 128, ethash.NewFaker(), genDb, 10) - - forker := NewForkChoice(hc, nil) - // Inserting 64 headers on an empty chain, expecting - // 1 callbacks, 1 canon-status, 0 sidestatus, - testInsert(t, hc, chainA[:64], CanonStatTy, nil, forker) - - // Inserting 64 identical headers, expecting - // 0 callbacks, 0 canon-status, 0 sidestatus, - testInsert(t, hc, chainA[:64], NonStatTy, nil, forker) - - // Inserting the same some old, some new headers - // 1 callbacks, 1 canon, 0 side - testInsert(t, hc, chainA[32:96], CanonStatTy, nil, forker) - - // Inserting side blocks, but not overtaking the canon chain - testInsert(t, hc, chainB[0:32], SideStatTy, nil, forker) - - // Inserting more side blocks, but we don't have the parent - testInsert(t, hc, chainB[34:36], NonStatTy, consensus.ErrUnknownAncestor, forker) - - // Inserting more sideblocks, overtaking the canon chain - testInsert(t, hc, chainB[32:97], CanonStatTy, nil, forker) - - // Inserting more A-headers, taking back the canonicality - testInsert(t, hc, chainA[90:100], CanonStatTy, nil, forker) - - // And B becomes canon again - testInsert(t, hc, chainB[97:107], CanonStatTy, nil, forker) - - // And B becomes even longer - testInsert(t, hc, chainB[107:128], CanonStatTy, nil, forker) -} diff --git a/core/state_prefetcher_test.go b/core/state_prefetcher_test.go deleted file mode 100644 index b1c5974151..0000000000 
--- a/core/state_prefetcher_test.go +++ /dev/null @@ -1,149 +0,0 @@ -package core - -import ( - "bytes" - "context" - "errors" - "fmt" - "math/big" - "runtime/pprof" - "strings" - "testing" - "time" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/consensus/ethash" - "github.com/ethereum/go-ethereum/core/rawdb" - "github.com/ethereum/go-ethereum/core/state" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/core/vm" - "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/params" - "github.com/ethereum/go-ethereum/triedb" - - "github.com/google/pprof/profile" -) - -func TestPrefetchLeaking(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - var ( - gendb = rawdb.NewMemoryDatabase() - key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") - address = crypto.PubkeyToAddress(key.PublicKey) - funds = big.NewInt(100000000000000000) - gspec = &Genesis{ - Config: params.TestChainConfig, - Alloc: GenesisAlloc{address: {Balance: funds}}, - BaseFee: big.NewInt(params.InitialBaseFee), - } - triedb = triedb.NewDatabase(gendb, nil) - genesis = gspec.MustCommit(gendb, triedb) - signer = types.LatestSigner(gspec.Config) - ) - blocks, _ := GenerateChain(gspec.Config, genesis, ethash.NewFaker(), gendb, 1, func(i int, block *BlockGen) { - block.SetCoinbase(common.Address{0x00}) - for j := 0; j < 100; j++ { - tx, err := types.SignTx(types.NewTransaction(block.TxNonce(address), common.Address{0x00}, big.NewInt(1000), params.TxGas, block.header.BaseFee, nil), signer, key) - if err != nil { - panic(err) - } - block.AddTx(tx) - } - }) - archiveDb := rawdb.NewMemoryDatabase() - gspec.MustCommit(archiveDb, triedb) - archive, _ := NewBlockChain(archiveDb, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) - defer archive.Stop() - - block := blocks[0] - parent := archive.GetHeader(block.ParentHash(), block.NumberU64()-1) - statedb, _ := state.NewWithSharedPool(parent.Root, archive.stateCache, archive.snaps) - inter := make(chan struct{}) - - Track(ctx, t, func(ctx context.Context) { - close(inter) - go archive.prefetcher.Prefetch(block, statedb, &archive.vmConfig, inter) - time.Sleep(1 * time.Second) - }) -} - -func Track(ctx context.Context, t *testing.T, fn func(context.Context)) { - label := t.Name() - pprof.Do(ctx, pprof.Labels("test", label), fn) - if err := CheckNoGoroutines("test", label); err != nil { - t.Fatal("Leaked goroutines\n", err) - } -} - -func CheckNoGoroutines(key, value string) error { - var pb bytes.Buffer - profiler := pprof.Lookup("goroutine") - if profiler == nil { - return errors.New("unable to find profile") - } - err := profiler.WriteTo(&pb, 0) - if err != nil { - return fmt.Errorf("unable to read profile: %w", err) - } - - p, err := profile.ParseData(pb.Bytes()) - if err != nil { - return fmt.Errorf("unable to parse profile: %w", err) - } - - return summarizeGoroutines(p, key, value) -} - -func summarizeGoroutines(p *profile.Profile, key, expectedValue string) error { - var b strings.Builder - - for _, sample := range p.Sample { - if !matchesLabel(sample, key, expectedValue) { - continue - } - - fmt.Fprintf(&b, "count %d @", sample.Value[0]) - // format the stack trace for each goroutine - for _, loc := range sample.Location { - for i, ln := range loc.Line { - if i == 0 { - fmt.Fprintf(&b, "# %#8x", loc.Address) - if loc.IsFolded { - fmt.Fprint(&b, " [F]") - } - } else { - fmt.Fprint(&b, "# ") - } - if fn := 
ln.Function; fn != nil { - fmt.Fprintf(&b, " %-50s %s:%d", fn.Name, fn.Filename, ln.Line) - } else { - fmt.Fprintf(&b, " ???") - } - fmt.Fprintf(&b, "\n") - } - } - fmt.Fprintf(&b, "\n") - } - - if b.Len() == 0 { - return nil - } - - return errors.New(b.String()) -} - -func matchesLabel(sample *profile.Sample, key, expectedValue string) bool { - values, hasLabel := sample.Label[key] - if !hasLabel { - return false - } - - for _, value := range values { - if value == expectedValue { - return true - } - } - - return false -} diff --git a/core/state_processor_test.go b/core/state_processor_test.go deleted file mode 100644 index f87997c7ed..0000000000 --- a/core/state_processor_test.go +++ /dev/null @@ -1,431 +0,0 @@ -// Copyright 2020 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package core - -import ( - "crypto/ecdsa" - "errors" - "math/big" - "testing" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/common/math" - "github.com/ethereum/go-ethereum/consensus" - "github.com/ethereum/go-ethereum/consensus/beacon" - "github.com/ethereum/go-ethereum/consensus/ethash" - "github.com/ethereum/go-ethereum/consensus/misc/eip1559" - "github.com/ethereum/go-ethereum/consensus/misc/eip4844" - "github.com/ethereum/go-ethereum/core/rawdb" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/core/vm" - "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/params" - "github.com/ethereum/go-ethereum/trie" - "github.com/holiman/uint256" - "golang.org/x/crypto/sha3" -) - -func u64(val uint64) *uint64 { return &val } - -// TestStateProcessorErrors tests the output from the 'core' errors -// as defined in core/error.go. 
These errors are generated when the -// blockchain imports bad blocks, meaning blocks which have valid headers but -// contain invalid transactions -func TestStateProcessorErrors(t *testing.T) { - var ( - config = ¶ms.ChainConfig{ - ChainID: big.NewInt(1), - HomesteadBlock: big.NewInt(0), - EIP150Block: big.NewInt(0), - EIP155Block: big.NewInt(0), - EIP158Block: big.NewInt(0), - ByzantiumBlock: big.NewInt(0), - ConstantinopleBlock: big.NewInt(0), - PetersburgBlock: big.NewInt(0), - IstanbulBlock: big.NewInt(0), - MuirGlacierBlock: big.NewInt(0), - MirrorSyncBlock: big.NewInt(0), - BrunoBlock: big.NewInt(0), - EulerBlock: big.NewInt(0), - BerlinBlock: big.NewInt(0), - LondonBlock: big.NewInt(0), - GibbsBlock: big.NewInt(0), - Ethash: new(params.EthashConfig), - TerminalTotalDifficulty: big.NewInt(0), - TerminalTotalDifficultyPassed: true, - ShanghaiTime: new(uint64), - CancunTime: new(uint64), - } - signer = types.LatestSigner(config) - key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") - key2, _ = crypto.HexToECDSA("0202020202020202020202020202020202020202020202020202002020202020") - ) - var makeTx = func(key *ecdsa.PrivateKey, nonce uint64, to common.Address, amount *big.Int, gasLimit uint64, gasPrice *big.Int, data []byte) *types.Transaction { - tx, _ := types.SignTx(types.NewTransaction(nonce, to, amount, gasLimit, gasPrice, data), signer, key) - return tx - } - var mkDynamicTx = func(nonce uint64, to common.Address, gasLimit uint64, gasTipCap, gasFeeCap *big.Int) *types.Transaction { - tx, _ := types.SignTx(types.NewTx(&types.DynamicFeeTx{ - Nonce: nonce, - GasTipCap: gasTipCap, - GasFeeCap: gasFeeCap, - Gas: gasLimit, - To: &to, - Value: big.NewInt(0), - }), signer, key1) - return tx - } - var mkDynamicCreationTx = func(nonce uint64, gasLimit uint64, gasTipCap, gasFeeCap *big.Int, data []byte) *types.Transaction { - tx, _ := types.SignTx(types.NewTx(&types.DynamicFeeTx{ - Nonce: nonce, - GasTipCap: gasTipCap, - GasFeeCap: gasFeeCap, - Gas: gasLimit, - Value: big.NewInt(0), - Data: data, - }), signer, key1) - return tx - } - var mkBlobTx = func(nonce uint64, to common.Address, gasLimit uint64, gasTipCap, gasFeeCap, blobGasFeeCap *big.Int, hashes []common.Hash) *types.Transaction { - tx, err := types.SignTx(types.NewTx(&types.BlobTx{ - Nonce: nonce, - GasTipCap: uint256.MustFromBig(gasTipCap), - GasFeeCap: uint256.MustFromBig(gasFeeCap), - Gas: gasLimit, - To: to, - BlobHashes: hashes, - BlobFeeCap: uint256.MustFromBig(blobGasFeeCap), - Value: new(uint256.Int), - }), signer, key1) - if err != nil { - t.Fatal(err) - } - return tx - } - - { // Tests against a 'recent' chain definition - var ( - db = rawdb.NewMemoryDatabase() - gspec = &Genesis{ - Config: config, - Alloc: types.GenesisAlloc{ - common.HexToAddress("0x71562b71999873DB5b286dF957af199Ec94617F7"): types.Account{ - Balance: big.NewInt(1000000000000000000), // 1 ether - Nonce: 0, - }, - common.HexToAddress("0xfd0810DD14796680f72adf1a371963d0745BCc64"): types.Account{ - Balance: big.NewInt(1000000000000000000), // 1 ether - Nonce: math.MaxUint64, - }, - }, - } - blockchain, _ = NewBlockChain(db, nil, gspec, nil, beacon.New(ethash.NewFaker()), vm.Config{}, nil, nil) - tooBigInitCode = [params.MaxInitCodeSize + 1]byte{} - ) - - defer blockchain.Stop() - bigNumber := new(big.Int).SetBytes(common.MaxHash.Bytes()) - tooBigNumber := new(big.Int).Set(bigNumber) - tooBigNumber.Add(tooBigNumber, common.Big1) - for i, tt := range []struct { - txs []*types.Transaction - want string - }{ - { // 
ErrNonceTooLow - txs: []*types.Transaction{ - makeTx(key1, 0, common.Address{}, big.NewInt(0), params.TxGas, big.NewInt(875000000), nil), - makeTx(key1, 0, common.Address{}, big.NewInt(0), params.TxGas, big.NewInt(875000000), nil), - }, - want: "could not apply tx 1 [0x0026256b3939ed97e2c4a6f3fce8ecf83bdcfa6d507c47838c308a1fb0436f62]: nonce too low: address 0x71562b71999873DB5b286dF957af199Ec94617F7, tx: 0 state: 1", - }, - { // ErrNonceTooHigh - txs: []*types.Transaction{ - makeTx(key1, 100, common.Address{}, big.NewInt(0), params.TxGas, big.NewInt(875000000), nil), - }, - want: "could not apply tx 0 [0xdebad714ca7f363bd0d8121c4518ad48fa469ca81b0a081be3d10c17460f751b]: nonce too high: address 0x71562b71999873DB5b286dF957af199Ec94617F7, tx: 100 state: 0", - }, - { // ErrNonceMax - txs: []*types.Transaction{ - makeTx(key2, math.MaxUint64, common.Address{}, big.NewInt(0), params.TxGas, big.NewInt(875000000), nil), - }, - want: "could not apply tx 0 [0x84ea18d60eb2bb3b040e3add0eb72f757727122cc257dd858c67cb6591a85986]: nonce has max value: address 0xfd0810DD14796680f72adf1a371963d0745BCc64, nonce: 18446744073709551615", - }, - { // ErrGasLimitReached - txs: []*types.Transaction{ - makeTx(key1, 0, common.Address{}, big.NewInt(0), 21000000, big.NewInt(875000000), nil), - }, - want: "could not apply tx 0 [0xbd49d8dadfd47fb846986695f7d4da3f7b2c48c8da82dbc211a26eb124883de9]: gas limit reached", - }, - { // ErrInsufficientFundsForTransfer - txs: []*types.Transaction{ - makeTx(key1, 0, common.Address{}, big.NewInt(1000000000000000000), params.TxGas, big.NewInt(875000000), nil), - }, - want: "could not apply tx 0 [0x98c796b470f7fcab40aaef5c965a602b0238e1034cce6fb73823042dd0638d74]: insufficient funds for gas * price + value: address 0x71562b71999873DB5b286dF957af199Ec94617F7 have 1000000000000000000 want 1000018375000000000", - }, - { // ErrInsufficientFunds - txs: []*types.Transaction{ - makeTx(key1, 0, common.Address{}, big.NewInt(0), params.TxGas, big.NewInt(900000000000000000), nil), - }, - want: "could not apply tx 0 [0x4a69690c4b0cd85e64d0d9ea06302455b01e10a83db964d60281739752003440]: insufficient funds for gas * price + value: address 0x71562b71999873DB5b286dF957af199Ec94617F7 have 1000000000000000000 want 18900000000000000000000", - }, - // ErrGasUintOverflow - // One missing 'core' error is ErrGasUintOverflow: "gas uint64 overflow", - // In order to trigger that one, we'd have to allocate a _huge_ chunk of data, such that the - // multiplication len(data) +gas_per_byte overflows uint64. 
Not testable at the moment - { // ErrIntrinsicGas - txs: []*types.Transaction{ - makeTx(key1, 0, common.Address{}, big.NewInt(0), params.TxGas-1000, big.NewInt(875000000), nil), - }, - want: "could not apply tx 0 [0xcf3b049a0b516cb4f9274b3e2a264359e2ba53b2fb64b7bda2c634d5c9d01fca]: intrinsic gas too low: have 20000, want 21000", - }, - { // ErrGasLimitReached - txs: []*types.Transaction{ - makeTx(key1, 0, common.Address{}, big.NewInt(0), params.TxGas*1000, big.NewInt(875000000), nil), - }, - want: "could not apply tx 0 [0xbd49d8dadfd47fb846986695f7d4da3f7b2c48c8da82dbc211a26eb124883de9]: gas limit reached", - }, - { // ErrFeeCapTooLow does not apply because default BaseFee is 0 - txs: []*types.Transaction{ - mkDynamicTx(0, common.Address{}, params.TxGas, big.NewInt(0), big.NewInt(0)), - }, - want: "could not apply tx 0 [0xc4ab868fef0c82ae0387b742aee87907f2d0fc528fc6ea0a021459fb0fc4a4a8]: max fee per gas less than block base fee: address 0x71562b71999873DB5b286dF957af199Ec94617F7, maxFeePerGas: 0, baseFee: 875000000", - }, - { // ErrTipVeryHigh - txs: []*types.Transaction{ - mkDynamicTx(0, common.Address{}, params.TxGas, tooBigNumber, big.NewInt(1)), - }, - want: "could not apply tx 0 [0x15b8391b9981f266b32f3ab7da564bbeb3d6c21628364ea9b32a21139f89f712]: max priority fee per gas higher than 2^256-1: address 0x71562b71999873DB5b286dF957af199Ec94617F7, maxPriorityFeePerGas bit length: 257", - }, - { // ErrFeeCapVeryHigh - txs: []*types.Transaction{ - mkDynamicTx(0, common.Address{}, params.TxGas, big.NewInt(1), tooBigNumber), - }, - want: "could not apply tx 0 [0x48bc299b83fdb345c57478f239e89814bb3063eb4e4b49f3b6057a69255c16bd]: max fee per gas higher than 2^256-1: address 0x71562b71999873DB5b286dF957af199Ec94617F7, maxFeePerGas bit length: 257", - }, - { // ErrTipAboveFeeCap - txs: []*types.Transaction{ - mkDynamicTx(0, common.Address{}, params.TxGas, big.NewInt(2), big.NewInt(1)), - }, - want: "could not apply tx 0 [0xf987a31ff0c71895780a7612f965a0c8b056deb54e020bb44fa478092f14c9b4]: max priority fee per gas higher than max fee per gas: address 0x71562b71999873DB5b286dF957af199Ec94617F7, maxPriorityFeePerGas: 2, maxFeePerGas: 1", - }, - { // ErrInsufficientFunds - // Available balance: 1000000000000000000 - // Effective cost: 18375000021000 - // FeeCap * gas: 1050000000000000000 - // This test is designed to have the effective cost be covered by the balance, but - // the extended requirement on FeeCap*gas < balance to fail - txs: []*types.Transaction{ - mkDynamicTx(0, common.Address{}, params.TxGas, big.NewInt(1), big.NewInt(50000000000000)), - }, - want: "could not apply tx 0 [0x413603cd096a87f41b1660d3ed3e27d62e1da78eac138961c0a1314ed43bd129]: insufficient funds for gas * price + value: address 0x71562b71999873DB5b286dF957af199Ec94617F7 have 1000000000000000000 want 1050000000000000000", - }, - { // Another ErrInsufficientFunds, this one to ensure that feecap/tip of max u256 is allowed - txs: []*types.Transaction{ - mkDynamicTx(0, common.Address{}, params.TxGas, bigNumber, bigNumber), - }, - want: "could not apply tx 0 [0xd82a0c2519acfeac9a948258c47e784acd20651d9d80f9a1c67b4137651c3a24]: insufficient funds for gas * price + value: address 0x71562b71999873DB5b286dF957af199Ec94617F7 required balance exceeds 256 bits", - }, - { // ErrMaxInitCodeSizeExceeded - txs: []*types.Transaction{ - mkDynamicCreationTx(0, 500000, common.Big0, big.NewInt(params.InitialBaseFee), tooBigInitCode[:]), - }, - want: "could not apply tx 0 [0xd491405f06c92d118dd3208376fcee18a57c54bc52063ee4a26b1cf296857c25]: max 
initcode size exceeded: code size 49153 limit 49152", - }, - { // ErrIntrinsicGas: Not enough gas to cover init code - txs: []*types.Transaction{ - mkDynamicCreationTx(0, 54299, common.Big0, big.NewInt(params.InitialBaseFee), make([]byte, 320)), - }, - want: "could not apply tx 0 [0xfd49536a9b323769d8472fcb3ebb3689b707a349379baee3e2ee3fe7baae06a1]: intrinsic gas too low: have 54299, want 54300", - }, - { // ErrBlobFeeCapTooLow - txs: []*types.Transaction{ - mkBlobTx(0, common.Address{}, params.TxGas, big.NewInt(1), big.NewInt(1), big.NewInt(0), []common.Hash{(common.Hash{1})}), - }, - want: "could not apply tx 0 [0x6c11015985ce82db691d7b2d017acda296db88b811c3c60dc71449c76256c716]: max fee per gas less than block base fee: address 0x71562b71999873DB5b286dF957af199Ec94617F7, maxFeePerGas: 1, baseFee: 875000000", - }, - } { - block := GenerateBadBlock(gspec.ToBlock(), beacon.New(ethash.NewFaker()), tt.txs, gspec.Config) - _, err := blockchain.InsertChain(types.Blocks{block}) - if err == nil { - t.Fatal("block imported without errors") - } - if have, want := err.Error(), tt.want; have != want { - t.Errorf("test %d:\nhave \"%v\"\nwant \"%v\"\n", i, have, want) - } - } - } - - // ErrTxTypeNotSupported, For this, we need an older chain - { - var ( - db = rawdb.NewMemoryDatabase() - gspec = &Genesis{ - Config: ¶ms.ChainConfig{ - ChainID: big.NewInt(1), - HomesteadBlock: big.NewInt(0), - EIP150Block: big.NewInt(0), - EIP155Block: big.NewInt(0), - EIP158Block: big.NewInt(0), - ByzantiumBlock: big.NewInt(0), - ConstantinopleBlock: big.NewInt(0), - PetersburgBlock: big.NewInt(0), - IstanbulBlock: big.NewInt(0), - MuirGlacierBlock: big.NewInt(0), - }, - Alloc: types.GenesisAlloc{ - common.HexToAddress("0x71562b71999873DB5b286dF957af199Ec94617F7"): types.Account{ - Balance: big.NewInt(1000000000000000000), // 1 ether - Nonce: 0, - }, - }, - } - blockchain, _ = NewBlockChain(db, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) - ) - defer blockchain.Stop() - for i, tt := range []struct { - txs []*types.Transaction - want error - }{ - { // ErrTxTypeNotSupported - txs: []*types.Transaction{ - mkDynamicTx(0, common.Address{}, params.TxGas-1000, big.NewInt(0), big.NewInt(0)), - }, - want: types.ErrTxTypeNotSupported, - }, - } { - block := GenerateBadBlock(gspec.ToBlock(), ethash.NewFaker(), tt.txs, gspec.Config) - _, err := blockchain.InsertChain(types.Blocks{block}) - if err == nil { - t.Fatal("block imported without errors") - } - if have, want := err, tt.want; !errors.Is(have, want) { - t.Errorf("test %d:\nhave \"%v\"\nwant \"%v\"\n", i, have, want) - } - } - } - - // ErrSenderNoEOA, for this we need the sender to have contract code - { - var ( - db = rawdb.NewMemoryDatabase() - gspec = &Genesis{ - Config: config, - Alloc: types.GenesisAlloc{ - common.HexToAddress("0x71562b71999873DB5b286dF957af199Ec94617F7"): types.Account{ - Balance: big.NewInt(1000000000000000000), // 1 ether - Nonce: 0, - Code: common.FromHex("0xB0B0FACE"), - }, - }, - } - blockchain, _ = NewBlockChain(db, nil, gspec, nil, beacon.New(ethash.NewFaker()), vm.Config{}, nil, nil) - ) - defer blockchain.Stop() - for i, tt := range []struct { - txs []*types.Transaction - want string - }{ - { // ErrSenderNoEOA - txs: []*types.Transaction{ - mkDynamicTx(0, common.Address{}, params.TxGas-1000, big.NewInt(0), big.NewInt(0)), - }, - want: "could not apply tx 0 [0x88626ac0d53cb65308f2416103c62bb1f18b805573d4f96a3640bbbfff13c14f]: sender not an eoa: address 0x71562b71999873DB5b286dF957af199Ec94617F7, codehash: 
0x9280914443471259d4570a8661015ae4a5b80186dbc619658fb494bebc3da3d1", - }, - } { - block := GenerateBadBlock(gspec.ToBlock(), beacon.New(ethash.NewFaker()), tt.txs, gspec.Config) - _, err := blockchain.InsertChain(types.Blocks{block}) - if err == nil { - t.Fatal("block imported without errors") - } - if have, want := err.Error(), tt.want; have != want { - t.Errorf("test %d:\nhave \"%v\"\nwant \"%v\"\n", i, have, want) - } - } - } -} - -// GenerateBadBlock constructs a "block" which contains the transactions. The transactions are not expected to be -// valid, and no proper post-state can be made. But from the perspective of the blockchain, the block is sufficiently -// valid to be considered for import: -// - valid pow (fake), ancestry, difficulty, gaslimit etc -func GenerateBadBlock(parent *types.Block, engine consensus.Engine, txs types.Transactions, config *params.ChainConfig) *types.Block { - difficulty := big.NewInt(0) - if !config.TerminalTotalDifficultyPassed { - fakeChainReader := newChainMaker(nil, config, engine) - difficulty = engine.CalcDifficulty(fakeChainReader, parent.Time()+10, &types.Header{ - Number: parent.Number(), - Time: parent.Time(), - Difficulty: parent.Difficulty(), - UncleHash: parent.UncleHash(), - }) - } - - header := &types.Header{ - ParentHash: parent.Hash(), - Coinbase: parent.Coinbase(), - Difficulty: difficulty, - GasLimit: parent.GasLimit(), - Number: new(big.Int).Add(parent.Number(), common.Big1), - Time: parent.Time() + 10, - UncleHash: types.EmptyUncleHash, - } - if config.IsLondon(header.Number) { - header.BaseFee = eip1559.CalcBaseFee(config, parent.Header()) - } - if config.IsShanghai(header.Number, header.Time) { - header.WithdrawalsHash = &types.EmptyWithdrawalsHash - } - var receipts []*types.Receipt - // The post-state result doesn't need to be correct (this is a bad block), but we do need something there - // Preferably something unique. So let's use a combo of blocknum + txhash - hasher := sha3.NewLegacyKeccak256() - hasher.Write(header.Number.Bytes()) - var cumulativeGas uint64 - var nBlobs int - for _, tx := range txs { - txh := tx.Hash() - hasher.Write(txh[:]) - receipt := types.NewReceipt(nil, false, cumulativeGas+tx.Gas()) - receipt.TxHash = tx.Hash() - receipt.GasUsed = tx.Gas() - receipts = append(receipts, receipt) - cumulativeGas += tx.Gas() - nBlobs += len(tx.BlobHashes()) - } - header.Root = common.BytesToHash(hasher.Sum(nil)) - if config.IsCancun(header.Number, header.Time) { - var pExcess, pUsed = uint64(0), uint64(0) - if parent.ExcessBlobGas() != nil { - pExcess = *parent.ExcessBlobGas() - pUsed = *parent.BlobGasUsed() - } - excess := eip4844.CalcExcessBlobGas(pExcess, pUsed) - used := uint64(nBlobs * params.BlobTxBlobGasPerBlob) - header.ExcessBlobGas = &excess - header.BlobGasUsed = &used - - beaconRoot := common.HexToHash("0xbeac00") - if config.Parlia == nil { - header.ParentBeaconRoot = &beaconRoot - } - } - // Assemble and return the final block for sealing - if config.IsShanghai(header.Number, header.Time) { - return types.NewBlockWithWithdrawals(header, txs, nil, receipts, []*types.Withdrawal{}, trie.NewStackTrie(nil)) - } - return types.NewBlock(header, txs, nil, receipts, trie.NewStackTrie(nil)) -} diff --git a/core/txindexer_test.go b/core/txindexer_test.go deleted file mode 100644 index d078046355..0000000000 --- a/core/txindexer_test.go +++ /dev/null @@ -1,243 +0,0 @@ -// Copyright 2024 The go-ethereum Authors -// This file is part of the go-ethereum library. 
-// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see - -package core - -import ( - "math/big" - "os" - "testing" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/consensus/ethash" - "github.com/ethereum/go-ethereum/core/rawdb" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/ethdb" - "github.com/ethereum/go-ethereum/params" -) - -// TestTxIndexer tests the functionalities for managing transaction indexes. -func TestTxIndexer(t *testing.T) { - var ( - testBankKey, _ = crypto.GenerateKey() - testBankAddress = crypto.PubkeyToAddress(testBankKey.PublicKey) - testBankFunds = big.NewInt(1000000000000000000) - - gspec = &Genesis{ - Config: params.TestChainConfig, - Alloc: types.GenesisAlloc{testBankAddress: {Balance: testBankFunds}}, - BaseFee: big.NewInt(params.InitialBaseFee), - } - engine = ethash.NewFaker() - nonce = uint64(0) - chainHead = uint64(128) - ) - _, blocks, receipts := GenerateChainWithGenesis(gspec, engine, int(chainHead), func(i int, gen *BlockGen) { - tx, _ := types.SignTx(types.NewTransaction(nonce, common.HexToAddress("0xdeadbeef"), big.NewInt(1000), params.TxGas, big.NewInt(10*params.InitialBaseFee), nil), types.HomesteadSigner{}, testBankKey) - gen.AddTx(tx) - nonce += 1 - }) - - // verifyIndexes checks if the transaction indexes are present or not - // of the specified block. 
- verifyIndexes := func(db ethdb.Database, number uint64, exist bool) { - if number == 0 { - return - } - block := blocks[number-1] - for _, tx := range block.Transactions() { - lookup := rawdb.ReadTxLookupEntry(db, tx.Hash()) - if exist && lookup == nil { - t.Fatalf("missing %d %x", number, tx.Hash().Hex()) - } - if !exist && lookup != nil { - t.Fatalf("unexpected %d %x", number, tx.Hash().Hex()) - } - } - } - verify := func(db ethdb.Database, expTail uint64, indexer *txIndexer) { - tail := rawdb.ReadTxIndexTail(db) - if tail == nil { - t.Fatal("Failed to write tx index tail") - } - if *tail != expTail { - t.Fatalf("Unexpected tx index tail, want %v, got %d", expTail, *tail) - } - if *tail != 0 { - for number := uint64(0); number < *tail; number += 1 { - verifyIndexes(db, number, false) - } - } - for number := *tail; number <= chainHead; number += 1 { - verifyIndexes(db, number, true) - } - progress := indexer.report(chainHead, tail) - if !progress.Done() { - t.Fatalf("Expect fully indexed") - } - } - - var cases = []struct { - limitA uint64 - tailA uint64 - limitB uint64 - tailB uint64 - limitC uint64 - tailC uint64 - }{ - { - // LimitA: 0 - // TailA: 0 - // - // all blocks are indexed - limitA: 0, - tailA: 0, - - // LimitB: 1 - // TailB: 128 - // - // block-128 is indexed - limitB: 1, - tailB: 128, - - // LimitB: 64 - // TailB: 65 - // - // block [65, 128] are indexed - limitC: 64, - tailC: 65, - }, - { - // LimitA: 64 - // TailA: 65 - // - // block [65, 128] are indexed - limitA: 64, - tailA: 65, - - // LimitB: 1 - // TailB: 128 - // - // block-128 is indexed - limitB: 1, - tailB: 128, - - // LimitB: 64 - // TailB: 65 - // - // block [65, 128] are indexed - limitC: 64, - tailC: 65, - }, - { - // LimitA: 127 - // TailA: 2 - // - // block [2, 128] are indexed - limitA: 127, - tailA: 2, - - // LimitB: 1 - // TailB: 128 - // - // block-128 is indexed - limitB: 1, - tailB: 128, - - // LimitB: 64 - // TailB: 65 - // - // block [65, 128] are indexed - limitC: 64, - tailC: 65, - }, - { - // LimitA: 128 - // TailA: 1 - // - // block [2, 128] are indexed - limitA: 128, - tailA: 1, - - // LimitB: 1 - // TailB: 128 - // - // block-128 is indexed - limitB: 1, - tailB: 128, - - // LimitB: 64 - // TailB: 65 - // - // block [65, 128] are indexed - limitC: 64, - tailC: 65, - }, - { - // LimitA: 129 - // TailA: 0 - // - // block [0, 128] are indexed - limitA: 129, - tailA: 0, - - // LimitB: 1 - // TailB: 128 - // - // block-128 is indexed - limitB: 1, - tailB: 128, - - // LimitB: 64 - // TailB: 65 - // - // block [65, 128] are indexed - limitC: 64, - tailC: 65, - }, - } - for _, c := range cases { - frdir := t.TempDir() - db, _ := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), frdir, "", false, false, false, false, false) - rawdb.WriteAncientBlocks(db, append([]*types.Block{gspec.ToBlock()}, blocks...), append([]types.Receipts{{}}, receipts...), big.NewInt(0)) - - // Index the initial blocks from ancient store - indexer := &txIndexer{ - limit: c.limitA, - db: db, - progress: make(chan chan TxIndexProgress), - } - indexer.run(nil, 128, make(chan struct{}), make(chan struct{})) - verify(db, c.tailA, indexer) - - indexer.limit = c.limitB - indexer.run(rawdb.ReadTxIndexTail(db), 128, make(chan struct{}), make(chan struct{})) - verify(db, c.tailB, indexer) - - indexer.limit = c.limitC - indexer.run(rawdb.ReadTxIndexTail(db), 128, make(chan struct{}), make(chan struct{})) - verify(db, c.tailC, indexer) - - // Recover all indexes - indexer.limit = 0 - indexer.run(rawdb.ReadTxIndexTail(db), 128, 
make(chan struct{}), make(chan struct{})) - verify(db, 0, indexer) - - db.Close() - os.RemoveAll(frdir) - } -} diff --git a/core/vote/vote_pool_test.go b/core/vote/vote_pool_test.go index bb8374e90f..b94e73c797 100644 --- a/core/vote/vote_pool_test.go +++ b/core/vote/vote_pool_test.go @@ -17,12 +17,10 @@ package vote import ( - "container/heap" "context" "encoding/json" "errors" "fmt" - "math/big" "os" "path/filepath" "testing" @@ -37,15 +35,9 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/consensus" - "github.com/ethereum/go-ethereum/consensus/ethash" - "github.com/ethereum/go-ethereum/core" - "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/eth/downloader" "github.com/ethereum/go-ethereum/event" - "github.com/ethereum/go-ethereum/params" ) var ( @@ -133,273 +125,6 @@ func (journal *VoteJournal) verifyJournal(size, lastLatestVoteNumber int) bool { return false } -func TestValidVotePool(t *testing.T) { - testVotePool(t, true) -} - -func TestInvalidVotePool(t *testing.T) { - testVotePool(t, false) -} - -func testVotePool(t *testing.T, isValidRules bool) { - walletPasswordDir, walletDir := setUpKeyManager(t) - - genesis := &core.Genesis{ - Config: params.TestChainConfig, - Alloc: types.GenesisAlloc{testAddr: {Balance: big.NewInt(1000000)}}, - } - - mux := new(event.TypeMux) - db := rawdb.NewMemoryDatabase() - chain, _ := core.NewBlockChain(db, nil, genesis, nil, ethash.NewFullFaker(), vm.Config{}, nil, nil) - - var mockEngine consensus.PoSA - if isValidRules { - mockEngine = &mockPOSA{} - } else { - mockEngine = &mockInvalidPOSA{} - } - - // Create vote pool - votePool := NewVotePool(chain, mockEngine) - - // Create vote manager - // Create a temporary file for the votes journal - file, err := os.CreateTemp("", "") - if err != nil { - t.Fatalf("failed to create temporary file path: %v", err) - } - journal := file.Name() - defer os.Remove(journal) - - // Clean up the temporary file, we only need the path for now - file.Close() - os.Remove(journal) - - voteManager, err := NewVoteManager(newTestBackend(), chain, votePool, journal, walletPasswordDir, walletDir, mockEngine) - if err != nil { - t.Fatalf("failed to create vote managers") - } - - voteJournal := voteManager.journal - - // Send the done event of downloader - time.Sleep(10 * time.Millisecond) - mux.Post(downloader.DoneEvent{}) - - bs, _ := core.GenerateChain(params.TestChainConfig, chain.Genesis(), ethash.NewFaker(), db, 1, nil) - if _, err := chain.InsertChain(bs); err != nil { - panic(err) - } - for i := 0; i < 10+blocksNumberSinceMining; i++ { - bs, _ = core.GenerateChain(params.TestChainConfig, bs[len(bs)-1], ethash.NewFaker(), db, 1, nil) - if _, err := chain.InsertChain(bs); err != nil { - panic(err) - } - } - - if !isValidRules { - if votePool.verifyStructureSizeOfVotePool(11, 11, 0, 11, 0) { - t.Fatalf("put vote failed") - } - return - } - - if !votePool.verifyStructureSizeOfVotePool(11, 11, 0, 11, 0) { - t.Fatalf("put vote failed") - } - - // Verify if votesPq is min heap - votesPq := votePool.curVotesPq - pqBuffer := make([]*types.VoteData, 0) - lastVotedBlockNumber := uint64(0) - for votesPq.Len() > 0 { - voteData := heap.Pop(votesPq).(*types.VoteData) - if voteData.TargetNumber < lastVotedBlockNumber { - t.Fatalf("votesPq verification failed") - } - lastVotedBlockNumber = voteData.TargetNumber - pqBuffer = append(pqBuffer, 
voteData) - } - for _, voteData := range pqBuffer { - heap.Push(votesPq, voteData) - } - - // Verify journal - if !voteJournal.verifyJournal(11, 11) { - t.Fatalf("journal failed") - } - - bs, _ = core.GenerateChain(params.TestChainConfig, bs[len(bs)-1], ethash.NewFaker(), db, 1, nil) - if _, err := chain.InsertChain(bs); err != nil { - panic(err) - } - - if !votePool.verifyStructureSizeOfVotePool(12, 12, 0, 12, 0) { - t.Fatalf("put vote failed") - } - - // Verify journal - if !voteJournal.verifyJournal(12, 12) { - t.Fatalf("journal failed") - } - - for i := 0; i < 256; i++ { - bs, _ = core.GenerateChain(params.TestChainConfig, bs[len(bs)-1], ethash.NewFaker(), db, 1, nil) - if _, err := chain.InsertChain(bs); err != nil { - panic(err) - } - } - - // Verify journal - if !voteJournal.verifyJournal(268, 268) { - t.Fatalf("journal failed") - } - - // currently chain size is 268, and votePool should be pruned, so vote pool size should be 256! - if !votePool.verifyStructureSizeOfVotePool(256, 256, 0, 256, 0) { - t.Fatalf("put vote failed") - } - - // Test invalid vote whose number larger than latestHeader + 13 - invalidVote := &types.VoteEnvelope{ - Data: &types.VoteData{ - TargetNumber: 1000, - }, - } - voteManager.pool.PutVote(invalidVote) - - if !votePool.verifyStructureSizeOfVotePool(256, 256, 0, 256, 0) { - t.Fatalf("put vote failed") - } - - votes := votePool.GetVotes() - if len(votes) != 256 { - t.Fatalf("get votes failed") - } - - // Verify journal - if !voteJournal.verifyJournal(268, 268) { - t.Fatalf("journal failed") - } - - // Test future votes scenario: votes number within latestBlockHeader ~ latestBlockHeader + 13 - futureVote := &types.VoteEnvelope{ - Data: &types.VoteData{ - TargetNumber: 279, - }, - } - if err := voteManager.signer.SignVote(futureVote); err != nil { - t.Fatalf("sign vote failed") - } - voteManager.pool.PutVote(futureVote) - - if !votePool.verifyStructureSizeOfVotePool(257, 256, 1, 256, 1) { - t.Fatalf("put vote failed") - } - - // Verify journal - if !voteJournal.verifyJournal(268, 268) { - t.Fatalf("journal failed") - } - - // Test duplicate vote case, shouldn'd be put into vote pool - duplicateVote := &types.VoteEnvelope{ - Data: &types.VoteData{ - TargetNumber: 279, - }, - } - if err := voteManager.signer.SignVote(duplicateVote); err != nil { - t.Fatalf("sign vote failed") - } - voteManager.pool.PutVote(duplicateVote) - - if !votePool.verifyStructureSizeOfVotePool(257, 256, 1, 256, 1) { - t.Fatalf("put vote failed") - } - - // Verify journal - if !voteJournal.verifyJournal(268, 268) { - t.Fatalf("journal failed") - } - - // Test future votes larger than latestBlockNumber + 13 should be rejected - futureVote = &types.VoteEnvelope{ - Data: &types.VoteData{ - TargetNumber: 282, - TargetHash: common.Hash{}, - }, - } - voteManager.pool.PutVote(futureVote) - if !votePool.verifyStructureSizeOfVotePool(257, 256, 1, 256, 1) { - t.Fatalf("put vote failed") - } - - // Test transfer votes from future to cur, latest block header is #288 after the following generation - // For the above BlockNumber 279, it did not have blockHash, should be assigned as well below. 
- curNumber := 268 - var futureBlockHash common.Hash - for i := 0; i < 20; i++ { - bs, _ = core.GenerateChain(params.TestChainConfig, bs[len(bs)-1], ethash.NewFaker(), db, 1, nil) - curNumber += 1 - if curNumber == 279 { - futureBlockHash = bs[0].Hash() - futureVotesMap := votePool.futureVotes - voteBox := futureVotesMap[common.Hash{}] - futureVotesMap[futureBlockHash] = voteBox - delete(futureVotesMap, common.Hash{}) - futureVotesPq := votePool.futureVotesPq - futureVotesPq.Peek().TargetHash = futureBlockHash - } - if _, err := chain.InsertChain(bs); err != nil { - panic(err) - } - } - - for i := 0; i < timeThreshold; i++ { - time.Sleep(1 * time.Second) - _, ok := votePool.curVotes[futureBlockHash] - if ok && len(votePool.curVotes[futureBlockHash].voteMessages) == 2 { - break - } - } - if votePool.curVotes[futureBlockHash] == nil || len(votePool.curVotes[futureBlockHash].voteMessages) != 2 { - t.Fatalf("transfer vote failed") - } - - // Pruner will keep the size of votePool as latestBlockHeader-255~latestBlockHeader, then final result should be 256! - if !votePool.verifyStructureSizeOfVotePool(257, 256, 0, 256, 0) { - t.Fatalf("put vote failed") - } - - // Verify journal - if !voteJournal.verifyJournal(288, 288) { - t.Fatalf("journal failed") - } - - for i := 0; i < 224; i++ { - bs, _ = core.GenerateChain(params.TestChainConfig, bs[len(bs)-1], ethash.NewFaker(), db, 1, nil) - if _, err := chain.InsertChain(bs); err != nil { - panic(err) - } - } - - // Verify journal - if !voteJournal.verifyJournal(512, 512) { - t.Fatalf("journal failed") - } - - bs, _ = core.GenerateChain(params.TestChainConfig, bs[len(bs)-1], ethash.NewFaker(), db, 1, nil) - if _, err := chain.InsertChain(bs); err != nil { - panic(err) - } - - // Verify if journal no longer than 512 - if !voteJournal.verifyJournal(512, 513) { - t.Fatalf("journal failed") - } -} - func setUpKeyManager(t *testing.T) (string, string) { walletDir := filepath.Join(t.TempDir(), "wallet") opts := []accounts.Option{} diff --git a/eth/backend.go b/eth/backend.go index 27f8fbbad2..67dedc21d6 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -46,9 +46,7 @@ import ( "github.com/ethereum/go-ethereum/eth/ethconfig" "github.com/ethereum/go-ethereum/eth/filters" "github.com/ethereum/go-ethereum/eth/gasprice" - "github.com/ethereum/go-ethereum/eth/protocols/bsc" "github.com/ethereum/go-ethereum/eth/protocols/eth" - "github.com/ethereum/go-ethereum/eth/protocols/snap" "github.com/ethereum/go-ethereum/eth/protocols/trust" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/event" @@ -124,8 +122,8 @@ type Ethereum struct { // initialisation of the common Ethereum object) func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) { // Ensure configuration values are compatible and sane - if config.SyncMode == downloader.LightSync { - return nil, errors.New("can't run eth.Ethereum in light sync mode, light mode has been deprecated") + if config.SyncMode != downloader.NoSync { + return nil, errors.New("can only run BSC client in nosync mode") } if !config.SyncMode.IsValid() { return nil, fmt.Errorf("invalid sync mode %d", config.SyncMode) @@ -298,9 +296,11 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) { if stack.Config().EnableDoubleSignMonitor { bcOps = append(bcOps, core.EnableDoubleSignChecker) } + bcOps = append(bcOps, core.EnableBlockArchiverConfig(&config.BlockArchiverConfig)) peers := newPeerSet() bcOps = append(bcOps, core.EnableBlockValidator(chainConfig, eth.engine, 
config.TriesVerifyMode, peers)) + eth.blockchain, err = core.NewBlockChain(chainDb, cacheConfig, config.Genesis, &overrides, eth.engine, vmConfig, eth.shouldPreserve, &config.TransactionHistory, bcOps...) if err != nil { return nil, err @@ -644,14 +644,9 @@ func (s *Ethereum) SyncMode() downloader.SyncMode { // network protocols to start. func (s *Ethereum) Protocols() []p2p.Protocol { protos := eth.MakeProtocols((*ethHandler)(s.handler), s.networkID, s.ethDialCandidates) - if !s.config.DisableSnapProtocol && s.config.SnapshotCache > 0 { - protos = append(protos, snap.MakeProtocols((*snapHandler)(s.handler), s.snapDialCandidates)...) - } if s.config.EnableTrustProtocol { protos = append(protos, trust.MakeProtocols((*trustHandler)(s.handler), s.snapDialCandidates)...) } - protos = append(protos, bsc.MakeProtocols((*bscHandler)(s.handler), s.bscDialCandidates)...) - return protos } diff --git a/eth/catalyst/simulated_beacon_test.go b/eth/catalyst/simulated_beacon_test.go deleted file mode 100644 index 6fa97ad87a..0000000000 --- a/eth/catalyst/simulated_beacon_test.go +++ /dev/null @@ -1,141 +0,0 @@ -// Copyright 2023 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . 
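The eth/backend.go hunk above makes two behavioural changes worth noting: eth.New now rejects every sync mode except downloader.NoSync, and the block-archiver settings are threaded into the blockchain via core.EnableBlockArchiverConfig(&config.BlockArchiverConfig), while the snap and bsc protocols are dropped from Protocols(). The sketch below shows how a caller might satisfy the new startup check. It is illustrative only: the blockarchiver.BlockArchiverConfig type name and its fields are assumptions (this hunk only shows the ethconfig field named BlockArchiverConfig; the real definition lives in core/blockarchiver/config.go), so treat the placeholder values as hypothetical.

// Hypothetical wiring example, not part of this patch.
package example

import (
	"github.com/ethereum/go-ethereum/core/blockarchiver"
	"github.com/ethereum/go-ethereum/eth"
	"github.com/ethereum/go-ethereum/eth/downloader"
	"github.com/ethereum/go-ethereum/eth/ethconfig"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/node"
)

// startGreenfieldPeer builds an eth service the way this fork expects:
// NoSync mode plus a block-archiver configuration.
func startGreenfieldPeer(stack *node.Node) (*eth.Ethereum, error) {
	cfg := ethconfig.Defaults
	cfg.SyncMode = downloader.NoSync // any other mode makes eth.New return an error
	// Placeholder literal: the actual field names are defined in
	// core/blockarchiver/config.go and are not shown in this hunk.
	cfg.BlockArchiverConfig = blockarchiver.BlockArchiverConfig{}
	backend, err := eth.New(stack, &cfg)
	if err != nil {
		log.Error("failed to create the eth service", "err", err)
		return nil, err
	}
	return backend, nil
}

With any other SyncMode, eth.New returns the error string shown in the hunk above ("can only run BSC client in nosync mode"), which is why the Full/Snap/Light sync paths no longer apply to this build.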
- -package catalyst - -import ( - "context" - "math/big" - "testing" - "time" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/eth" - "github.com/ethereum/go-ethereum/eth/downloader" - "github.com/ethereum/go-ethereum/eth/ethconfig" - "github.com/ethereum/go-ethereum/node" - "github.com/ethereum/go-ethereum/p2p" - "github.com/ethereum/go-ethereum/params" -) - -func startSimulatedBeaconEthService(t *testing.T, genesis *core.Genesis) (*node.Node, *eth.Ethereum, *SimulatedBeacon) { - t.Helper() - - n, err := node.New(&node.Config{ - P2P: p2p.Config{ - ListenAddr: "127.0.0.1:8545", - NoDiscovery: true, - MaxPeers: 0, - }, - }) - if err != nil { - t.Fatal("can't create node:", err) - } - - ethcfg := ðconfig.Config{Genesis: genesis, SyncMode: downloader.FullSync, TrieTimeout: time.Minute, TrieDirtyCache: 256, TrieCleanCache: 256} - ethservice, err := eth.New(n, ethcfg) - if err != nil { - t.Fatal("can't create eth service:", err) - } - - simBeacon, err := NewSimulatedBeacon(1, ethservice) - if err != nil { - t.Fatal("can't create simulated beacon:", err) - } - - n.RegisterLifecycle(simBeacon) - - if err := n.Start(); err != nil { - t.Fatal("can't start node:", err) - } - - ethservice.SetSynced() - return n, ethservice, simBeacon -} - -// send 20 transactions, >10 withdrawals and ensure they are included in order -// send enough transactions to fill multiple blocks -func TestSimulatedBeaconSendWithdrawals(t *testing.T) { - var withdrawals []types.Withdrawal - txs := make(map[common.Hash]types.Transaction) - - var ( - // testKey is a private key to use for funding a tester account. - testKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") - - // testAddr is the Ethereum address of the tester account. 
- testAddr = crypto.PubkeyToAddress(testKey.PublicKey) - ) - - // short period (1 second) for testing purposes - var gasLimit uint64 = 10_000_000 - genesis := core.DeveloperGenesisBlock(gasLimit, &testAddr) - node, ethService, mock := startSimulatedBeaconEthService(t, genesis) - _ = mock - defer node.Close() - - chainHeadCh := make(chan core.ChainHeadEvent, 10) - subscription := ethService.BlockChain().SubscribeChainHeadEvent(chainHeadCh) - defer subscription.Unsubscribe() - - // generate some withdrawals - for i := 0; i < 20; i++ { - withdrawals = append(withdrawals, types.Withdrawal{Index: uint64(i)}) - if err := mock.withdrawals.add(&withdrawals[i]); err != nil { - t.Fatal("addWithdrawal failed", err) - } - } - - // generate a bunch of transactions - signer := types.NewEIP155Signer(ethService.BlockChain().Config().ChainID) - for i := 0; i < 20; i++ { - tx, err := types.SignTx(types.NewTransaction(uint64(i), common.Address{}, big.NewInt(1000), params.TxGas, big.NewInt(params.InitialBaseFee), nil), signer, testKey) - if err != nil { - t.Fatalf("error signing transaction, err=%v", err) - } - txs[tx.Hash()] = *tx - - if err := ethService.APIBackend.SendTx(context.Background(), tx); err != nil { - t.Fatal("SendTx failed", err) - } - } - - includedTxs := make(map[common.Hash]struct{}) - var includedWithdrawals []uint64 - - timer := time.NewTimer(12 * time.Second) - for { - select { - case evt := <-chainHeadCh: - for _, includedTx := range evt.Block.Transactions() { - includedTxs[includedTx.Hash()] = struct{}{} - } - for _, includedWithdrawal := range evt.Block.Withdrawals() { - includedWithdrawals = append(includedWithdrawals, includedWithdrawal.Index) - } - - // ensure all withdrawals/txs included. this will take two blocks b/c number of withdrawals > 10 - if len(includedTxs) == len(txs) && len(includedWithdrawals) == len(withdrawals) && evt.Block.Number().Cmp(big.NewInt(2)) == 0 { - return - } - case <-timer.C: - t.Fatal("timed out without including all withdrawals/txs") - } - } -} diff --git a/eth/downloader/downloader_test.go b/eth/downloader/downloader_test.go deleted file mode 100644 index 3c113b9134..0000000000 --- a/eth/downloader/downloader_test.go +++ /dev/null @@ -1,1335 +0,0 @@ -// Copyright 2015 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . 
- -package downloader - -import ( - "fmt" - "math/big" - "os" - "strings" - "sync" - "sync/atomic" - "testing" - "time" - - "github.com/ethereum/go-ethereum" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/consensus/ethash" - "github.com/ethereum/go-ethereum/core" - "github.com/ethereum/go-ethereum/core/rawdb" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/core/vm" - "github.com/ethereum/go-ethereum/eth/protocols/eth" - "github.com/ethereum/go-ethereum/eth/protocols/snap" - "github.com/ethereum/go-ethereum/event" - "github.com/ethereum/go-ethereum/log" - "github.com/ethereum/go-ethereum/params" - "github.com/ethereum/go-ethereum/rlp" - "github.com/ethereum/go-ethereum/trie" -) - -// downloadTester is a test simulator for mocking out local block chain. -type downloadTester struct { - freezer string - chain *core.BlockChain - downloader *Downloader - - peers map[string]*downloadTesterPeer - lock sync.RWMutex -} - -// newTester creates a new downloader test mocker. -func newTester(t *testing.T) *downloadTester { - return newTesterWithNotification(t, nil) -} - -// newTester creates a new downloader test mocker. -func newTesterWithNotification(t *testing.T, success func()) *downloadTester { - freezer := t.TempDir() - db, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), freezer, "", false, false, false, false, false) - if err != nil { - panic(err) - } - t.Cleanup(func() { - db.Close() - }) - gspec := &core.Genesis{ - Config: params.TestChainConfig, - Alloc: types.GenesisAlloc{testAddress: {Balance: big.NewInt(1000000000000000)}}, - BaseFee: big.NewInt(params.InitialBaseFee), - } - chain, err := core.NewBlockChain(db, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) - if err != nil { - panic(err) - } - tester := &downloadTester{ - freezer: freezer, - chain: chain, - peers: make(map[string]*downloadTesterPeer), - } - tester.downloader = New(db, new(event.TypeMux), tester.chain, nil, tester.dropPeer, success) - return tester -} - -// terminate aborts any operations on the embedded downloader and releases all -// held resources. -func (dl *downloadTester) terminate() { - dl.downloader.Terminate() - dl.chain.Stop() - - os.RemoveAll(dl.freezer) -} - -// sync starts synchronizing with a remote peer, blocking until it completes. -func (dl *downloadTester) sync(id string, td *big.Int, mode SyncMode) error { - head := dl.peers[id].chain.CurrentBlock() - if td == nil { - // If no particular TD was requested, load from the peer's blockchain - td = dl.peers[id].chain.GetTd(head.Hash(), head.Number.Uint64()) - } - // Synchronise with the chosen peer and ensure proper cleanup afterwards - err := dl.downloader.synchronise(id, head.Hash(), td, nil, mode, false, nil) - select { - case <-dl.downloader.cancelCh: - // Ok, downloader fully cancelled after sync cycle - default: - // Downloader is still accepting packets, can block a peer up - panic("downloader active post sync cycle") // panic will be caught by tester - } - return err -} - -// newPeer registers a new block download source into the downloader. 
-func (dl *downloadTester) newPeer(id string, version uint, blocks []*types.Block) *downloadTesterPeer { - dl.lock.Lock() - defer dl.lock.Unlock() - - peer := &downloadTesterPeer{ - dl: dl, - id: id, - chain: newTestBlockchain(blocks), - withholdHeaders: make(map[common.Hash]struct{}), - } - dl.peers[id] = peer - - if err := dl.downloader.RegisterPeer(id, version, peer); err != nil { - panic(err) - } - if err := dl.downloader.SnapSyncer.Register(peer); err != nil { - panic(err) - } - return peer -} - -// dropPeer simulates a hard peer removal from the connection pool. -func (dl *downloadTester) dropPeer(id string) { - dl.lock.Lock() - defer dl.lock.Unlock() - - delete(dl.peers, id) - dl.downloader.SnapSyncer.Unregister(id) - dl.downloader.UnregisterPeer(id) -} - -type downloadTesterPeer struct { - dl *downloadTester - id string - chain *core.BlockChain - - withholdHeaders map[common.Hash]struct{} -} - -func (dlp *downloadTesterPeer) MarkLagging() { -} - -// Head constructs a function to retrieve a peer's current head hash -// and total difficulty. -func (dlp *downloadTesterPeer) Head() (common.Hash, *big.Int) { - head := dlp.chain.CurrentBlock() - return head.Hash(), dlp.chain.GetTd(head.Hash(), head.Number.Uint64()) -} - -func unmarshalRlpHeaders(rlpdata []rlp.RawValue) []*types.Header { - var headers = make([]*types.Header, len(rlpdata)) - for i, data := range rlpdata { - var h types.Header - if err := rlp.DecodeBytes(data, &h); err != nil { - panic(err) - } - headers[i] = &h - } - return headers -} - -// RequestHeadersByHash constructs a GetBlockHeaders function based on a hashed -// origin; associated with a particular peer in the download tester. The returned -// function can be used to retrieve batches of headers from the particular peer. -func (dlp *downloadTesterPeer) RequestHeadersByHash(origin common.Hash, amount int, skip int, reverse bool, sink chan *eth.Response) (*eth.Request, error) { - // Service the header query via the live handler code - rlpHeaders := eth.ServiceGetBlockHeadersQuery(dlp.chain, ð.GetBlockHeadersRequest{ - Origin: eth.HashOrNumber{ - Hash: origin, - }, - Amount: uint64(amount), - Skip: uint64(skip), - Reverse: reverse, - }, nil) - headers := unmarshalRlpHeaders(rlpHeaders) - // If a malicious peer is simulated withholding headers, delete them - for hash := range dlp.withholdHeaders { - for i, header := range headers { - if header.Hash() == hash { - headers = append(headers[:i], headers[i+1:]...) - break - } - } - } - hashes := make([]common.Hash, len(headers)) - for i, header := range headers { - hashes[i] = header.Hash() - } - // Deliver the headers to the downloader - req := ð.Request{ - Peer: dlp.id, - } - res := ð.Response{ - Req: req, - Res: (*eth.BlockHeadersRequest)(&headers), - Meta: hashes, - Time: 1, - Done: make(chan error, 1), // Ignore the returned status - } - go func() { - sink <- res - }() - return req, nil -} - -// RequestHeadersByNumber constructs a GetBlockHeaders function based on a numbered -// origin; associated with a particular peer in the download tester. The returned -// function can be used to retrieve batches of headers from the particular peer. 
-func (dlp *downloadTesterPeer) RequestHeadersByNumber(origin uint64, amount int, skip int, reverse bool, sink chan *eth.Response) (*eth.Request, error) { - // Service the header query via the live handler code - rlpHeaders := eth.ServiceGetBlockHeadersQuery(dlp.chain, ð.GetBlockHeadersRequest{ - Origin: eth.HashOrNumber{ - Number: origin, - }, - Amount: uint64(amount), - Skip: uint64(skip), - Reverse: reverse, - }, nil) - headers := unmarshalRlpHeaders(rlpHeaders) - // If a malicious peer is simulated withholding headers, delete them - for hash := range dlp.withholdHeaders { - for i, header := range headers { - if header.Hash() == hash { - headers = append(headers[:i], headers[i+1:]...) - break - } - } - } - hashes := make([]common.Hash, len(headers)) - for i, header := range headers { - hashes[i] = header.Hash() - } - // Deliver the headers to the downloader - req := ð.Request{ - Peer: dlp.id, - } - res := ð.Response{ - Req: req, - Res: (*eth.BlockHeadersRequest)(&headers), - Meta: hashes, - Time: 1, - Done: make(chan error, 1), // Ignore the returned status - } - go func() { - sink <- res - }() - return req, nil -} - -// RequestBodies constructs a getBlockBodies method associated with a particular -// peer in the download tester. The returned function can be used to retrieve -// batches of block bodies from the particularly requested peer. -func (dlp *downloadTesterPeer) RequestBodies(hashes []common.Hash, sink chan *eth.Response) (*eth.Request, error) { - blobs := eth.ServiceGetBlockBodiesQuery(dlp.chain, hashes) - - bodies := make([]*eth.BlockBody, len(blobs)) - for i, blob := range blobs { - bodies[i] = new(eth.BlockBody) - rlp.DecodeBytes(blob, bodies[i]) - } - var ( - txsHashes = make([]common.Hash, len(bodies)) - uncleHashes = make([]common.Hash, len(bodies)) - withdrawalHashes = make([]common.Hash, len(bodies)) - ) - hasher := trie.NewStackTrie(nil) - for i, body := range bodies { - txsHashes[i] = types.DeriveSha(types.Transactions(body.Transactions), hasher) - uncleHashes[i] = types.CalcUncleHash(body.Uncles) - } - req := ð.Request{ - Peer: dlp.id, - } - res := ð.Response{ - Req: req, - Res: (*eth.BlockBodiesResponse)(&bodies), - Meta: [][]common.Hash{txsHashes, uncleHashes, withdrawalHashes}, - Time: 1, - Done: make(chan error, 1), // Ignore the returned status - } - go func() { - sink <- res - }() - return req, nil -} - -// RequestReceipts constructs a getReceipts method associated with a particular -// peer in the download tester. The returned function can be used to retrieve -// batches of block receipts from the particularly requested peer. -func (dlp *downloadTesterPeer) RequestReceipts(hashes []common.Hash, sink chan *eth.Response) (*eth.Request, error) { - blobs := eth.ServiceGetReceiptsQuery(dlp.chain, hashes) - - receipts := make([][]*types.Receipt, len(blobs)) - for i, blob := range blobs { - rlp.DecodeBytes(blob, &receipts[i]) - } - hasher := trie.NewStackTrie(nil) - hashes = make([]common.Hash, len(receipts)) - for i, receipt := range receipts { - hashes[i] = types.DeriveSha(types.Receipts(receipt), hasher) - } - req := ð.Request{ - Peer: dlp.id, - } - res := ð.Response{ - Req: req, - Res: (*eth.ReceiptsResponse)(&receipts), - Meta: hashes, - Time: 1, - Done: make(chan error, 1), // Ignore the returned status - } - go func() { - sink <- res - }() - return req, nil -} - -// ID retrieves the peer's unique identifier. 
-func (dlp *downloadTesterPeer) ID() string { - return dlp.id -} - -// RequestAccountRange fetches a batch of accounts rooted in a specific account -// trie, starting with the origin. -func (dlp *downloadTesterPeer) RequestAccountRange(id uint64, root, origin, limit common.Hash, bytes uint64) error { - // Create the request and service it - req := &snap.GetAccountRangePacket{ - ID: id, - Root: root, - Origin: origin, - Limit: limit, - Bytes: bytes, - } - slimaccs, proofs := snap.ServiceGetAccountRangeQuery(dlp.chain, req) - - // We need to convert to non-slim format, delegate to the packet code - res := &snap.AccountRangePacket{ - ID: id, - Accounts: slimaccs, - Proof: proofs, - } - hashes, accounts, _ := res.Unpack() - - go dlp.dl.downloader.SnapSyncer.OnAccounts(dlp, id, hashes, accounts, proofs) - return nil -} - -// RequestStorageRanges fetches a batch of storage slots belonging to one or -// more accounts. If slots from only one account is requested, an origin marker -// may also be used to retrieve from there. -func (dlp *downloadTesterPeer) RequestStorageRanges(id uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, bytes uint64) error { - // Create the request and service it - req := &snap.GetStorageRangesPacket{ - ID: id, - Accounts: accounts, - Root: root, - Origin: origin, - Limit: limit, - Bytes: bytes, - } - storage, proofs := snap.ServiceGetStorageRangesQuery(dlp.chain, req) - - // We need to convert to demultiplex, delegate to the packet code - res := &snap.StorageRangesPacket{ - ID: id, - Slots: storage, - Proof: proofs, - } - hashes, slots := res.Unpack() - - go dlp.dl.downloader.SnapSyncer.OnStorage(dlp, id, hashes, slots, proofs) - return nil -} - -// RequestByteCodes fetches a batch of bytecodes by hash. -func (dlp *downloadTesterPeer) RequestByteCodes(id uint64, hashes []common.Hash, bytes uint64) error { - req := &snap.GetByteCodesPacket{ - ID: id, - Hashes: hashes, - Bytes: bytes, - } - codes := snap.ServiceGetByteCodesQuery(dlp.chain, req) - go dlp.dl.downloader.SnapSyncer.OnByteCodes(dlp, id, codes) - return nil -} - -// RequestTrieNodes fetches a batch of account or storage trie nodes rooted in -// a specific state trie. -func (dlp *downloadTesterPeer) RequestTrieNodes(id uint64, root common.Hash, paths []snap.TrieNodePathSet, bytes uint64) error { - req := &snap.GetTrieNodesPacket{ - ID: id, - Root: root, - Paths: paths, - Bytes: bytes, - } - nodes, _ := snap.ServiceGetTrieNodesQuery(dlp.chain, req, time.Now()) - go dlp.dl.downloader.SnapSyncer.OnTrieNodes(dlp, id, nodes) - return nil -} - -// Log retrieves the peer's own contextual logger. -func (dlp *downloadTesterPeer) Log() log.Logger { - return log.New("peer", dlp.id) -} - -// assertOwnChain checks if the local chain contains the correct number of items -// of the various chain components. 
-func assertOwnChain(t *testing.T, tester *downloadTester, length int) { - // Mark this method as a helper to report errors at callsite, not in here - t.Helper() - - headers, blocks, receipts := length, length, length - if tester.downloader.getMode() == LightSync { - blocks, receipts = 1, 1 - } - if hs := int(tester.chain.CurrentHeader().Number.Uint64()) + 1; hs != headers { - t.Fatalf("synchronised headers mismatch: have %v, want %v", hs, headers) - } - if bs := int(tester.chain.CurrentBlock().Number.Uint64()) + 1; bs != blocks { - t.Fatalf("synchronised blocks mismatch: have %v, want %v", bs, blocks) - } - if rs := int(tester.chain.CurrentSnapBlock().Number.Uint64()) + 1; rs != receipts { - t.Fatalf("synchronised receipts mismatch: have %v, want %v", rs, receipts) - } -} - -func TestCanonicalSynchronisation68Full(t *testing.T) { testCanonSync(t, eth.ETH68, FullSync) } -func TestCanonicalSynchronisation68Snap(t *testing.T) { testCanonSync(t, eth.ETH68, SnapSync) } -func TestCanonicalSynchronisation68Light(t *testing.T) { testCanonSync(t, eth.ETH68, LightSync) } - -func testCanonSync(t *testing.T, protocol uint, mode SyncMode) { - tester := newTester(t) - defer tester.terminate() - - // Create a small enough block chain to download - chain := testChainBase.shorten(blockCacheMaxItems - 15) - tester.newPeer("peer", protocol, chain.blocks[1:]) - - // Synchronise with the peer and make sure all relevant data was retrieved - if err := tester.sync("peer", nil, mode); err != nil { - t.Fatalf("failed to synchronise blocks: %v", err) - } - assertOwnChain(t, tester, len(chain.blocks)) -} - -// Tests that if a large batch of blocks are being downloaded, it is throttled -// until the cached blocks are retrieved. -func TestThrottling68Full(t *testing.T) { testThrottling(t, eth.ETH68, FullSync) } -func TestThrottling68Snap(t *testing.T) { testThrottling(t, eth.ETH68, SnapSync) } - -func testThrottling(t *testing.T, protocol uint, mode SyncMode) { - tester := newTester(t) - defer tester.terminate() - - // Create a long block chain to download and the tester - targetBlocks := len(testChainBase.blocks) - 1 - tester.newPeer("peer", protocol, testChainBase.blocks[1:]) - - // Wrap the importer to allow stepping - var blocked atomic.Uint32 - proceed := make(chan struct{}) - tester.downloader.chainInsertHook = func(results []*fetchResult, _ chan struct{}) { - blocked.Store(uint32(len(results))) - <-proceed - } - // Start a synchronisation concurrently - errc := make(chan error, 1) - go func() { - errc <- tester.sync("peer", nil, mode) - }() - // Iteratively take some blocks, always checking the retrieval count - for { - // Check the retrieval count synchronously (! 
reason for this ugly block) - tester.lock.RLock() - retrieved := int(tester.chain.CurrentSnapBlock().Number.Uint64()) + 1 - tester.lock.RUnlock() - if retrieved >= targetBlocks+1 { - break - } - // Wait a bit for sync to throttle itself - var cached, frozen int - for start := time.Now(); time.Since(start) < 3*time.Second; { - time.Sleep(25 * time.Millisecond) - - tester.lock.Lock() - tester.downloader.queue.lock.Lock() - tester.downloader.queue.resultCache.lock.Lock() - { - cached = tester.downloader.queue.resultCache.countCompleted() - frozen = int(blocked.Load()) - retrieved = int(tester.chain.CurrentSnapBlock().Number.Uint64()) + 1 - } - tester.downloader.queue.resultCache.lock.Unlock() - tester.downloader.queue.lock.Unlock() - tester.lock.Unlock() - - if cached == blockCacheMaxItems || - cached == blockCacheMaxItems-reorgProtHeaderDelay || - retrieved+cached+frozen == targetBlocks+1 || - retrieved+cached+frozen == targetBlocks+1-reorgProtHeaderDelay { - break - } - } - // Make sure we filled up the cache, then exhaust it - time.Sleep(25 * time.Millisecond) // give it a chance to screw up - tester.lock.RLock() - retrieved = int(tester.chain.CurrentSnapBlock().Number.Uint64()) + 1 - tester.lock.RUnlock() - if cached != blockCacheMaxItems && cached != blockCacheMaxItems-reorgProtHeaderDelay && retrieved+cached+frozen != targetBlocks+1 && retrieved+cached+frozen != targetBlocks+1-reorgProtHeaderDelay { - t.Fatalf("block count mismatch: have %v, want %v (owned %v, blocked %v, target %v)", cached, blockCacheMaxItems, retrieved, frozen, targetBlocks+1) - } - // Permit the blocked blocks to import - if blocked.Load() > 0 { - blocked.Store(uint32(0)) - proceed <- struct{}{} - } - } - // Check that we haven't pulled more blocks than available - assertOwnChain(t, tester, targetBlocks+1) - if err := <-errc; err != nil { - t.Fatalf("block synchronization failed: %v", err) - } -} - -// Tests that simple synchronization against a forked chain works correctly. In -// this test common ancestor lookup should *not* be short circuited, and a full -// binary search should be executed. -func TestForkedSync68Full(t *testing.T) { testForkedSync(t, eth.ETH68, FullSync) } -func TestForkedSync68Snap(t *testing.T) { testForkedSync(t, eth.ETH68, SnapSync) } -func TestForkedSync68Light(t *testing.T) { testForkedSync(t, eth.ETH68, LightSync) } - -func testForkedSync(t *testing.T, protocol uint, mode SyncMode) { - tester := newTester(t) - defer tester.terminate() - - chainA := testChainForkLightA.shorten(len(testChainBase.blocks) + 80) - chainB := testChainForkLightB.shorten(len(testChainBase.blocks) + 81) - tester.newPeer("fork A", protocol, chainA.blocks[1:]) - tester.newPeer("fork B", protocol, chainB.blocks[1:]) - // Synchronise with the peer and make sure all blocks were retrieved - if err := tester.sync("fork A", nil, mode); err != nil { - t.Fatalf("failed to synchronise blocks: %v", err) - } - assertOwnChain(t, tester, len(chainA.blocks)) - - // Synchronise with the second peer and make sure that fork is pulled too - if err := tester.sync("fork B", nil, mode); err != nil { - t.Fatalf("failed to synchronise blocks: %v", err) - } - assertOwnChain(t, tester, len(chainB.blocks)) -} - -// Tests that synchronising against a much shorter but much heavier fork works -// currently and is not dropped. 
-func TestHeavyForkedSync68Full(t *testing.T) { testHeavyForkedSync(t, eth.ETH68, FullSync) } -func TestHeavyForkedSync68Snap(t *testing.T) { testHeavyForkedSync(t, eth.ETH68, SnapSync) } -func TestHeavyForkedSync68Light(t *testing.T) { testHeavyForkedSync(t, eth.ETH68, LightSync) } - -func testHeavyForkedSync(t *testing.T, protocol uint, mode SyncMode) { - tester := newTester(t) - defer tester.terminate() - - chainA := testChainForkLightA.shorten(len(testChainBase.blocks) + 80) - chainB := testChainForkHeavy.shorten(len(testChainBase.blocks) + 79) - tester.newPeer("light", protocol, chainA.blocks[1:]) - tester.newPeer("heavy", protocol, chainB.blocks[1:]) - - // Synchronise with the peer and make sure all blocks were retrieved - if err := tester.sync("light", nil, mode); err != nil { - t.Fatalf("failed to synchronise blocks: %v", err) - } - assertOwnChain(t, tester, len(chainA.blocks)) - - // Synchronise with the second peer and make sure that fork is pulled too - if err := tester.sync("heavy", nil, mode); err != nil { - t.Fatalf("failed to synchronise blocks: %v", err) - } - assertOwnChain(t, tester, len(chainB.blocks)) -} - -// Tests that chain forks are contained within a certain interval of the current -// chain head, ensuring that malicious peers cannot waste resources by feeding -// long dead chains. -func TestBoundedForkedSync68Full(t *testing.T) { testBoundedForkedSync(t, eth.ETH68, FullSync) } -func TestBoundedForkedSync68Snap(t *testing.T) { testBoundedForkedSync(t, eth.ETH68, SnapSync) } -func TestBoundedForkedSync68Light(t *testing.T) { testBoundedForkedSync(t, eth.ETH68, LightSync) } - -func testBoundedForkedSync(t *testing.T, protocol uint, mode SyncMode) { - tester := newTester(t) - defer tester.terminate() - - chainA := testChainForkLightA - chainB := testChainForkLightB - tester.newPeer("original", protocol, chainA.blocks[1:]) - tester.newPeer("rewriter", protocol, chainB.blocks[1:]) - - // Synchronise with the peer and make sure all blocks were retrieved - if err := tester.sync("original", nil, mode); err != nil { - t.Fatalf("failed to synchronise blocks: %v", err) - } - assertOwnChain(t, tester, len(chainA.blocks)) - - // Synchronise with the second peer and ensure that the fork is rejected to being too old - if err := tester.sync("rewriter", nil, mode); err != errInvalidAncestor { - t.Fatalf("sync failure mismatch: have %v, want %v", err, errInvalidAncestor) - } -} - -// Tests that chain forks are contained within a certain interval of the current -// chain head for short but heavy forks too. These are a bit special because they -// take different ancestor lookup paths. 
-func TestBoundedHeavyForkedSync68Full(t *testing.T) { - testBoundedHeavyForkedSync(t, eth.ETH68, FullSync) -} -func TestBoundedHeavyForkedSync68Snap(t *testing.T) { - testBoundedHeavyForkedSync(t, eth.ETH68, SnapSync) -} -func TestBoundedHeavyForkedSync68Light(t *testing.T) { - testBoundedHeavyForkedSync(t, eth.ETH68, LightSync) -} - -func testBoundedHeavyForkedSync(t *testing.T, protocol uint, mode SyncMode) { - tester := newTester(t) - defer tester.terminate() - - // Create a long enough forked chain - chainA := testChainForkLightA - chainB := testChainForkHeavy - tester.newPeer("original", protocol, chainA.blocks[1:]) - - // Synchronise with the peer and make sure all blocks were retrieved - if err := tester.sync("original", nil, mode); err != nil { - t.Fatalf("failed to synchronise blocks: %v", err) - } - assertOwnChain(t, tester, len(chainA.blocks)) - - tester.newPeer("heavy-rewriter", protocol, chainB.blocks[1:]) - // Synchronise with the second peer and ensure that the fork is rejected to being too old - if err := tester.sync("heavy-rewriter", nil, mode); err != errInvalidAncestor { - t.Fatalf("sync failure mismatch: have %v, want %v", err, errInvalidAncestor) - } -} - -// Tests that a canceled download wipes all previously accumulated state. -func TestCancel68Full(t *testing.T) { testCancel(t, eth.ETH68, FullSync) } -func TestCancel68Snap(t *testing.T) { testCancel(t, eth.ETH68, SnapSync) } -func TestCancel68Light(t *testing.T) { testCancel(t, eth.ETH68, LightSync) } - -func testCancel(t *testing.T, protocol uint, mode SyncMode) { - tester := newTester(t) - defer tester.terminate() - - chain := testChainBase.shorten(MaxHeaderFetch) - tester.newPeer("peer", protocol, chain.blocks[1:]) - - // Make sure canceling works with a pristine downloader - tester.downloader.Cancel() - if !tester.downloader.queue.Idle() { - t.Errorf("download queue not idle") - } - // Synchronise with the peer, but cancel afterwards - if err := tester.sync("peer", nil, mode); err != nil { - t.Fatalf("failed to synchronise blocks: %v", err) - } - tester.downloader.Cancel() - if !tester.downloader.queue.Idle() { - t.Errorf("download queue not idle") - } -} - -// Tests that synchronisation from multiple peers works as intended (multi thread sanity test). -func TestMultiSynchronisation68Full(t *testing.T) { testMultiSynchronisation(t, eth.ETH68, FullSync) } -func TestMultiSynchronisation68Snap(t *testing.T) { testMultiSynchronisation(t, eth.ETH68, SnapSync) } -func TestMultiSynchronisation68Light(t *testing.T) { testMultiSynchronisation(t, eth.ETH68, LightSync) } - -func testMultiSynchronisation(t *testing.T, protocol uint, mode SyncMode) { - tester := newTester(t) - defer tester.terminate() - - // Create various peers with various parts of the chain - targetPeers := 8 - chain := testChainBase.shorten(targetPeers * 100) - - for i := 0; i < targetPeers; i++ { - id := fmt.Sprintf("peer #%d", i) - tester.newPeer(id, protocol, chain.shorten(len(chain.blocks) / (i + 1)).blocks[1:]) - } - if err := tester.sync("peer #0", nil, mode); err != nil { - t.Fatalf("failed to synchronise blocks: %v", err) - } - assertOwnChain(t, tester, len(chain.blocks)) -} - -// Tests that synchronisations behave well in multi-version protocol environments -// and not wreak havoc on other nodes in the network. 
-func TestMultiProtoSynchronisation68Full(t *testing.T) { testMultiProtoSync(t, eth.ETH68, FullSync) } -func TestMultiProtoSynchronisation68Snap(t *testing.T) { testMultiProtoSync(t, eth.ETH68, SnapSync) } -func TestMultiProtoSynchronisation68Light(t *testing.T) { testMultiProtoSync(t, eth.ETH68, LightSync) } - -func testMultiProtoSync(t *testing.T, protocol uint, mode SyncMode) { - tester := newTester(t) - defer tester.terminate() - - // Create a small enough block chain to download - chain := testChainBase.shorten(blockCacheMaxItems - 15) - - // Create peers of every type - tester.newPeer("peer 68", eth.ETH68, chain.blocks[1:]) - - // Synchronise with the requested peer and make sure all blocks were retrieved - if err := tester.sync(fmt.Sprintf("peer %d", protocol), nil, mode); err != nil { - t.Fatalf("failed to synchronise blocks: %v", err) - } - assertOwnChain(t, tester, len(chain.blocks)) - - // Check that no peers have been dropped off - for _, version := range []int{68} { - peer := fmt.Sprintf("peer %d", version) - if _, ok := tester.peers[peer]; !ok { - t.Errorf("%s dropped", peer) - } - } -} - -// Tests that if a block is empty (e.g. header only), no body request should be -// made, and instead the header should be assembled into a whole block in itself. -func TestEmptyShortCircuit68Full(t *testing.T) { testEmptyShortCircuit(t, eth.ETH68, FullSync) } -func TestEmptyShortCircuit68Snap(t *testing.T) { testEmptyShortCircuit(t, eth.ETH68, SnapSync) } -func TestEmptyShortCircuit68Light(t *testing.T) { testEmptyShortCircuit(t, eth.ETH68, LightSync) } - -func testEmptyShortCircuit(t *testing.T, protocol uint, mode SyncMode) { - tester := newTester(t) - defer tester.terminate() - - // Create a block chain to download - chain := testChainBase - tester.newPeer("peer", protocol, chain.blocks[1:]) - - // Instrument the downloader to signal body requests - var bodiesHave, receiptsHave atomic.Int32 - tester.downloader.bodyFetchHook = func(headers []*types.Header) { - bodiesHave.Add(int32(len(headers))) - } - tester.downloader.receiptFetchHook = func(headers []*types.Header) { - receiptsHave.Add(int32(len(headers))) - } - // Synchronise with the peer and make sure all blocks were retrieved - if err := tester.sync("peer", nil, mode); err != nil { - t.Fatalf("failed to synchronise blocks: %v", err) - } - assertOwnChain(t, tester, len(chain.blocks)) - - // Validate the number of block bodies that should have been requested - bodiesNeeded, receiptsNeeded := 0, 0 - for _, block := range chain.blocks[1:] { - if mode != LightSync && (len(block.Transactions()) > 0 || len(block.Uncles()) > 0) { - bodiesNeeded++ - } - } - for _, block := range chain.blocks[1:] { - if mode == SnapSync && len(block.Transactions()) > 0 { - receiptsNeeded++ - } - } - if int(bodiesHave.Load()) != bodiesNeeded { - t.Errorf("body retrieval count mismatch: have %v, want %v", bodiesHave.Load(), bodiesNeeded) - } - if int(receiptsHave.Load()) != receiptsNeeded { - t.Errorf("receipt retrieval count mismatch: have %v, want %v", receiptsHave.Load(), receiptsNeeded) - } -} - -// Tests that headers are enqueued continuously, preventing malicious nodes from -// stalling the downloader by feeding gapped header chains. 
-func TestMissingHeaderAttack68Full(t *testing.T) { testMissingHeaderAttack(t, eth.ETH68, FullSync) } -func TestMissingHeaderAttack68Snap(t *testing.T) { testMissingHeaderAttack(t, eth.ETH68, SnapSync) } -func TestMissingHeaderAttack68Light(t *testing.T) { testMissingHeaderAttack(t, eth.ETH68, LightSync) } - -func testMissingHeaderAttack(t *testing.T, protocol uint, mode SyncMode) { - tester := newTester(t) - defer tester.terminate() - - chain := testChainBase.shorten(blockCacheMaxItems - 15) - - attacker := tester.newPeer("attack", protocol, chain.blocks[1:]) - attacker.withholdHeaders[chain.blocks[len(chain.blocks)/2-1].Hash()] = struct{}{} - - if err := tester.sync("attack", nil, mode); err == nil { - t.Fatalf("succeeded attacker synchronisation") - } - // Synchronise with the valid peer and make sure sync succeeds - tester.newPeer("valid", protocol, chain.blocks[1:]) - if err := tester.sync("valid", nil, mode); err != nil { - t.Fatalf("failed to synchronise blocks: %v", err) - } - assertOwnChain(t, tester, len(chain.blocks)) -} - -// Tests that if requested headers are shifted (i.e. first is missing), the queue -// detects the invalid numbering. -func TestShiftedHeaderAttack68Full(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH68, FullSync) } -func TestShiftedHeaderAttack68Snap(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH68, SnapSync) } -func TestShiftedHeaderAttack68Light(t *testing.T) { testShiftedHeaderAttack(t, eth.ETH68, LightSync) } - -func testShiftedHeaderAttack(t *testing.T, protocol uint, mode SyncMode) { - tester := newTester(t) - defer tester.terminate() - - chain := testChainBase.shorten(blockCacheMaxItems - 15) - - // Attempt a full sync with an attacker feeding shifted headers - attacker := tester.newPeer("attack", protocol, chain.blocks[1:]) - attacker.withholdHeaders[chain.blocks[1].Hash()] = struct{}{} - - if err := tester.sync("attack", nil, mode); err == nil { - t.Fatalf("succeeded attacker synchronisation") - } - // Synchronise with the valid peer and make sure sync succeeds - tester.newPeer("valid", protocol, chain.blocks[1:]) - if err := tester.sync("valid", nil, mode); err != nil { - t.Fatalf("failed to synchronise blocks: %v", err) - } - assertOwnChain(t, tester, len(chain.blocks)) -} - -// Tests that a peer advertising a high TD doesn't get to stall the downloader -// afterwards by not sending any useful hashes. -func TestHighTDStarvationAttack68Full(t *testing.T) { - testHighTDStarvationAttack(t, eth.ETH68, FullSync) -} -func TestHighTDStarvationAttack68Snap(t *testing.T) { - testHighTDStarvationAttack(t, eth.ETH68, SnapSync) -} -func TestHighTDStarvationAttack68Light(t *testing.T) { - testHighTDStarvationAttack(t, eth.ETH68, LightSync) -} - -func testHighTDStarvationAttack(t *testing.T, protocol uint, mode SyncMode) { - tester := newTester(t) - defer tester.terminate() - - chain := testChainBase.shorten(1) - tester.newPeer("attack", protocol, chain.blocks[1:]) - if err := tester.sync("attack", big.NewInt(1000000), mode); err != errLaggingPeer { - t.Fatalf("synchronisation error mismatch: have %v, want %v", err, errLaggingPeer) - } -} - -// Tests that misbehaving peers are disconnected, whilst behaving ones are not. 
-func TestBlockHeaderAttackerDropping68(t *testing.T) { testBlockHeaderAttackerDropping(t, eth.ETH68) } - -func testBlockHeaderAttackerDropping(t *testing.T, protocol uint) { - // Define the disconnection requirement for individual hash fetch errors - tests := []struct { - result error - drop bool - }{ - {nil, false}, // Sync succeeded, all is well - {errBusy, false}, // Sync is already in progress, no problem - {errUnknownPeer, false}, // Peer is unknown, was already dropped, don't double drop - {errBadPeer, true}, // Peer was deemed bad for some reason, drop it - {errStallingPeer, true}, // Peer was detected to be stalling, drop it - {errUnsyncedPeer, true}, // Peer was detected to be unsynced, drop it - {errNoPeers, false}, // No peers to download from, soft race, no issue - {errTimeout, true}, // No hashes received in due time, drop the peer - {errEmptyHeaderSet, true}, // No headers were returned as a response, drop as it's a dead end - {errPeersUnavailable, true}, // Nobody had the advertised blocks, drop the advertiser - {errInvalidAncestor, true}, // Agreed upon ancestor is not acceptable, drop the chain rewriter - {errInvalidChain, true}, // Hash chain was detected as invalid, definitely drop - {errInvalidBody, false}, // A bad peer was detected, but not the sync origin - {errInvalidReceipt, false}, // A bad peer was detected, but not the sync origin - {errCancelContentProcessing, false}, // Synchronisation was canceled, origin may be innocent, don't drop - } - // Run the tests and check disconnection status - tester := newTester(t) - defer tester.terminate() - chain := testChainBase.shorten(1) - - for i, tt := range tests { - // Register a new peer and ensure its presence - id := fmt.Sprintf("test %d", i) - tester.newPeer(id, protocol, chain.blocks[1:]) - if _, ok := tester.peers[id]; !ok { - t.Fatalf("test %d: registered peer not found", i) - } - // Simulate a synchronisation and check the required result - tester.downloader.synchroniseMock = func(string, common.Hash) error { return tt.result } - - tester.downloader.LegacySync(id, tester.chain.Genesis().Hash(), big.NewInt(1000), nil, FullSync) - if _, ok := tester.peers[id]; !ok != tt.drop { - t.Errorf("test %d: peer drop mismatch for %v: have %v, want %v", i, tt.result, !ok, tt.drop) - } - } -} - -// Tests that synchronisation progress (origin block number, current block number -// and highest block number) is tracked and updated correctly. 
-func TestSyncProgress68Full(t *testing.T) { testSyncProgress(t, eth.ETH68, FullSync) } -func TestSyncProgress68Snap(t *testing.T) { testSyncProgress(t, eth.ETH68, SnapSync) } -func TestSyncProgress68Light(t *testing.T) { testSyncProgress(t, eth.ETH68, LightSync) } - -func testSyncProgress(t *testing.T, protocol uint, mode SyncMode) { - tester := newTester(t) - defer tester.terminate() - - chain := testChainBase.shorten(blockCacheMaxItems - 15) - - // Set a sync init hook to catch progress changes - starting := make(chan struct{}) - progress := make(chan struct{}) - - tester.downloader.syncInitHook = func(origin, latest uint64) { - starting <- struct{}{} - <-progress - } - checkProgress(t, tester.downloader, "pristine", ethereum.SyncProgress{}) - - // Synchronise half the blocks and check initial progress - tester.newPeer("peer-half", protocol, chain.shorten(len(chain.blocks) / 2).blocks[1:]) - pending := new(sync.WaitGroup) - pending.Add(1) - - go func() { - defer pending.Done() - if err := tester.sync("peer-half", nil, mode); err != nil { - panic(fmt.Sprintf("failed to synchronise blocks: %v", err)) - } - }() - <-starting - checkProgress(t, tester.downloader, "initial", ethereum.SyncProgress{ - HighestBlock: uint64(len(chain.blocks)/2 - 1), - }) - progress <- struct{}{} - pending.Wait() - - // Synchronise all the blocks and check continuation progress - tester.newPeer("peer-full", protocol, chain.blocks[1:]) - pending.Add(1) - go func() { - defer pending.Done() - if err := tester.sync("peer-full", nil, mode); err != nil { - panic(fmt.Sprintf("failed to synchronise blocks: %v", err)) - } - }() - <-starting - checkProgress(t, tester.downloader, "completing", ethereum.SyncProgress{ - StartingBlock: uint64(len(chain.blocks)/2 - 1), - CurrentBlock: uint64(len(chain.blocks)/2 - 1), - HighestBlock: uint64(len(chain.blocks) - 1), - }) - - // Check final progress after successful sync - progress <- struct{}{} - pending.Wait() - checkProgress(t, tester.downloader, "final", ethereum.SyncProgress{ - StartingBlock: uint64(len(chain.blocks)/2 - 1), - CurrentBlock: uint64(len(chain.blocks) - 1), - HighestBlock: uint64(len(chain.blocks) - 1), - }) -} - -func checkProgress(t *testing.T, d *Downloader, stage string, want ethereum.SyncProgress) { - // Mark this method as a helper to report errors at callsite, not in here - t.Helper() - - p := d.Progress() - if p.StartingBlock != want.StartingBlock || p.CurrentBlock != want.CurrentBlock || p.HighestBlock != want.HighestBlock { - t.Fatalf("%s progress mismatch:\nhave %+v\nwant %+v", stage, p, want) - } -} - -// Tests that synchronisation progress (origin block number and highest block -// number) is tracked and updated correctly in case of a fork (or manual head -// revertal). 
-func TestForkedSyncProgress68Full(t *testing.T) { testForkedSyncProgress(t, eth.ETH68, FullSync) } -func TestForkedSyncProgress68Snap(t *testing.T) { testForkedSyncProgress(t, eth.ETH68, SnapSync) } -func TestForkedSyncProgress68Light(t *testing.T) { testForkedSyncProgress(t, eth.ETH68, LightSync) } - -func testForkedSyncProgress(t *testing.T, protocol uint, mode SyncMode) { - tester := newTester(t) - defer tester.terminate() - - chainA := testChainForkLightA.shorten(len(testChainBase.blocks) + MaxHeaderFetch) - chainB := testChainForkLightB.shorten(len(testChainBase.blocks) + MaxHeaderFetch) - - // Set a sync init hook to catch progress changes - starting := make(chan struct{}) - progress := make(chan struct{}) - - tester.downloader.syncInitHook = func(origin, latest uint64) { - starting <- struct{}{} - <-progress - } - checkProgress(t, tester.downloader, "pristine", ethereum.SyncProgress{}) - - // Synchronise with one of the forks and check progress - tester.newPeer("fork A", protocol, chainA.blocks[1:]) - pending := new(sync.WaitGroup) - pending.Add(1) - go func() { - defer pending.Done() - if err := tester.sync("fork A", nil, mode); err != nil { - panic(fmt.Sprintf("failed to synchronise blocks: %v", err)) - } - }() - <-starting - - checkProgress(t, tester.downloader, "initial", ethereum.SyncProgress{ - HighestBlock: uint64(len(chainA.blocks) - 1), - }) - progress <- struct{}{} - pending.Wait() - - // Simulate a successful sync above the fork - tester.downloader.syncStatsChainOrigin = tester.downloader.syncStatsChainHeight - - // Synchronise with the second fork and check progress resets - tester.newPeer("fork B", protocol, chainB.blocks[1:]) - pending.Add(1) - go func() { - defer pending.Done() - if err := tester.sync("fork B", nil, mode); err != nil { - panic(fmt.Sprintf("failed to synchronise blocks: %v", err)) - } - }() - <-starting - checkProgress(t, tester.downloader, "forking", ethereum.SyncProgress{ - StartingBlock: uint64(len(testChainBase.blocks)) - 1, - CurrentBlock: uint64(len(chainA.blocks) - 1), - HighestBlock: uint64(len(chainB.blocks) - 1), - }) - - // Check final progress after successful sync - progress <- struct{}{} - pending.Wait() - checkProgress(t, tester.downloader, "final", ethereum.SyncProgress{ - StartingBlock: uint64(len(testChainBase.blocks)) - 1, - CurrentBlock: uint64(len(chainB.blocks) - 1), - HighestBlock: uint64(len(chainB.blocks) - 1), - }) -} - -// Tests that if synchronisation is aborted due to some failure, then the progress -// origin is not updated in the next sync cycle, as it should be considered the -// continuation of the previous sync and not a new instance. 
-func TestFailedSyncProgress68Full(t *testing.T) { testFailedSyncProgress(t, eth.ETH68, FullSync) } -func TestFailedSyncProgress68Snap(t *testing.T) { testFailedSyncProgress(t, eth.ETH68, SnapSync) } -func TestFailedSyncProgress68Light(t *testing.T) { testFailedSyncProgress(t, eth.ETH68, LightSync) } - -func testFailedSyncProgress(t *testing.T, protocol uint, mode SyncMode) { - tester := newTester(t) - defer tester.terminate() - - chain := testChainBase.shorten(blockCacheMaxItems - 15) - - // Set a sync init hook to catch progress changes - starting := make(chan struct{}) - progress := make(chan struct{}) - - tester.downloader.syncInitHook = func(origin, latest uint64) { - starting <- struct{}{} - <-progress - } - checkProgress(t, tester.downloader, "pristine", ethereum.SyncProgress{}) - - // Attempt a full sync with a faulty peer - missing := len(chain.blocks)/2 - 1 - - faulter := tester.newPeer("faulty", protocol, chain.blocks[1:]) - faulter.withholdHeaders[chain.blocks[missing].Hash()] = struct{}{} - - pending := new(sync.WaitGroup) - pending.Add(1) - go func() { - defer pending.Done() - if err := tester.sync("faulty", nil, mode); err == nil { - panic("succeeded faulty synchronisation") - } - }() - <-starting - checkProgress(t, tester.downloader, "initial", ethereum.SyncProgress{ - HighestBlock: uint64(len(chain.blocks) - 1), - }) - progress <- struct{}{} - pending.Wait() - afterFailedSync := tester.downloader.Progress() - - // Synchronise with a good peer and check that the progress origin remind the same - // after a failure - tester.newPeer("valid", protocol, chain.blocks[1:]) - pending.Add(1) - go func() { - defer pending.Done() - if err := tester.sync("valid", nil, mode); err != nil { - panic(fmt.Sprintf("failed to synchronise blocks: %v", err)) - } - }() - <-starting - checkProgress(t, tester.downloader, "completing", afterFailedSync) - - // Check final progress after successful sync - progress <- struct{}{} - pending.Wait() - checkProgress(t, tester.downloader, "final", ethereum.SyncProgress{ - CurrentBlock: uint64(len(chain.blocks) - 1), - HighestBlock: uint64(len(chain.blocks) - 1), - }) -} - -// Tests that if an attacker fakes a chain height, after the attack is detected, -// the progress height is successfully reduced at the next sync invocation. -func TestFakedSyncProgress68Full(t *testing.T) { testFakedSyncProgress(t, eth.ETH68, FullSync) } -func TestFakedSyncProgress68Snap(t *testing.T) { testFakedSyncProgress(t, eth.ETH68, SnapSync) } -func TestFakedSyncProgress68Light(t *testing.T) { testFakedSyncProgress(t, eth.ETH68, LightSync) } - -func testFakedSyncProgress(t *testing.T, protocol uint, mode SyncMode) { - tester := newTester(t) - defer tester.terminate() - - chain := testChainBase.shorten(blockCacheMaxItems - 15) - - // Set a sync init hook to catch progress changes - starting := make(chan struct{}) - progress := make(chan struct{}) - tester.downloader.syncInitHook = func(origin, latest uint64) { - starting <- struct{}{} - <-progress - } - checkProgress(t, tester.downloader, "pristine", ethereum.SyncProgress{}) - - // Create and sync with an attacker that promises a higher chain than available. 
- attacker := tester.newPeer("attack", protocol, chain.blocks[1:]) - numMissing := 5 - for i := len(chain.blocks) - 2; i > len(chain.blocks)-numMissing; i-- { - attacker.withholdHeaders[chain.blocks[i].Hash()] = struct{}{} - } - pending := new(sync.WaitGroup) - pending.Add(1) - go func() { - defer pending.Done() - if err := tester.sync("attack", nil, mode); err == nil { - panic("succeeded attacker synchronisation") - } - }() - <-starting - checkProgress(t, tester.downloader, "initial", ethereum.SyncProgress{ - HighestBlock: uint64(len(chain.blocks) - 1), - }) - progress <- struct{}{} - pending.Wait() - afterFailedSync := tester.downloader.Progress() - - // it is no longer valid to sync to a lagging peer - laggingChain := chain.shorten(800 / 2) - tester.newPeer("lagging", protocol, laggingChain.blocks[1:]) - pending.Add(1) - go func() { - defer pending.Done() - if err := tester.sync("lagging", nil, mode); err != errLaggingPeer { - panic(fmt.Sprintf("unexpected lagging synchronisation err:%v", err)) - } - }() - // lagging peer will return before syncInitHook, skip <-starting and progress <- struct{}{} - checkProgress(t, tester.downloader, "lagging", ethereum.SyncProgress{ - CurrentBlock: afterFailedSync.CurrentBlock, - HighestBlock: uint64(len(chain.blocks) - 1), - }) - pending.Wait() - - // Synchronise with a good peer and check that the progress height has been increased to - // the true value. - validChain := chain.shorten(len(chain.blocks)) - tester.newPeer("valid", protocol, validChain.blocks[1:]) - pending.Add(1) - - go func() { - defer pending.Done() - if err := tester.sync("valid", nil, mode); err != nil { - panic(fmt.Sprintf("failed to synchronise blocks: %v", err)) - } - }() - <-starting - checkProgress(t, tester.downloader, "completing", ethereum.SyncProgress{ - CurrentBlock: afterFailedSync.CurrentBlock, - HighestBlock: uint64(len(validChain.blocks) - 1), - }) - // Check final progress after successful sync. - progress <- struct{}{} - pending.Wait() - checkProgress(t, tester.downloader, "final", ethereum.SyncProgress{ - CurrentBlock: uint64(len(validChain.blocks) - 1), - HighestBlock: uint64(len(validChain.blocks) - 1), - }) -} - -func TestRemoteHeaderRequestSpan(t *testing.T) { - testCases := []struct { - remoteHeight uint64 - localHeight uint64 - expected []int - }{ - // Remote is way higher. We should ask for the remote head and go backwards - {1500, 1000, - []int{1323, 1339, 1355, 1371, 1387, 1403, 1419, 1435, 1451, 1467, 1483, 1499}, - }, - {15000, 13006, - []int{14823, 14839, 14855, 14871, 14887, 14903, 14919, 14935, 14951, 14967, 14983, 14999}, - }, - // Remote is pretty close to us. We don't have to fetch as many - {1200, 1150, - []int{1149, 1154, 1159, 1164, 1169, 1174, 1179, 1184, 1189, 1194, 1199}, - }, - // Remote is equal to us (so on a fork with higher td) - // We should get the closest couple of ancestors - {1500, 1500, - []int{1497, 1499}, - }, - // We're higher than the remote! 
Odd - {1000, 1500, - []int{997, 999}, - }, - // Check some weird edgecases that it behaves somewhat rationally - {0, 1500, - []int{0, 2}, - }, - {6000000, 0, - []int{5999823, 5999839, 5999855, 5999871, 5999887, 5999903, 5999919, 5999935, 5999951, 5999967, 5999983, 5999999}, - }, - {0, 0, - []int{0, 2}, - }, - } - reqs := func(from, count, span int) []int { - var r []int - num := from - for len(r) < count { - r = append(r, num) - num += span + 1 - } - return r - } - for i, tt := range testCases { - from, count, span, max := calculateRequestSpan(tt.remoteHeight, tt.localHeight) - data := reqs(int(from), count, span) - - if max != uint64(data[len(data)-1]) { - t.Errorf("test %d: wrong last value %d != %d", i, data[len(data)-1], max) - } - failed := false - if len(data) != len(tt.expected) { - failed = true - t.Errorf("test %d: length wrong, expected %d got %d", i, len(tt.expected), len(data)) - } else { - for j, n := range data { - if n != tt.expected[j] { - failed = true - break - } - } - } - if failed { - res := strings.ReplaceAll(fmt.Sprint(data), " ", ",") - exp := strings.ReplaceAll(fmt.Sprint(tt.expected), " ", ",") - t.Logf("got: %v\n", res) - t.Logf("exp: %v\n", exp) - t.Errorf("test %d: wrong values", i) - } - } -} - -/* -// Tests that peers below a pre-configured checkpoint block are prevented from -// being fast-synced from, avoiding potential cheap eclipse attacks. -func TestBeaconSync68Full(t *testing.T) { testBeaconSync(t, eth.ETH68, FullSync) } -func TestBeaconSync68Snap(t *testing.T) { testBeaconSync(t, eth.ETH68, SnapSync) } - -func testBeaconSync(t *testing.T, protocol uint, mode SyncMode) { - //log.SetDefault(log.NewLogger(log.NewTerminalHandlerWithLevel(os.Stderr, log.LevelInfo, true))) - - var cases = []struct { - name string // The name of testing scenario - local int // The length of local chain(canonical chain assumed), 0 means genesis is the head - }{ - {name: "Beacon sync since genesis", local: 0}, - {name: "Beacon sync with short local chain", local: 1}, - {name: "Beacon sync with long local chain", local: blockCacheMaxItems - 15 - fsMinFullBlocks/2}, - {name: "Beacon sync with full local chain", local: blockCacheMaxItems - 15 - 1}, - } - for _, c := range cases { - t.Run(c.name, func(t *testing.T) { - success := make(chan struct{}) - tester := newTesterWithNotification(t, func() { - close(success) - }) - defer tester.terminate() - - chain := testChainBase.shorten(blockCacheMaxItems - 15) - tester.newPeer("peer", protocol, chain.blocks[1:]) - - // Build the local chain segment if it's required - if c.local > 0 { - tester.chain.InsertChain(chain.blocks[1 : c.local+1]) - } - if err := tester.downloader.BeaconSync(mode, chain.blocks[len(chain.blocks)-1].Header(), nil); err != nil { - t.Fatalf("Failed to beacon sync chain %v %v", c.name, err) - } - select { - case <-success: - // Ok, downloader fully cancelled after sync cycle - if bs := int(tester.chain.CurrentBlock().Number.Uint64()) + 1; bs != len(chain.blocks) { - t.Fatalf("synchronised blocks mismatch: have %v, want %v", bs, len(chain.blocks)) - } - case <-time.NewTimer(time.Second * 3).C: - t.Fatalf("Failed to sync chain in three seconds") - } - }) - } -} -*/ diff --git a/eth/downloader/modes.go b/eth/downloader/modes.go index d388b9ee4d..b46a3f788a 100644 --- a/eth/downloader/modes.go +++ b/eth/downloader/modes.go @@ -26,10 +26,11 @@ const ( FullSync SyncMode = iota // Synchronise the entire blockchain history from full blocks SnapSync // Download the chain and the state via compact snapshots LightSync // 
Download only the headers and terminate afterwards + NoSync // Do not download the chain ) func (mode SyncMode) IsValid() bool { - return mode >= FullSync && mode <= LightSync + return mode >= FullSync && mode <= NoSync } // String implements the stringer interface. @@ -41,6 +42,8 @@ func (mode SyncMode) String() string { return "snap" case LightSync: return "light" + case NoSync: + return "nosync" default: return "unknown" } @@ -54,6 +57,8 @@ func (mode SyncMode) MarshalText() ([]byte, error) { return []byte("snap"), nil case LightSync: return []byte("light"), nil + case NoSync: + return []byte("nosync"), nil default: return nil, fmt.Errorf("unknown sync mode %d", mode) } @@ -67,6 +72,8 @@ func (mode *SyncMode) UnmarshalText(text []byte) error { *mode = SnapSync case "light": *mode = LightSync + case "nosync": + *mode = NoSync default: return fmt.Errorf(`unknown sync mode %q, want "full", "snap" or "light"`, text) } diff --git a/eth/downloader/queue_test.go b/eth/downloader/queue_test.go deleted file mode 100644 index c10483b5b2..0000000000 --- a/eth/downloader/queue_test.go +++ /dev/null @@ -1,474 +0,0 @@ -// Copyright 2015 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package downloader - -import ( - "fmt" - "math/big" - "math/rand" - "os" - "sync" - "testing" - "time" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/consensus/ethash" - "github.com/ethereum/go-ethereum/core" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/log" - "github.com/ethereum/go-ethereum/params" - "github.com/ethereum/go-ethereum/trie" - "golang.org/x/exp/slog" -) - -// makeChain creates a chain of n blocks starting at and including parent. -// the returned hash chain is ordered head->parent. In addition, every 3rd block -// contains a transaction and every 5th an uncle to allow testing correct block -// reassembly. 
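The modes.go hunk above introduces the NoSync mode and threads it through IsValid, String, MarshalText and UnmarshalText; presumably it exists so a Greenfield-backed node can skip p2p chain download entirely. The following is a minimal sketch, not part of this patch, assuming only the modified eth/downloader package shown above; the main wrapper is illustrative scaffolding.

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/eth/downloader"
)

func main() {
	mode := downloader.NoSync
	fmt.Println(mode.IsValid()) // true: the valid range now extends through NoSync

	text, err := mode.MarshalText()
	if err != nil {
		panic(err)
	}
	fmt.Println(string(text)) // "nosync"

	var parsed downloader.SyncMode
	if err := parsed.UnmarshalText([]byte("nosync")); err != nil {
		panic(err)
	}
	fmt.Println(parsed == downloader.NoSync) // true: round-trips through the text form
}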
-func makeChain(n int, seed byte, parent *types.Block, empty bool) ([]*types.Block, []types.Receipts) { - blocks, receipts := core.GenerateChain(params.TestChainConfig, parent, ethash.NewFaker(), testDB, n, func(i int, block *core.BlockGen) { - block.SetCoinbase(common.Address{seed}) - // Add one tx to every secondblock - if !empty && i%2 == 0 { - signer := types.MakeSigner(params.TestChainConfig, block.Number(), block.Timestamp()) - tx, err := types.SignTx(types.NewTransaction(block.TxNonce(testAddress), common.Address{seed}, big.NewInt(1000), params.TxGas, block.BaseFee(), nil), signer, testKey) - if err != nil { - panic(err) - } - block.AddTx(tx) - } - }) - return blocks, receipts -} - -type chainData struct { - blocks []*types.Block - offset int -} - -var chain *chainData -var emptyChain *chainData - -func init() { - // Create a chain of blocks to import - targetBlocks := 128 - blocks, _ := makeChain(targetBlocks, 0, testGenesis, false) - chain = &chainData{blocks, 0} - - blocks, _ = makeChain(targetBlocks, 0, testGenesis, true) - emptyChain = &chainData{blocks, 0} -} - -func (chain *chainData) headers() []*types.Header { - hdrs := make([]*types.Header, len(chain.blocks)) - for i, b := range chain.blocks { - hdrs[i] = b.Header() - } - return hdrs -} - -func (chain *chainData) Len() int { - return len(chain.blocks) -} - -func dummyPeer(id string) *peerConnection { - p := &peerConnection{ - id: id, - lacking: make(map[common.Hash]struct{}), - } - return p -} - -func TestBasics(t *testing.T) { - numOfBlocks := len(emptyChain.blocks) - numOfReceipts := len(emptyChain.blocks) / 2 - - q := newQueue(10, 10) - if !q.Idle() { - t.Errorf("new queue should be idle") - } - q.Prepare(1, SnapSync) - if res := q.Results(false); len(res) != 0 { - t.Fatal("new queue should have 0 results") - } - - // Schedule a batch of headers - headers := chain.headers() - hashes := make([]common.Hash, len(headers)) - for i, header := range headers { - hashes[i] = header.Hash() - } - q.Schedule(headers, hashes, 1) - if q.Idle() { - t.Errorf("queue should not be idle") - } - if got, exp := q.PendingBodies(), chain.Len(); got != exp { - t.Errorf("wrong pending block count, got %d, exp %d", got, exp) - } - // Only non-empty receipts get added to task-queue - if got, exp := q.PendingReceipts(), 64; got != exp { - t.Errorf("wrong pending receipt count, got %d, exp %d", got, exp) - } - // Items are now queued for downloading, next step is that we tell the - // queue that a certain peer will deliver them for us - { - peer := dummyPeer("peer-1") - fetchReq, _, throttle := q.ReserveBodies(peer, 50) - if !throttle { - // queue size is only 10, so throttling should occur - t.Fatal("should throttle") - } - // But we should still get the first things to fetch - if got, exp := len(fetchReq.Headers), 5; got != exp { - t.Fatalf("expected %d requests, got %d", exp, got) - } - if got, exp := fetchReq.Headers[0].Number.Uint64(), uint64(1); got != exp { - t.Fatalf("expected header %d, got %d", exp, got) - } - } - if exp, got := q.blockTaskQueue.Size(), numOfBlocks-10; exp != got { - t.Errorf("expected block task queue to be %d, got %d", exp, got) - } - if exp, got := q.receiptTaskQueue.Size(), numOfReceipts; exp != got { - t.Errorf("expected receipt task queue to be %d, got %d", exp, got) - } - { - peer := dummyPeer("peer-2") - fetchReq, _, throttle := q.ReserveBodies(peer, 50) - - // The second peer should hit throttling - if !throttle { - t.Fatalf("should throttle") - } - // And not get any fetches at all, since it was throttled to 
begin with - if fetchReq != nil { - t.Fatalf("should have no fetches, got %d", len(fetchReq.Headers)) - } - } - if exp, got := q.blockTaskQueue.Size(), numOfBlocks-10; exp != got { - t.Errorf("expected block task queue to be %d, got %d", exp, got) - } - if exp, got := q.receiptTaskQueue.Size(), numOfReceipts; exp != got { - t.Errorf("expected receipt task queue to be %d, got %d", exp, got) - } - { - // The receipt delivering peer should not be affected - // by the throttling of body deliveries - peer := dummyPeer("peer-3") - fetchReq, _, throttle := q.ReserveReceipts(peer, 50) - if !throttle { - // queue size is only 10, so throttling should occur - t.Fatal("should throttle") - } - // But we should still get the first things to fetch - if got, exp := len(fetchReq.Headers), 5; got != exp { - t.Fatalf("expected %d requests, got %d", exp, got) - } - if got, exp := fetchReq.Headers[0].Number.Uint64(), uint64(1); got != exp { - t.Fatalf("expected header %d, got %d", exp, got) - } - } - if exp, got := q.blockTaskQueue.Size(), numOfBlocks-10; exp != got { - t.Errorf("expected block task queue to be %d, got %d", exp, got) - } - if exp, got := q.receiptTaskQueue.Size(), numOfReceipts-5; exp != got { - t.Errorf("expected receipt task queue to be %d, got %d", exp, got) - } - if got, exp := q.resultCache.countCompleted(), 0; got != exp { - t.Errorf("wrong processable count, got %d, exp %d", got, exp) - } -} - -func TestEmptyBlocks(t *testing.T) { - numOfBlocks := len(emptyChain.blocks) - - q := newQueue(10, 10) - - q.Prepare(1, SnapSync) - - // Schedule a batch of headers - headers := emptyChain.headers() - hashes := make([]common.Hash, len(headers)) - for i, header := range headers { - hashes[i] = header.Hash() - } - q.Schedule(headers, hashes, 1) - if q.Idle() { - t.Errorf("queue should not be idle") - } - if got, exp := q.PendingBodies(), len(emptyChain.blocks); got != exp { - t.Errorf("wrong pending block count, got %d, exp %d", got, exp) - } - if got, exp := q.PendingReceipts(), 0; got != exp { - t.Errorf("wrong pending receipt count, got %d, exp %d", got, exp) - } - // They won't be processable, because the fetchresults haven't been - // created yet - if got, exp := q.resultCache.countCompleted(), 0; got != exp { - t.Errorf("wrong processable count, got %d, exp %d", got, exp) - } - - // Items are now queued for downloading, next step is that we tell the - // queue that a certain peer will deliver them for us - // That should trigger all of them to suddenly become 'done' - { - // Reserve blocks - peer := dummyPeer("peer-1") - fetchReq, _, _ := q.ReserveBodies(peer, 50) - - // there should be nothing to fetch, blocks are empty - if fetchReq != nil { - t.Fatal("there should be no body fetch tasks remaining") - } - } - if q.blockTaskQueue.Size() != numOfBlocks-10 { - t.Errorf("expected block task queue to be %d, got %d", numOfBlocks-10, q.blockTaskQueue.Size()) - } - if q.receiptTaskQueue.Size() != 0 { - t.Errorf("expected receipt task queue to be %d, got %d", 0, q.receiptTaskQueue.Size()) - } - { - peer := dummyPeer("peer-3") - fetchReq, _, _ := q.ReserveReceipts(peer, 50) - - // there should be nothing to fetch, blocks are empty - if fetchReq != nil { - t.Fatal("there should be no receipt fetch tasks remaining") - } - } - if q.blockTaskQueue.Size() != numOfBlocks-10 { - t.Errorf("expected block task queue to be %d, got %d", numOfBlocks-10, q.blockTaskQueue.Size()) - } - if q.receiptTaskQueue.Size() != 0 { - t.Errorf("expected receipt task queue to be %d, got %d", 0, q.receiptTaskQueue.Size()) - } 
- if got, exp := q.resultCache.countCompleted(), 10; got != exp { - t.Errorf("wrong processable count, got %d, exp %d", got, exp) - } -} - -// XTestDelivery does some more extensive testing of events that happen, -// blocks that become known and peers that make reservations and deliveries. -// disabled since it's not really a unit-test, but can be executed to test -// some more advanced scenarios -func XTestDelivery(t *testing.T) { - // the outside network, holding blocks - blo, rec := makeChain(128, 0, testGenesis, false) - world := newNetwork() - world.receipts = rec - world.chain = blo - world.progress(10) - if false { - log.SetDefault(log.NewLogger(slog.NewTextHandler(os.Stdout, nil))) - } - q := newQueue(10, 10) - var wg sync.WaitGroup - q.Prepare(1, SnapSync) - wg.Add(1) - go func() { - // deliver headers - defer wg.Done() - c := 1 - for { - //fmt.Printf("getting headers from %d\n", c) - headers := world.headers(c) - hashes := make([]common.Hash, len(headers)) - for i, header := range headers { - hashes[i] = header.Hash() - } - l := len(headers) - //fmt.Printf("scheduling %d headers, first %d last %d\n", - // l, headers[0].Number.Uint64(), headers[len(headers)-1].Number.Uint64()) - q.Schedule(headers, hashes, uint64(c)) - c += l - } - }() - wg.Add(1) - go func() { - // collect results - defer wg.Done() - tot := 0 - for { - res := q.Results(true) - tot += len(res) - fmt.Printf("got %d results, %d tot\n", len(res), tot) - // Now we can forget about these - world.forget(res[len(res)-1].Header.Number.Uint64()) - } - }() - wg.Add(1) - go func() { - defer wg.Done() - // reserve body fetch - i := 4 - for { - peer := dummyPeer(fmt.Sprintf("peer-%d", i)) - f, _, _ := q.ReserveBodies(peer, rand.Intn(30)) - if f != nil { - var ( - emptyList []*types.Header - txset [][]*types.Transaction - uncleset [][]*types.Header - ) - numToSkip := rand.Intn(len(f.Headers)) - for _, hdr := range f.Headers[0 : len(f.Headers)-numToSkip] { - txset = append(txset, world.getTransactions(hdr.Number.Uint64())) - uncleset = append(uncleset, emptyList) - } - var ( - txsHashes = make([]common.Hash, len(txset)) - uncleHashes = make([]common.Hash, len(uncleset)) - ) - hasher := trie.NewStackTrie(nil) - for i, txs := range txset { - txsHashes[i] = types.DeriveSha(types.Transactions(txs), hasher) - } - for i, uncles := range uncleset { - uncleHashes[i] = types.CalcUncleHash(uncles) - } - time.Sleep(100 * time.Millisecond) - _, err := q.DeliverBodies(peer.id, txset, txsHashes, uncleset, uncleHashes, nil, nil, nil) - if err != nil { - fmt.Printf("delivered %d bodies %v\n", len(txset), err) - } - } else { - i++ - time.Sleep(200 * time.Millisecond) - } - } - }() - go func() { - defer wg.Done() - // reserve receiptfetch - peer := dummyPeer("peer-3") - for { - f, _, _ := q.ReserveReceipts(peer, rand.Intn(50)) - if f != nil { - var rcs [][]*types.Receipt - for _, hdr := range f.Headers { - rcs = append(rcs, world.getReceipts(hdr.Number.Uint64())) - } - hasher := trie.NewStackTrie(nil) - hashes := make([]common.Hash, len(rcs)) - for i, receipt := range rcs { - hashes[i] = types.DeriveSha(types.Receipts(receipt), hasher) - } - _, err := q.DeliverReceipts(peer.id, rcs, hashes) - if err != nil { - fmt.Printf("delivered %d receipts %v\n", len(rcs), err) - } - time.Sleep(100 * time.Millisecond) - } else { - time.Sleep(200 * time.Millisecond) - } - } - }() - wg.Add(1) - go func() { - defer wg.Done() - for i := 0; i < 50; i++ { - time.Sleep(300 * time.Millisecond) - //world.tick() - //fmt.Printf("trying to progress\n") - 
world.progress(rand.Intn(100)) - } - for i := 0; i < 50; i++ { - time.Sleep(2990 * time.Millisecond) - } - }() - wg.Add(1) - go func() { - defer wg.Done() - for { - time.Sleep(990 * time.Millisecond) - fmt.Printf("world block tip is %d\n", - world.chain[len(world.chain)-1].Header().Number.Uint64()) - fmt.Println(q.Stats()) - } - }() - wg.Wait() -} - -func newNetwork() *network { - var l sync.RWMutex - return &network{ - cond: sync.NewCond(&l), - offset: 1, // block 1 is at blocks[0] - } -} - -// represents the network -type network struct { - offset int - chain []*types.Block - receipts []types.Receipts - lock sync.RWMutex - cond *sync.Cond -} - -func (n *network) getTransactions(blocknum uint64) types.Transactions { - index := blocknum - uint64(n.offset) - return n.chain[index].Transactions() -} -func (n *network) getReceipts(blocknum uint64) types.Receipts { - index := blocknum - uint64(n.offset) - if got := n.chain[index].Header().Number.Uint64(); got != blocknum { - fmt.Printf("Err, got %d exp %d\n", got, blocknum) - panic("sd") - } - return n.receipts[index] -} - -func (n *network) forget(blocknum uint64) { - index := blocknum - uint64(n.offset) - n.chain = n.chain[index:] - n.receipts = n.receipts[index:] - n.offset = int(blocknum) -} -func (n *network) progress(numBlocks int) { - n.lock.Lock() - defer n.lock.Unlock() - //fmt.Printf("progressing...\n") - newBlocks, newR := makeChain(numBlocks, 0, n.chain[len(n.chain)-1], false) - n.chain = append(n.chain, newBlocks...) - n.receipts = append(n.receipts, newR...) - n.cond.Broadcast() -} - -func (n *network) headers(from int) []*types.Header { - numHeaders := 128 - var hdrs []*types.Header - index := from - n.offset - - for index >= len(n.chain) { - // wait for progress - n.cond.L.Lock() - //fmt.Printf("header going into wait\n") - n.cond.Wait() - index = from - n.offset - n.cond.L.Unlock() - } - n.lock.RLock() - defer n.lock.RUnlock() - for i, b := range n.chain[index:] { - hdrs = append(hdrs, b.Header()) - if i >= numHeaders { - break - } - } - return hdrs -} diff --git a/eth/downloader/testchain_test.go b/eth/downloader/testchain_test.go deleted file mode 100644 index 52a8cedf0a..0000000000 --- a/eth/downloader/testchain_test.go +++ /dev/null @@ -1,231 +0,0 @@ -// Copyright 2018 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package downloader - -import ( - "fmt" - "math/big" - "sync" - "time" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/consensus/ethash" - "github.com/ethereum/go-ethereum/core" - "github.com/ethereum/go-ethereum/core/rawdb" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/core/vm" - "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/params" - "github.com/ethereum/go-ethereum/triedb" -) - -// Test chain parameters. 
-var ( - testKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") - testAddress = crypto.PubkeyToAddress(testKey.PublicKey) - testDB = rawdb.NewMemoryDatabase() - - testGspec = &core.Genesis{ - Config: params.TestChainConfig, - Alloc: types.GenesisAlloc{testAddress: {Balance: big.NewInt(1000000000000000)}}, - BaseFee: big.NewInt(params.InitialBaseFee), - } - testGenesis = testGspec.MustCommit(testDB, triedb.NewDatabase(testDB, triedb.HashDefaults)) -) - -// The common prefix of all test chains: -var testChainBase *testChain - -// Different forks on top of the base chain: -var testChainForkLightA, testChainForkLightB, testChainForkHeavy *testChain - -var pregenerated bool - -func init() { - // Reduce some of the parameters to make the tester faster - FullMaxForkAncestry = 10000 - lightMaxForkAncestry = 10000 - blockCacheMaxItems = 1024 - fsHeaderSafetyNet = 256 - fsHeaderContCheck = 500 * time.Millisecond - - testChainBase = newTestChain(blockCacheMaxItems+200, testGenesis) - - var forkLen = int(FullMaxForkAncestry + 50) - var wg sync.WaitGroup - - // Generate the test chains to seed the peers with - wg.Add(3) - go func() { testChainForkLightA = testChainBase.makeFork(forkLen, false, 1); wg.Done() }() - go func() { testChainForkLightB = testChainBase.makeFork(forkLen, false, 2); wg.Done() }() - go func() { testChainForkHeavy = testChainBase.makeFork(forkLen, true, 3); wg.Done() }() - wg.Wait() - - // Generate the test peers used by the tests to avoid overloading during testing. - // These seemingly random chains are used in various downloader tests. We're just - // pre-generating them here. - chains := []*testChain{ - testChainBase, - testChainForkLightA, - testChainForkLightB, - testChainForkHeavy, - testChainBase.shorten(1), - testChainBase.shorten(blockCacheMaxItems - 15), - testChainBase.shorten((blockCacheMaxItems - 15) / 2), - testChainBase.shorten(blockCacheMaxItems - 15 - 5), - testChainBase.shorten(MaxHeaderFetch), - testChainBase.shorten(800), - testChainBase.shorten(800 / 2), - testChainBase.shorten(800 / 3), - testChainBase.shorten(800 / 4), - testChainBase.shorten(800 / 5), - testChainBase.shorten(800 / 6), - testChainBase.shorten(800 / 7), - testChainBase.shorten(800 / 8), - testChainBase.shorten(3*fsHeaderSafetyNet + 256 + fsMinFullBlocks), - testChainBase.shorten(fsMinFullBlocks + 256 - 1), - testChainForkLightA.shorten(len(testChainBase.blocks) + 80), - testChainForkLightB.shorten(len(testChainBase.blocks) + 81), - testChainForkLightA.shorten(len(testChainBase.blocks) + MaxHeaderFetch), - testChainForkLightB.shorten(len(testChainBase.blocks) + MaxHeaderFetch), - testChainForkHeavy.shorten(len(testChainBase.blocks) + 79), - } - wg.Add(len(chains)) - for _, chain := range chains { - go func(blocks []*types.Block) { - newTestBlockchain(blocks) - wg.Done() - }(chain.blocks[1:]) - } - wg.Wait() - - // Mark the chains pregenerated. Generating a new one will lead to a panic. - pregenerated = true -} - -type testChain struct { - blocks []*types.Block -} - -// newTestChain creates a blockchain of the given length. -func newTestChain(length int, genesis *types.Block) *testChain { - tc := &testChain{ - blocks: []*types.Block{genesis}, - } - tc.generate(length-1, 0, genesis, false) - return tc -} - -// makeFork creates a fork on top of the test chain. 
-func (tc *testChain) makeFork(length int, heavy bool, seed byte) *testChain { - fork := tc.copy(len(tc.blocks) + length) - fork.generate(length, seed, tc.blocks[len(tc.blocks)-1], heavy) - return fork -} - -// shorten creates a copy of the chain with the given length. It panics if the -// length is longer than the number of available blocks. -func (tc *testChain) shorten(length int) *testChain { - if length > len(tc.blocks) { - panic(fmt.Errorf("can't shorten test chain to %d blocks, it's only %d blocks long", length, len(tc.blocks))) - } - return tc.copy(length) -} - -func (tc *testChain) copy(newlen int) *testChain { - if newlen > len(tc.blocks) { - newlen = len(tc.blocks) - } - cpy := &testChain{ - blocks: append([]*types.Block{}, tc.blocks[:newlen]...), - } - return cpy -} - -// generate creates a chain of n blocks starting at and including parent. -// the returned hash chain is ordered head->parent. In addition, every 22th block -// contains a transaction and every 5th an uncle to allow testing correct block -// reassembly. -func (tc *testChain) generate(n int, seed byte, parent *types.Block, heavy bool) { - blocks, _ := core.GenerateChain(testGspec.Config, parent, ethash.NewFaker(), testDB, n, func(i int, block *core.BlockGen) { - block.SetCoinbase(common.Address{seed}) - // If a heavy chain is requested, delay blocks to raise difficulty - if heavy { - block.OffsetTime(-9) - } - // Include transactions to the miner to make blocks more interesting. - if parent == tc.blocks[0] && i%22 == 0 { - signer := types.MakeSigner(params.TestChainConfig, block.Number(), block.Timestamp()) - tx, err := types.SignTx(types.NewTransaction(block.TxNonce(testAddress), common.Address{seed}, big.NewInt(1000), params.TxGas, block.BaseFee(), nil), signer, testKey) - if err != nil { - panic(err) - } - block.AddTx(tx) - } - // if the block number is a multiple of 5, add a bonus uncle to the block - if i > 0 && i%5 == 0 { - block.AddUncle(&types.Header{ - ParentHash: block.PrevBlock(i - 2).Hash(), - Number: big.NewInt(block.Number().Int64() - 1), - }) - } - }) - tc.blocks = append(tc.blocks, blocks...) -} - -var ( - testBlockchains = make(map[common.Hash]*testBlockchain) - testBlockchainsLock sync.Mutex -) - -type testBlockchain struct { - chain *core.BlockChain - gen sync.Once -} - -// newTestBlockchain creates a blockchain database built by running the given blocks, -// either actually running them, or reusing a previously created one. The returned -// chains are *shared*, so *do not* mutate them. 
-func newTestBlockchain(blocks []*types.Block) *core.BlockChain { - // Retrieve an existing database, or create a new one - head := testGenesis.Hash() - if len(blocks) > 0 { - head = blocks[len(blocks)-1].Hash() - } - testBlockchainsLock.Lock() - if _, ok := testBlockchains[head]; !ok { - testBlockchains[head] = new(testBlockchain) - } - tbc := testBlockchains[head] - testBlockchainsLock.Unlock() - - // Ensure that the database is generated - tbc.gen.Do(func() { - if pregenerated { - panic("Requested chain generation outside of init") - } - chain, err := core.NewBlockChain(rawdb.NewMemoryDatabase(), nil, testGspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) - if err != nil { - panic(err) - } - if n, err := chain.InsertChain(blocks); err != nil { - panic(fmt.Sprintf("block %d: %v", n, err)) - } - tbc.chain = chain - }) - return tbc.chain -} diff --git a/eth/ethconfig/config.go b/eth/ethconfig/config.go index a389a52fde..91845579bf 100644 --- a/eth/ethconfig/config.go +++ b/eth/ethconfig/config.go @@ -28,6 +28,7 @@ import ( "github.com/ethereum/go-ethereum/consensus/ethash" "github.com/ethereum/go-ethereum/consensus/parlia" "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/blockarchiver" "github.com/ethereum/go-ethereum/core/txpool/blobpool" "github.com/ethereum/go-ethereum/core/txpool/legacypool" "github.com/ethereum/go-ethereum/eth/downloader" @@ -49,29 +50,30 @@ var FullNodeGPO = gasprice.Config{ // Defaults contains default settings for use on the BSC main net. var Defaults = Config{ - SyncMode: downloader.SnapSync, - NetworkId: 0, // enable auto configuration of networkID == chainID - TxLookupLimit: 2350000, - TransactionHistory: 2350000, - StateHistory: params.FullImmutabilityThreshold, - LightPeers: 100, - DatabaseCache: 512, - TrieCleanCache: 154, - TrieDirtyCache: 256, - TrieTimeout: 60 * time.Minute, - TriesInMemory: 128, - TriesVerifyMode: core.LocalVerify, - SnapshotCache: 102, - DiffBlock: uint64(86400), - FilterLogCacheSize: 32, - Miner: miner.DefaultConfig, - TxPool: legacypool.DefaultConfig, - BlobPool: blobpool.DefaultConfig, - RPCGasCap: 50000000, - RPCEVMTimeout: 5 * time.Second, - GPO: FullNodeGPO, - RPCTxFeeCap: 1, // 1 ether - BlobExtraReserve: params.DefaultExtraReserveForBlobRequests, // Extra reserve threshold for blob, blob never expires when -1 is set, default 28800 + SyncMode: downloader.NoSync, + NetworkId: 0, // enable auto configuration of networkID == chainID + TxLookupLimit: 2350000, + TransactionHistory: 2350000, + StateHistory: params.FullImmutabilityThreshold, + LightPeers: 100, + DatabaseCache: 512, + TrieCleanCache: 154, + TrieDirtyCache: 256, + TrieTimeout: 60 * time.Minute, + TriesInMemory: 128, + TriesVerifyMode: core.LocalVerify, + SnapshotCache: 102, + DiffBlock: uint64(86400), + FilterLogCacheSize: 32, + Miner: miner.DefaultConfig, + TxPool: legacypool.DefaultConfig, + BlobPool: blobpool.DefaultConfig, + RPCGasCap: 50000000, + RPCEVMTimeout: 5 * time.Second, + GPO: FullNodeGPO, + RPCTxFeeCap: 1, // 1 ether + BlobExtraReserve: params.DefaultExtraReserveForBlobRequests, // Extra reserve threshold for blob, blob never expires when -1 is set, default 28800 + BlockArchiverConfig: blockarchiver.DefaultBlockArchiverConfig, } //go:generate go run github.com/fjl/gencodec -type Config -formats toml -out gen_config.go @@ -202,6 +204,9 @@ type Config struct { // blob setting BlobExtraReserve uint64 + + // block archive setting + BlockArchiverConfig blockarchiver.BlockArchiverConfig } // CreateConsensusEngine creates a consensus 
engine for the given chain config. diff --git a/eth/ethconfig/gen_config.go b/eth/ethconfig/gen_config.go index 26c4587fe4..9d55cf990f 100644 --- a/eth/ethconfig/gen_config.go +++ b/eth/ethconfig/gen_config.go @@ -7,6 +7,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/blockarchiver" "github.com/ethereum/go-ethereum/core/txpool/blobpool" "github.com/ethereum/go-ethereum/core/txpool/legacypool" "github.com/ethereum/go-ethereum/eth/downloader" @@ -75,6 +76,7 @@ func (c Config) MarshalTOML() (interface{}, error) { OverrideBohr *uint64 `toml:",omitempty"` OverrideVerkle *uint64 `toml:",omitempty"` BlobExtraReserve uint64 + BlockArchiverConfig blockarchiver.BlockArchiverConfig } var enc Config enc.Genesis = c.Genesis @@ -135,6 +137,7 @@ func (c Config) MarshalTOML() (interface{}, error) { enc.OverrideBohr = c.OverrideBohr enc.OverrideVerkle = c.OverrideVerkle enc.BlobExtraReserve = c.BlobExtraReserve + enc.BlockArchiverConfig = c.BlockArchiverConfig return &enc, nil } @@ -199,6 +202,7 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error { OverrideBohr *uint64 `toml:",omitempty"` OverrideVerkle *uint64 `toml:",omitempty"` BlobExtraReserve *uint64 + BlockArchiverConfig *blockarchiver.BlockArchiverConfig } var dec Config if err := unmarshal(&dec); err != nil { @@ -378,5 +382,8 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error { if dec.BlobExtraReserve != nil { c.BlobExtraReserve = *dec.BlobExtraReserve } + if dec.BlockArchiverConfig != nil { + c.BlockArchiverConfig = *dec.BlockArchiverConfig + } return nil } diff --git a/eth/filters/filter_test.go b/eth/filters/filter_test.go deleted file mode 100644 index 59b6165863..0000000000 --- a/eth/filters/filter_test.go +++ /dev/null @@ -1,392 +0,0 @@ -// Copyright 2015 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . 
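The ethconfig hunks above add a BlockArchiverConfig field to Config, default it to blockarchiver.DefaultBlockArchiverConfig, and switch the default SyncMode to downloader.NoSync. A minimal sketch of wiring this up follows; it uses only the names visible in the diff (the individual fields of BlockArchiverConfig are not shown in this hunk, so none are set here), and the main wrapper is illustrative only.

package main

import (
	"github.com/ethereum/go-ethereum/core/blockarchiver"
	"github.com/ethereum/go-ethereum/eth/ethconfig"
)

func main() {
	// Start from the patched defaults, which now carry downloader.NoSync and the
	// default block archiver settings.
	cfg := ethconfig.Defaults

	// Re-assign the archiver settings explicitly; DefaultBlockArchiverConfig is
	// the only value exposed by this hunk.
	cfg.BlockArchiverConfig = blockarchiver.DefaultBlockArchiverConfig
	_ = cfg
}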
- -package filters - -import ( - "context" - "encoding/json" - "math/big" - "strings" - "testing" - "time" - - "github.com/ethereum/go-ethereum/accounts/abi" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/consensus/ethash" - "github.com/ethereum/go-ethereum/core" - "github.com/ethereum/go-ethereum/core/rawdb" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/core/vm" - "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/params" - "github.com/ethereum/go-ethereum/rpc" - "github.com/ethereum/go-ethereum/triedb" -) - -func makeReceipt(addr common.Address) *types.Receipt { - receipt := types.NewReceipt(nil, false, 0) - receipt.Logs = []*types.Log{ - {Address: addr}, - } - receipt.Bloom = types.CreateBloom(types.Receipts{receipt}) - return receipt -} - -func BenchmarkFilters(b *testing.B) { - var ( - db, _ = rawdb.NewLevelDBDatabase(b.TempDir(), 0, 0, "", false) - _, sys = newTestFilterSystem(b, db, Config{}) - key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") - addr1 = crypto.PubkeyToAddress(key1.PublicKey) - addr2 = common.BytesToAddress([]byte("jeff")) - addr3 = common.BytesToAddress([]byte("ethereum")) - addr4 = common.BytesToAddress([]byte("random addresses please")) - - gspec = &core.Genesis{ - Alloc: types.GenesisAlloc{addr1: {Balance: big.NewInt(1000000)}}, - BaseFee: big.NewInt(params.InitialBaseFee), - Config: params.TestChainConfig, - } - ) - defer db.Close() - _, chain, receipts := core.GenerateChainWithGenesis(gspec, ethash.NewFaker(), 100010, func(i int, gen *core.BlockGen) { - switch i { - case 2403: - receipt := makeReceipt(addr1) - gen.AddUncheckedReceipt(receipt) - gen.AddUncheckedTx(types.NewTransaction(999, common.HexToAddress("0x999"), big.NewInt(999), 999, gen.BaseFee(), nil)) - case 1034: - receipt := makeReceipt(addr2) - gen.AddUncheckedReceipt(receipt) - gen.AddUncheckedTx(types.NewTransaction(999, common.HexToAddress("0x999"), big.NewInt(999), 999, gen.BaseFee(), nil)) - case 34: - receipt := makeReceipt(addr3) - gen.AddUncheckedReceipt(receipt) - gen.AddUncheckedTx(types.NewTransaction(999, common.HexToAddress("0x999"), big.NewInt(999), 999, gen.BaseFee(), nil)) - case 99999: - receipt := makeReceipt(addr4) - gen.AddUncheckedReceipt(receipt) - gen.AddUncheckedTx(types.NewTransaction(999, common.HexToAddress("0x999"), big.NewInt(999), 999, gen.BaseFee(), nil)) - } - }) - // The test txs are not properly signed, can't simply create a chain - // and then import blocks. TODO(rjl493456442) try to get rid of the - // manual database writes. 
- gspec.MustCommit(db, triedb.NewDatabase(db, triedb.HashDefaults)) - - for i, block := range chain { - rawdb.WriteBlock(db, block) - rawdb.WriteCanonicalHash(db, block.Hash(), block.NumberU64()) - rawdb.WriteHeadBlockHash(db, block.Hash()) - rawdb.WriteReceipts(db, block.Hash(), block.NumberU64(), receipts[i]) - } - b.ResetTimer() - - filter := sys.NewRangeFilter(0, -1, []common.Address{addr1, addr2, addr3, addr4}, nil, false) - - for i := 0; i < b.N; i++ { - filter.begin = 0 - logs, _ := filter.Logs(context.Background()) - if len(logs) != 4 { - b.Fatal("expected 4 logs, got", len(logs)) - } - } -} - -func TestFilters(t *testing.T) { - var ( - db = rawdb.NewMemoryDatabase() - _, sys = newTestFilterSystem(t, db, Config{}) - // Sender account - key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") - addr = crypto.PubkeyToAddress(key1.PublicKey) - signer = types.NewLondonSigner(big.NewInt(1)) - // Logging contract - contract = common.Address{0xfe} - contract2 = common.Address{0xff} - abiStr = `[{"inputs":[],"name":"log0","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"uint256","name":"t1","type":"uint256"}],"name":"log1","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"uint256","name":"t1","type":"uint256"},{"internalType":"uint256","name":"t2","type":"uint256"}],"name":"log2","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"uint256","name":"t1","type":"uint256"},{"internalType":"uint256","name":"t2","type":"uint256"},{"internalType":"uint256","name":"t3","type":"uint256"}],"name":"log3","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"uint256","name":"t1","type":"uint256"},{"internalType":"uint256","name":"t2","type":"uint256"},{"internalType":"uint256","name":"t3","type":"uint256"},{"internalType":"uint256","name":"t4","type":"uint256"}],"name":"log4","outputs":[],"stateMutability":"nonpayable","type":"function"}]` - /* - // SPDX-License-Identifier: GPL-3.0 - pragma solidity >=0.7.0 <0.9.0; - - contract Logger { - function log0() external { - assembly { - log0(0, 0) - } - } - - function log1(uint t1) external { - assembly { - log1(0, 0, t1) - } - } - - function log2(uint t1, uint t2) external { - assembly { - log2(0, 0, t1, t2) - } - } - - function log3(uint t1, uint t2, uint t3) external { - assembly { - log3(0, 0, t1, t2, t3) - } - } - - function log4(uint t1, uint t2, uint t3, uint t4) external { - assembly { - log4(0, 0, t1, t2, t3, t4) - } - } - } - */ - bytecode = 
common.FromHex("608060405234801561001057600080fd5b50600436106100575760003560e01c80630aa731851461005c5780632a4c08961461006657806378b9a1f314610082578063c670f8641461009e578063c683d6a3146100ba575b600080fd5b6100646100d6565b005b610080600480360381019061007b9190610143565b6100dc565b005b61009c60048036038101906100979190610196565b6100e8565b005b6100b860048036038101906100b391906101d6565b6100f2565b005b6100d460048036038101906100cf9190610203565b6100fa565b005b600080a0565b808284600080a3505050565b8082600080a25050565b80600080a150565b80828486600080a450505050565b600080fd5b6000819050919050565b6101208161010d565b811461012b57600080fd5b50565b60008135905061013d81610117565b92915050565b60008060006060848603121561015c5761015b610108565b5b600061016a8682870161012e565b935050602061017b8682870161012e565b925050604061018c8682870161012e565b9150509250925092565b600080604083850312156101ad576101ac610108565b5b60006101bb8582860161012e565b92505060206101cc8582860161012e565b9150509250929050565b6000602082840312156101ec576101eb610108565b5b60006101fa8482850161012e565b91505092915050565b6000806000806080858703121561021d5761021c610108565b5b600061022b8782880161012e565b945050602061023c8782880161012e565b935050604061024d8782880161012e565b925050606061025e8782880161012e565b9150509295919450925056fea264697066735822122073a4b156f487e59970dc1ef449cc0d51467268f676033a17188edafcee861f9864736f6c63430008110033") - - hash1 = common.BytesToHash([]byte("topic1")) - hash2 = common.BytesToHash([]byte("topic2")) - hash3 = common.BytesToHash([]byte("topic3")) - hash4 = common.BytesToHash([]byte("topic4")) - hash5 = common.BytesToHash([]byte("topic5")) - - gspec = &core.Genesis{ - Config: params.TestChainConfig, - Alloc: types.GenesisAlloc{ - addr: {Balance: big.NewInt(0).Mul(big.NewInt(100), big.NewInt(params.Ether))}, - contract: {Balance: big.NewInt(0), Code: bytecode}, - contract2: {Balance: big.NewInt(0), Code: bytecode}, - }, - BaseFee: big.NewInt(params.InitialBaseFee), - } - ) - - contractABI, err := abi.JSON(strings.NewReader(abiStr)) - if err != nil { - t.Fatal(err) - } - - // Hack: GenerateChainWithGenesis creates a new db. - // Commit the genesis manually and use GenerateChain. 
- _, err = gspec.Commit(db, triedb.NewDatabase(db, nil)) - if err != nil { - t.Fatal(err) - } - chain, _ := core.GenerateChain(gspec.Config, gspec.ToBlock(), ethash.NewFaker(), db, 1000, func(i int, gen *core.BlockGen) { - switch i { - case 1: - data, err := contractABI.Pack("log1", hash1.Big()) - if err != nil { - t.Fatal(err) - } - tx, _ := types.SignTx(types.NewTx(&types.LegacyTx{ - Nonce: 0, - GasPrice: gen.BaseFee(), - Gas: 30000, - To: &contract, - Data: data, - }), signer, key1) - gen.AddTx(tx) - tx2, _ := types.SignTx(types.NewTx(&types.LegacyTx{ - Nonce: 1, - GasPrice: gen.BaseFee(), - Gas: 30000, - To: &contract2, - Data: data, - }), signer, key1) - gen.AddTx(tx2) - case 2: - data, err := contractABI.Pack("log2", hash2.Big(), hash1.Big()) - if err != nil { - t.Fatal(err) - } - tx, _ := types.SignTx(types.NewTx(&types.LegacyTx{ - Nonce: 2, - GasPrice: gen.BaseFee(), - Gas: 30000, - To: &contract, - Data: data, - }), signer, key1) - gen.AddTx(tx) - case 998: - data, err := contractABI.Pack("log1", hash3.Big()) - if err != nil { - t.Fatal(err) - } - tx, _ := types.SignTx(types.NewTx(&types.LegacyTx{ - Nonce: 3, - GasPrice: gen.BaseFee(), - Gas: 30000, - To: &contract2, - Data: data, - }), signer, key1) - gen.AddTx(tx) - case 999: - data, err := contractABI.Pack("log1", hash4.Big()) - if err != nil { - t.Fatal(err) - } - tx, _ := types.SignTx(types.NewTx(&types.LegacyTx{ - Nonce: 4, - GasPrice: gen.BaseFee(), - Gas: 30000, - To: &contract, - Data: data, - }), signer, key1) - gen.AddTx(tx) - } - }) - var l uint64 - bc, err := core.NewBlockChain(db, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, &l) - if err != nil { - t.Fatal(err) - } - _, err = bc.InsertChain(chain) - if err != nil { - t.Fatal(err) - } - - // Set block 998 as Finalized (-3) - // bc.SetFinalized(chain[998].Header()) - - // Generate pending block - pchain, preceipts := core.GenerateChain(gspec.Config, chain[len(chain)-1], ethash.NewFaker(), db, 1, func(i int, gen *core.BlockGen) { - data, err := contractABI.Pack("log1", hash5.Big()) - if err != nil { - t.Fatal(err) - } - tx, _ := types.SignTx(types.NewTx(&types.LegacyTx{ - Nonce: 5, - GasPrice: gen.BaseFee(), - Gas: 30000, - To: &contract, - Data: data, - }), signer, key1) - gen.AddTx(tx) - }) - sys.backend.(*testBackend).pendingBlock = pchain[0] - sys.backend.(*testBackend).pendingReceipts = preceipts[0] - - for i, tc := range []struct { - f *Filter - want string - err string - }{ - { - f: sys.NewBlockFilter(chain[2].Hash(), []common.Address{contract}, nil), - want: `[{"address":"0xfe00000000000000000000000000000000000000","topics":["0x0000000000000000000000000000000000000000000000000000746f70696332","0x0000000000000000000000000000000000000000000000000000746f70696331"],"data":"0x","blockNumber":"0x3","transactionHash":"0xdefe471992a07a02acdfbe33edaae22fbb86d7d3cec3f1b8e4e77702fb3acc1d","transactionIndex":"0x0","blockHash":"0x7a7556792ca7d37882882e2b001fe14833eaf81c2c7f865c9c771ec37a024f6b","logIndex":"0x0","removed":false}]`, - }, - { - f: sys.NewRangeFilter(0, int64(rpc.LatestBlockNumber), []common.Address{contract}, [][]common.Hash{{hash1, hash2, hash3, hash4}}, false), - want: 
`[{"address":"0xfe00000000000000000000000000000000000000","topics":["0x0000000000000000000000000000000000000000000000000000746f70696331"],"data":"0x","blockNumber":"0x2","transactionHash":"0xa8028c655b6423204c8edfbc339f57b042d6bec2b6a61145d76b7c08b4cccd42","transactionIndex":"0x0","blockHash":"0x24417bb49ce44cfad65da68f33b510bf2a129c0d89ccf06acb6958b8585ccf34","logIndex":"0x0","removed":false},{"address":"0xfe00000000000000000000000000000000000000","topics":["0x0000000000000000000000000000000000000000000000000000746f70696332","0x0000000000000000000000000000000000000000000000000000746f70696331"],"data":"0x","blockNumber":"0x3","transactionHash":"0xdefe471992a07a02acdfbe33edaae22fbb86d7d3cec3f1b8e4e77702fb3acc1d","transactionIndex":"0x0","blockHash":"0x7a7556792ca7d37882882e2b001fe14833eaf81c2c7f865c9c771ec37a024f6b","logIndex":"0x0","removed":false},{"address":"0xfe00000000000000000000000000000000000000","topics":["0x0000000000000000000000000000000000000000000000000000746f70696334"],"data":"0x","blockNumber":"0x3e8","transactionHash":"0x9a87842100a638dfa5da8842b4beda691d2fd77b0c84b57f24ecfa9fb208f747","transactionIndex":"0x0","blockHash":"0xb360bad5265261c075ece02d3bf0e39498a6a76310482cdfd90588748e6c5ee0","logIndex":"0x0","removed":false}]`, - }, - { - f: sys.NewRangeFilter(900, 999, []common.Address{contract}, [][]common.Hash{{hash3}}, false), - }, - { - f: sys.NewRangeFilter(990, int64(rpc.LatestBlockNumber), []common.Address{contract2}, [][]common.Hash{{hash3}}, false), - want: `[{"address":"0xff00000000000000000000000000000000000000","topics":["0x0000000000000000000000000000000000000000000000000000746f70696333"],"data":"0x","blockNumber":"0x3e7","transactionHash":"0x53e3675800c6908424b61b35a44e51ca4c73ca603e58a65b32c67968b4f42200","transactionIndex":"0x0","blockHash":"0x2e4620a2b426b0612ec6cad9603f466723edaed87f98c9137405dd4f7a2409ff","logIndex":"0x0","removed":false}]`, - }, - { - f: sys.NewRangeFilter(1, 10, []common.Address{contract}, [][]common.Hash{{hash2}, {hash1}}, false), - want: `[{"address":"0xfe00000000000000000000000000000000000000","topics":["0x0000000000000000000000000000000000000000000000000000746f70696332","0x0000000000000000000000000000000000000000000000000000746f70696331"],"data":"0x","blockNumber":"0x3","transactionHash":"0xdefe471992a07a02acdfbe33edaae22fbb86d7d3cec3f1b8e4e77702fb3acc1d","transactionIndex":"0x0","blockHash":"0x7a7556792ca7d37882882e2b001fe14833eaf81c2c7f865c9c771ec37a024f6b","logIndex":"0x0","removed":false}]`, - }, - { - f: sys.NewRangeFilter(1, 10, nil, [][]common.Hash{{hash1, hash2}}, false), - want: 
`[{"address":"0xfe00000000000000000000000000000000000000","topics":["0x0000000000000000000000000000000000000000000000000000746f70696331"],"data":"0x","blockNumber":"0x2","transactionHash":"0xa8028c655b6423204c8edfbc339f57b042d6bec2b6a61145d76b7c08b4cccd42","transactionIndex":"0x0","blockHash":"0x24417bb49ce44cfad65da68f33b510bf2a129c0d89ccf06acb6958b8585ccf34","logIndex":"0x0","removed":false},{"address":"0xff00000000000000000000000000000000000000","topics":["0x0000000000000000000000000000000000000000000000000000746f70696331"],"data":"0x","blockNumber":"0x2","transactionHash":"0xdba3e2ea9a7d690b722d70ee605fd67ba4c00d1d3aecd5cf187a7b92ad8eb3df","transactionIndex":"0x1","blockHash":"0x24417bb49ce44cfad65da68f33b510bf2a129c0d89ccf06acb6958b8585ccf34","logIndex":"0x1","removed":false},{"address":"0xfe00000000000000000000000000000000000000","topics":["0x0000000000000000000000000000000000000000000000000000746f70696332","0x0000000000000000000000000000000000000000000000000000746f70696331"],"data":"0x","blockNumber":"0x3","transactionHash":"0xdefe471992a07a02acdfbe33edaae22fbb86d7d3cec3f1b8e4e77702fb3acc1d","transactionIndex":"0x0","blockHash":"0x7a7556792ca7d37882882e2b001fe14833eaf81c2c7f865c9c771ec37a024f6b","logIndex":"0x0","removed":false}]`, - }, - { - f: sys.NewRangeFilter(0, int64(rpc.LatestBlockNumber), nil, [][]common.Hash{{common.BytesToHash([]byte("fail"))}}, false), - }, - { - f: sys.NewRangeFilter(0, int64(rpc.LatestBlockNumber), []common.Address{common.BytesToAddress([]byte("failmenow"))}, nil, false), - }, - { - f: sys.NewRangeFilter(0, int64(rpc.LatestBlockNumber), nil, [][]common.Hash{{common.BytesToHash([]byte("fail"))}, {hash1}}, false), - }, - { - f: sys.NewRangeFilter(int64(rpc.LatestBlockNumber), int64(rpc.LatestBlockNumber), nil, nil, false), - want: `[{"address":"0xfe00000000000000000000000000000000000000","topics":["0x0000000000000000000000000000000000000000000000000000746f70696334"],"data":"0x","blockNumber":"0x3e8","transactionHash":"0x9a87842100a638dfa5da8842b4beda691d2fd77b0c84b57f24ecfa9fb208f747","transactionIndex":"0x0","blockHash":"0xb360bad5265261c075ece02d3bf0e39498a6a76310482cdfd90588748e6c5ee0","logIndex":"0x0","removed":false}]`, - }, - /* - { - f: sys.NewRangeFilter(int64(rpc.FinalizedBlockNumber), int64(rpc.LatestBlockNumber), nil, nil, false), - want: `[{"address":"0xff00000000000000000000000000000000000000","topics":["0x0000000000000000000000000000000000000000000000000000746f70696333"],"data":"0x","blockNumber":"0x3e7","transactionHash":"0x53e3675800c6908424b61b35a44e51ca4c73ca603e58a65b32c67968b4f42200","transactionIndex":"0x0","blockHash":"0x2e4620a2b426b0612ec6cad9603f466723edaed87f98c9137405dd4f7a2409ff","logIndex":"0x0","removed":false},{"address":"0xfe00000000000000000000000000000000000000","topics":["0x0000000000000000000000000000000000000000000000000000746f70696334"],"data":"0x","blockNumber":"0x3e8","transactionHash":"0x9a87842100a638dfa5da8842b4beda691d2fd77b0c84b57f24ecfa9fb208f747","transactionIndex":"0x0","blockHash":"0xb360bad5265261c075ece02d3bf0e39498a6a76310482cdfd90588748e6c5ee0","logIndex":"0x0","removed":false}]`, - }, - { - f: sys.NewRangeFilter(int64(rpc.FinalizedBlockNumber), int64(rpc.FinalizedBlockNumber), nil, nil, false), - want: 
`[{"address":"0xff00000000000000000000000000000000000000","topics":["0x0000000000000000000000000000000000000000000000000000746f70696333"],"data":"0x","blockNumber":"0x3e7","transactionHash":"0x53e3675800c6908424b61b35a44e51ca4c73ca603e58a65b32c67968b4f42200","transactionIndex":"0x0","blockHash":"0x2e4620a2b426b0612ec6cad9603f466723edaed87f98c9137405dd4f7a2409ff","logIndex":"0x0","removed":false}]`, - }, - { - f: sys.NewRangeFilter(int64(rpc.LatestBlockNumber), int64(rpc.FinalizedBlockNumber), nil, nil), - }, - { - f: sys.NewRangeFilter(int64(rpc.SafeBlockNumber), int64(rpc.LatestBlockNumber), nil, nil), - err: "safe header not found", - }, - { - f: sys.NewRangeFilter(int64(rpc.SafeBlockNumber), int64(rpc.SafeBlockNumber), nil, nil), - err: "safe header not found", - }, - { - f: sys.NewRangeFilter(int64(rpc.LatestBlockNumber), int64(rpc.SafeBlockNumber), nil, nil), - err: "safe header not found", - }, - */ - { - f: sys.NewRangeFilter(int64(rpc.PendingBlockNumber), int64(rpc.PendingBlockNumber), nil, nil, false), - want: `[{"address":"0xfe00000000000000000000000000000000000000","topics":["0x0000000000000000000000000000000000000000000000000000746f70696335"],"data":"0x","blockNumber":"0x3e9","transactionHash":"0x4110587c1b8d86edc85dce929a34127f1cb8809515a9f177c91c866de3eb0638","transactionIndex":"0x0","blockHash":"0xd5e8d4e4eb51a2a2a6ec20ef68a4c2801240743c8deb77a6a1d118ac3eefb725","logIndex":"0x0","removed":false}]`, - }, - { - f: sys.NewRangeFilter(int64(rpc.LatestBlockNumber), int64(rpc.PendingBlockNumber), nil, nil, false), - want: `[{"address":"0xfe00000000000000000000000000000000000000","topics":["0x0000000000000000000000000000000000000000000000000000746f70696334"],"data":"0x","blockNumber":"0x3e8","transactionHash":"0x9a87842100a638dfa5da8842b4beda691d2fd77b0c84b57f24ecfa9fb208f747","transactionIndex":"0x0","blockHash":"0xb360bad5265261c075ece02d3bf0e39498a6a76310482cdfd90588748e6c5ee0","logIndex":"0x0","removed":false},{"address":"0xfe00000000000000000000000000000000000000","topics":["0x0000000000000000000000000000000000000000000000000000746f70696335"],"data":"0x","blockNumber":"0x3e9","transactionHash":"0x4110587c1b8d86edc85dce929a34127f1cb8809515a9f177c91c866de3eb0638","transactionIndex":"0x0","blockHash":"0xd5e8d4e4eb51a2a2a6ec20ef68a4c2801240743c8deb77a6a1d118ac3eefb725","logIndex":"0x0","removed":false}]`, - }, - { - f: sys.NewRangeFilter(int64(rpc.PendingBlockNumber), int64(rpc.LatestBlockNumber), nil, nil, false), - err: errInvalidBlockRange.Error(), - }, - } { - logs, err := tc.f.Logs(context.Background()) - if err == nil && tc.err != "" { - t.Fatalf("test %d, expected error %q, got nil", i, tc.err) - } else if err != nil && err.Error() != tc.err { - t.Fatalf("test %d, expected error %q, got %q", i, tc.err, err.Error()) - } - if tc.want == "" && len(logs) == 0 { - continue - } - have, err := json.Marshal(logs) - if err != nil { - t.Fatal(err) - } - if string(have) != tc.want { - t.Fatalf("test %d, have:\n%s\nwant:\n%s", i, have, tc.want) - } - } - - t.Run("timeout", func(t *testing.T) { - f := sys.NewRangeFilter(0, -1, nil, nil, false) - ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(-time.Hour)) - defer cancel() - _, err := f.Logs(ctx) - if err == nil { - t.Fatal("expected error") - } - if err != context.DeadlineExceeded { - t.Fatalf("expected context.DeadlineExceeded, got %v", err) - } - }) -} diff --git a/eth/gasprice/feehistory_test.go b/eth/gasprice/feehistory_test.go deleted file mode 100644 index 3b4257496c..0000000000 --- 
a/eth/gasprice/feehistory_test.go +++ /dev/null @@ -1,91 +0,0 @@ -// Copyright 2021 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package gasprice - -import ( - "context" - "errors" - "math/big" - "testing" - - "github.com/ethereum/go-ethereum/rpc" -) - -func TestFeeHistory(t *testing.T) { - var cases = []struct { - pending bool - maxHeader, maxBlock uint64 - count uint64 - last rpc.BlockNumber - percent []float64 - expFirst uint64 - expCount int - expErr error - }{ - {false, 1000, 1000, 10, 30, nil, 21, 10, nil}, - {false, 1000, 1000, 10, 30, []float64{0, 10}, 21, 10, nil}, - {false, 1000, 1000, 10, 30, []float64{20, 10}, 0, 0, errInvalidPercentile}, - {false, 1000, 1000, 1000000000, 30, nil, 0, 31, nil}, - {false, 1000, 1000, 1000000000, rpc.LatestBlockNumber, nil, 0, 33, nil}, - {false, 1000, 1000, 10, 40, nil, 0, 0, errRequestBeyondHead}, - {true, 1000, 1000, 10, 40, nil, 0, 0, errRequestBeyondHead}, - {false, 20, 2, 100, rpc.LatestBlockNumber, nil, 13, 20, nil}, - {false, 20, 2, 100, rpc.LatestBlockNumber, []float64{0, 10}, 31, 2, nil}, - {false, 20, 2, 100, 32, []float64{0, 10}, 31, 2, nil}, - {false, 1000, 1000, 1, rpc.PendingBlockNumber, nil, 0, 0, nil}, - {false, 1000, 1000, 2, rpc.PendingBlockNumber, nil, 32, 1, nil}, - {true, 1000, 1000, 2, rpc.PendingBlockNumber, nil, 32, 2, nil}, - {true, 1000, 1000, 2, rpc.PendingBlockNumber, []float64{0, 10}, 32, 2, nil}, - // {false, 1000, 1000, 2, rpc.FinalizedBlockNumber, []float64{0, 10}, 24, 2, nil}, - // {false, 1000, 1000, 2, rpc.SafeBlockNumber, []float64{0, 10}, 24, 2, nil}, - } - for i, c := range cases { - config := Config{ - MaxHeaderHistory: c.maxHeader, - MaxBlockHistory: c.maxBlock, - } - backend := newTestBackend(t, big.NewInt(16), c.pending) - oracle := NewOracle(backend, config) - - first, reward, baseFee, ratio, err := oracle.FeeHistory(context.Background(), c.count, c.last, c.percent) - backend.teardown() - expReward := c.expCount - if len(c.percent) == 0 { - expReward = 0 - } - expBaseFee := c.expCount - if expBaseFee != 0 { - expBaseFee++ - } - - if first.Uint64() != c.expFirst { - t.Fatalf("Test case %d: first block mismatch, want %d, got %d", i, c.expFirst, first) - } - if len(reward) != expReward { - t.Fatalf("Test case %d: reward array length mismatch, want %d, got %d", i, expReward, len(reward)) - } - if len(baseFee) != expBaseFee { - t.Fatalf("Test case %d: baseFee array length mismatch, want %d, got %d", i, expBaseFee, len(baseFee)) - } - if len(ratio) != c.expCount { - t.Fatalf("Test case %d: gasUsedRatio array length mismatch, want %d, got %d", i, c.expCount, len(ratio)) - } - if err != c.expErr && !errors.Is(err, c.expErr) { - t.Fatalf("Test case %d: error mismatch, want %v, got %v", i, c.expErr, err) - } - } -} diff --git a/eth/gasprice/gasprice_test.go b/eth/gasprice/gasprice_test.go deleted file mode 
100644 index c6ce443a06..0000000000 --- a/eth/gasprice/gasprice_test.go +++ /dev/null @@ -1,230 +0,0 @@ -// Copyright 2020 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package gasprice - -import ( - "context" - "errors" - "math" - "math/big" - "testing" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/consensus/ethash" - "github.com/ethereum/go-ethereum/core" - "github.com/ethereum/go-ethereum/core/rawdb" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/core/vm" - "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/event" - "github.com/ethereum/go-ethereum/params" - "github.com/ethereum/go-ethereum/rpc" -) - -const testHead = 32 - -type testBackend struct { - chain *core.BlockChain - pending bool // pending block available -} - -func (b *testBackend) HeaderByNumber(ctx context.Context, number rpc.BlockNumber) (*types.Header, error) { - if number > testHead { - return nil, nil - } - if number == rpc.EarliestBlockNumber { - number = 0 - } - if number == rpc.FinalizedBlockNumber { - header := b.chain.CurrentFinalBlock() - if header == nil { - return nil, errors.New("finalized block not found") - } - number = rpc.BlockNumber(header.Number.Uint64()) - } - if number == rpc.SafeBlockNumber { - header := b.chain.CurrentSafeBlock() - if header == nil { - return nil, errors.New("safe block not found") - } - number = rpc.BlockNumber(header.Number.Uint64()) - } - if number == rpc.LatestBlockNumber { - number = testHead - } - if number == rpc.PendingBlockNumber { - if b.pending { - number = testHead + 1 - } else { - return nil, nil - } - } - return b.chain.GetHeaderByNumber(uint64(number)), nil -} - -func (b *testBackend) BlockByNumber(ctx context.Context, number rpc.BlockNumber) (*types.Block, error) { - if number > testHead { - return nil, nil - } - if number == rpc.EarliestBlockNumber { - number = 0 - } - if number == rpc.FinalizedBlockNumber { - number = rpc.BlockNumber(b.chain.CurrentFinalBlock().Number.Uint64()) - } - if number == rpc.SafeBlockNumber { - number = rpc.BlockNumber(b.chain.CurrentSafeBlock().Number.Uint64()) - } - if number == rpc.LatestBlockNumber { - number = testHead - } - if number == rpc.PendingBlockNumber { - if b.pending { - number = testHead + 1 - } else { - return nil, nil - } - } - return b.chain.GetBlockByNumber(uint64(number)), nil -} - -func (b *testBackend) GetReceipts(ctx context.Context, hash common.Hash) (types.Receipts, error) { - return b.chain.GetReceiptsByHash(hash), nil -} - -func (b *testBackend) PendingBlockAndReceipts() (*types.Block, types.Receipts) { - if b.pending { - block := b.chain.GetBlockByNumber(testHead + 1) - return block, b.chain.GetReceiptsByHash(block.Hash()) - } - return nil, nil -} - -func (b *testBackend) ChainConfig() *params.ChainConfig { - return 
b.chain.Config() -} - -func (b *testBackend) SubscribeChainHeadEvent(ch chan<- core.ChainHeadEvent) event.Subscription { - return nil -} - -func (b *testBackend) teardown() { - b.chain.Stop() -} - -// newTestBackend creates a test backend. OBS: don't forget to invoke tearDown -// after use, otherwise the blockchain instance will mem-leak via goroutines. -func newTestBackend(t *testing.T, londonBlock *big.Int, pending bool) *testBackend { - var ( - key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") - addr = crypto.PubkeyToAddress(key.PublicKey) - config = *params.TestChainConfig // needs copy because it is modified below - gspec = &core.Genesis{ - Config: &config, - Alloc: types.GenesisAlloc{addr: {Balance: big.NewInt(math.MaxInt64)}}, - } - signer = types.LatestSigner(gspec.Config) - ) - config.LondonBlock = londonBlock - config.ArrowGlacierBlock = londonBlock - config.GrayGlacierBlock = londonBlock - config.GibbsBlock = nil - config.LubanBlock = nil - config.PlatoBlock = nil - config.HertzBlock = nil - config.HertzfixBlock = nil - config.TerminalTotalDifficulty = common.Big0 - engine := ethash.NewFaker() - - // Generate testing blocks - _, blocks, _ := core.GenerateChainWithGenesis(gspec, engine, testHead+1, func(i int, b *core.BlockGen) { - b.SetCoinbase(common.Address{1}) - - var txdata types.TxData - if londonBlock != nil && b.Number().Cmp(londonBlock) >= 0 { - txdata = &types.DynamicFeeTx{ - ChainID: gspec.Config.ChainID, - Nonce: b.TxNonce(addr), - To: &common.Address{}, - Gas: 30000, - GasFeeCap: big.NewInt(100 * params.GWei), - GasTipCap: big.NewInt(int64(i+1) * params.GWei), - Data: []byte{}, - } - } else { - txdata = &types.LegacyTx{ - Nonce: b.TxNonce(addr), - To: &common.Address{}, - Gas: 21000, - GasPrice: big.NewInt(int64(i+1) * params.GWei), - Value: big.NewInt(100), - Data: []byte{}, - } - } - b.AddTx(types.MustSignNewTx(key, signer, txdata)) - }) - // Construct testing chain - chain, err := core.NewBlockChain(rawdb.NewMemoryDatabase(), &core.CacheConfig{TrieCleanNoPrefetch: true}, gspec, nil, engine, vm.Config{}, nil, nil) - if err != nil { - t.Fatalf("Failed to create local chain, %v", err) - } - _, err = chain.InsertChain(blocks) - if err != nil { - t.Fatalf("Failed to insert blocks, %v", err) - } - return &testBackend{chain: chain, pending: pending} -} - -func (b *testBackend) CurrentHeader() *types.Header { - return b.chain.CurrentHeader() -} - -func (b *testBackend) GetBlockByNumber(number uint64) *types.Block { - return b.chain.GetBlockByNumber(number) -} - -func TestSuggestTipCap(t *testing.T) { - config := Config{ - Blocks: 3, - Percentile: 60, - Default: big.NewInt(params.GWei), - } - var cases = []struct { - fork *big.Int // London fork number - expect *big.Int // Expected gasprice suggestion - }{ - {nil, big.NewInt(params.GWei * int64(30))}, - {big.NewInt(0), big.NewInt(params.GWei * int64(30))}, // Fork point in genesis - {big.NewInt(1), big.NewInt(params.GWei * int64(30))}, // Fork point in first block - {big.NewInt(32), big.NewInt(params.GWei * int64(30))}, // Fork point in last block - {big.NewInt(33), big.NewInt(params.GWei * int64(30))}, // Fork point in the future - } - for _, c := range cases { - backend := newTestBackend(t, c.fork, false) - oracle := NewOracle(backend, config) - - // The gas price sampled is: 32G, 31G, 30G, 29G, 28G, 27G - got, err := oracle.SuggestTipCap(context.Background()) - backend.teardown() - if err != nil { - t.Fatalf("Failed to retrieve recommended gas price: %v", err) - } - if 
got.Cmp(c.expect) != 0 { - t.Fatalf("Gas price mismatch, want %d, got %d", c.expect, got) - } - } -} diff --git a/eth/handler.go b/eth/handler.go index 23dba9e14d..118fb0b230 100644 --- a/eth/handler.go +++ b/eth/handler.go @@ -233,10 +233,6 @@ func newHandler(config *handlerConfig) (*handler, error) { log.Info("Enabled snap sync", "head", head.Number, "hash", head.Hash()) } } - // If snap sync is requested but snapshots are disabled, fail loudly - if h.snapSync.Load() && config.Chain.Snapshots() == nil { - return nil, errors.New("snap sync not supported with snapshots disabled") - } // Construct the downloader (long sync) h.downloader = downloader.New(config.Database, h.eventMux, h.chain, nil, h.removePeer, h.enableSyncedFeatures) @@ -416,23 +412,11 @@ func (h *handler) runEthPeer(peer *eth.Peer, handler eth.Handler) error { } defer h.decHandlers() - // If the peer has a `snap` extension, wait for it to connect so we can have - // a uniform initialization/teardown mechanism - snap, err := h.peers.waitSnapExtension(peer) - if err != nil { - peer.Log().Error("Snapshot extension barrier failed", "err", err) - return err - } trust, err := h.peers.waitTrustExtension(peer) if err != nil { peer.Log().Error("Trust extension barrier failed", "err", err) return err } - bsc, err := h.peers.waitBscExtension(peer) - if err != nil { - peer.Log().Error("Bsc extension barrier failed", "err", err) - return err - } // Execute the Ethereum handshake var ( @@ -448,16 +432,6 @@ func (h *handler) runEthPeer(peer *eth.Peer, handler eth.Handler) error { return err } reject := false // reserved peer slots - if h.snapSync.Load() { - if snap == nil { - // If we are running snap-sync, we want to reserve roughly half the peer - // slots for peers supporting the snap protocol. - // The logic here is; we only allow up to 5 more non-snap peers than snap-peers. - if all, snp := h.peers.len(), h.peers.snapLen(); all-snp > snp+5 { - reject = true - } - } - } // Ignore maxPeers if this is a trusted peer peerInfo := peer.Peer.Info() if !peerInfo.Network.Trusted { @@ -486,7 +460,7 @@ func (h *handler) runEthPeer(peer *eth.Peer, handler eth.Handler) error { peer.Log().Debug("Ethereum peer connected", "name", peer.Name()) // Register the peer locally - if err := h.peers.registerPeer(peer, snap, trust, bsc); err != nil { + if err := h.peers.registerPeer(peer, nil, trust, nil); err != nil { peer.Log().Error("Ethereum peer registration failed", "err", err) return err } @@ -496,72 +470,11 @@ func (h *handler) runEthPeer(peer *eth.Peer, handler eth.Handler) error { if p == nil { return errors.New("peer dropped during handling") } - // Register the peer in the downloader. If the downloader considers it banned, we disconnect - if err := h.downloader.RegisterPeer(peer.ID(), peer.Version(), peer); err != nil { - peer.Log().Error("Failed to register peer in eth syncer", "err", err) - return err - } - if snap != nil { - if err := h.downloader.SnapSyncer.Register(snap); err != nil { - peer.Log().Error("Failed to register peer in snap syncer", "err", err) - return err - } - } - h.chainSync.handlePeerEvent() - - // Propagate existing transactions and votes. new transactions and votes appearing - // after this will be sent via broadcasts. 
- h.syncTransactions(peer) - if h.votepool != nil && p.bscExt != nil { - h.syncVotes(p.bscExt) - } // Create a notification channel for pending requests if the peer goes down dead := make(chan struct{}) defer close(dead) - // If we have any explicit peer required block hashes, request them - for number, hash := range h.requiredBlocks { - resCh := make(chan *eth.Response) - - req, err := peer.RequestHeadersByNumber(number, 1, 0, false, resCh) - if err != nil { - return err - } - go func(number uint64, hash common.Hash, req *eth.Request) { - // Ensure the request gets cancelled in case of error/drop - defer req.Close() - - timeout := time.NewTimer(syncChallengeTimeout) - defer timeout.Stop() - - select { - case res := <-resCh: - headers := ([]*types.Header)(*res.Res.(*eth.BlockHeadersRequest)) - if len(headers) == 0 { - // Required blocks are allowed to be missing if the remote - // node is not yet synced - res.Done <- nil - return - } - // Validate the header and either drop the peer or continue - if len(headers) > 1 { - res.Done <- errors.New("too many headers in required block response") - return - } - if headers[0].Number.Uint64() != number || headers[0].Hash() != hash { - peer.Log().Info("Required block mismatch, dropping peer", "number", number, "hash", headers[0].Hash(), "want", hash) - res.Done <- errors.New("required block mismatch") - return - } - peer.Log().Debug("Peer required block verified", "number", number, "hash", hash) - res.Done <- nil - case <-timeout.C: - peer.Log().Warn("Required block challenge timed out, dropping", "addr", peer.RemoteAddr(), "type", peer.Name()) - h.removePeer(peer.ID()) - } - }(number, hash, req) - } // Handle incoming messages until the connection is torn down return handler(peer) } @@ -670,8 +583,6 @@ func (h *handler) unregisterPeer(id string) { if peer.snapExt != nil { h.downloader.SnapSyncer.Unregister(id) } - h.downloader.UnregisterPeer(id) - h.txFetcher.Drop(id) if err := h.peers.unregisterPeer(id); err != nil { logger.Error("Ethereum peer removal failed", "err", err) @@ -702,39 +613,6 @@ func (h *handler) unregisterPeer(id string) { func (h *handler) Start(maxPeers int, maxPeersPerIP int) { h.maxPeers = maxPeers h.maxPeersPerIP = maxPeersPerIP - // broadcast and announce transactions (only new ones, not resurrected ones) - h.wg.Add(1) - h.txsCh = make(chan core.NewTxsEvent, txChanSize) - h.txsSub = h.txpool.SubscribeTransactions(h.txsCh, false) - go h.txBroadcastLoop() - - // broadcast votes - if h.votepool != nil { - h.wg.Add(1) - h.voteCh = make(chan core.NewVoteEvent, voteChanSize) - h.votesSub = h.votepool.SubscribeNewVoteEvent(h.voteCh) - go h.voteBroadcastLoop() - - if h.maliciousVoteMonitor != nil { - h.wg.Add(1) - go h.startMaliciousVoteMonitor() - } - } - - // announce local pending transactions again - h.wg.Add(1) - h.reannoTxsCh = make(chan core.ReannoTxsEvent, txChanSize) - h.reannoTxsSub = h.txpool.SubscribeReannoTxsEvent(h.reannoTxsCh) - go h.txReannounceLoop() - - // broadcast mined blocks - h.wg.Add(1) - h.minedBlockSub = h.eventMux.Subscribe(core.NewMinedBlockEvent{}) - go h.minedBroadcastLoop() - - // start sync handlers - h.wg.Add(1) - go h.chainSync.loop() // start peer handler tracker h.wg.Add(1) diff --git a/eth/handler_bsc_test.go b/eth/handler_bsc_test.go deleted file mode 100644 index 076b08c213..0000000000 --- a/eth/handler_bsc_test.go +++ /dev/null @@ -1,259 +0,0 @@ -package eth - -import ( - "fmt" - "testing" - "time" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core" - 
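The eth/handler.go hunks above remove the snap and bsc satellite-protocol handshakes, the downloader peer registration, the required-block challenge, and the transaction/vote broadcast loops, and register peers with nil snap/bsc extensions instead. One of the dropped pieces is the snap-peer slot reservation check; the standalone helper below restates that removed rule for reference only (the function and parameter names are mine, not code from the patch), so the behaviour that disappears with this change is easy to see.

// Illustration only: the peer-slot reservation that this patch removes from
// eth/handler.go, extracted into a standalone helper. Not part of the patch.
package main

import "fmt"

// rejectNonSnapPeer mirrors the removed check: while snap sync is running,
// a new peer without the snap extension is rejected once non-snap peers
// outnumber snap peers by more than five.
func rejectNonSnapPeer(snapSyncing, hasSnapExt bool, allPeers, snapPeers int) bool {
	if !snapSyncing || hasSnapExt {
		return false
	}
	return allPeers-snapPeers > snapPeers+5
}

func main() {
	fmt.Println(rejectNonSnapPeer(true, false, 12, 3)) // true: 9 non-snap peers vs 3 snap peers
	fmt.Println(rejectNonSnapPeer(true, false, 8, 3))  // false: still within the 5-peer margin
	fmt.Println(rejectNonSnapPeer(false, false, 50, 0)) // false: not snap-syncing at all
}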
"github.com/ethereum/go-ethereum/core/forkid" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/eth/protocols/bsc" - "github.com/ethereum/go-ethereum/eth/protocols/eth" - "github.com/ethereum/go-ethereum/event" - "github.com/ethereum/go-ethereum/p2p" - "github.com/ethereum/go-ethereum/p2p/enode" -) - -type testBscHandler struct { - voteBroadcasts event.Feed -} - -func (h *testBscHandler) Chain() *core.BlockChain { panic("no backing chain") } -func (h *testBscHandler) RunPeer(peer *bsc.Peer, handler bsc.Handler) error { - panic("not used in tests") -} -func (h *testBscHandler) PeerInfo(enode.ID) interface{} { panic("not used in tests") } -func (h *testBscHandler) Handle(peer *bsc.Peer, packet bsc.Packet) error { - switch packet := packet.(type) { - case *bsc.VotesPacket: - h.voteBroadcasts.Send(packet.Votes) - return nil - - default: - panic(fmt.Sprintf("unexpected bsc packet type in tests: %T", packet)) - } -} - -func TestSendVotes68(t *testing.T) { testSendVotes(t, eth.ETH68) } - -func testSendVotes(t *testing.T, protocol uint) { - t.Parallel() - - // Create a message handler and fill the pool with big votes - handler := newTestHandler() - defer handler.close() - - insert := make([]*types.VoteEnvelope, 100) - for index := range insert { - vote := types.VoteEnvelope{ - VoteAddress: types.BLSPublicKey{}, - Signature: types.BLSSignature{}, - Data: &types.VoteData{ - SourceNumber: uint64(0), - SourceHash: common.BytesToHash(common.Hex2Bytes(string(rune(0)))), - TargetNumber: uint64(index), - TargetHash: common.BytesToHash(common.Hex2Bytes(string(rune(index)))), - }, - } - insert[index] = &vote - go handler.votepool.PutVote(&vote) - } - time.Sleep(250 * time.Millisecond) // Wait until vote events get out of the system (can't use events, vote broadcaster races with peer join) - - protos := []p2p.Protocol{ - { - Name: "eth", - Version: eth.ETH68, - }, - { - Name: "bsc", - Version: bsc.Bsc1, - }, - } - caps := []p2p.Cap{ - { - Name: "eth", - Version: eth.ETH68, - }, - { - Name: "bsc", - Version: bsc.Bsc1, - }, - } - - // Create a source handler to send messages through and a sink peer to receive them - p2pEthSrc, p2pEthSink := p2p.MsgPipe() - defer p2pEthSrc.Close() - defer p2pEthSink.Close() - - localEth := eth.NewPeer(protocol, p2p.NewPeerWithProtocols(enode.ID{1}, protos, "", caps), p2pEthSrc, nil) - remoteEth := eth.NewPeer(protocol, p2p.NewPeerWithProtocols(enode.ID{2}, protos, "", caps), p2pEthSink, nil) - defer localEth.Close() - defer remoteEth.Close() - - p2pBscSrc, p2pBscSink := p2p.MsgPipe() - defer p2pBscSrc.Close() - defer p2pBscSink.Close() - - localBsc := bsc.NewPeer(bsc.Bsc1, p2p.NewPeerWithProtocols(enode.ID{1}, protos, "", caps), p2pBscSrc) - remoteBsc := bsc.NewPeer(bsc.Bsc1, p2p.NewPeerWithProtocols(enode.ID{3}, protos, "", caps), p2pBscSink) - defer localBsc.Close() - defer remoteBsc.Close() - - go func(p *bsc.Peer) { - (*bscHandler)(handler.handler).RunPeer(p, func(peer *bsc.Peer) error { - return bsc.Handle((*bscHandler)(handler.handler), peer) - }) - }(localBsc) - - time.Sleep(200 * time.Millisecond) - remoteBsc.Handshake() - - time.Sleep(200 * time.Millisecond) - go func(p *eth.Peer) { - handler.handler.runEthPeer(p, func(peer *eth.Peer) error { - return eth.Handle((*ethHandler)(handler.handler), peer) - }) - }(localEth) - - // Run the handshake locally to avoid spinning up a source handler - var ( - genesis = handler.chain.Genesis() - head = handler.chain.CurrentBlock() - td = handler.chain.GetTd(head.Hash(), head.Number.Uint64()) - ) - 
time.Sleep(200 * time.Millisecond) - if err := remoteEth.Handshake(1, td, head.Hash(), genesis.Hash(), forkid.NewIDWithChain(handler.chain), forkid.NewFilter(handler.chain), nil); err != nil { - t.Fatalf("failed to run protocol handshake: %d", err) - } - // After the handshake completes, the source handler should stream the sink - // the votes, subscribe to all inbound network events - backend := new(testBscHandler) - bcasts := make(chan []*types.VoteEnvelope) - bcastSub := backend.voteBroadcasts.Subscribe(bcasts) - defer bcastSub.Unsubscribe() - - go bsc.Handle(backend, remoteBsc) - - // Make sure we get all the votes on the correct channels - seen := make(map[common.Hash]struct{}) - for len(seen) < len(insert) { - votes := <-bcasts - for _, vote := range votes { - if _, ok := seen[vote.Hash()]; ok { - t.Errorf("duplicate vote broadcast: %x", vote.Hash()) - } - seen[vote.Hash()] = struct{}{} - } - } - for _, vote := range insert { - if _, ok := seen[vote.Hash()]; !ok { - t.Errorf("missing vote: %x", vote.Hash()) - } - } -} - -func TestRecvVotes68(t *testing.T) { testRecvVotes(t, eth.ETH68) } - -func testRecvVotes(t *testing.T, protocol uint) { - t.Parallel() - - // Create a message handler and fill the pool with big votes - handler := newTestHandler() - defer handler.close() - - protos := []p2p.Protocol{ - { - Name: "eth", - Version: eth.ETH68, - }, - { - Name: "bsc", - Version: bsc.Bsc1, - }, - } - caps := []p2p.Cap{ - { - Name: "eth", - Version: eth.ETH68, - }, - { - Name: "bsc", - Version: bsc.Bsc1, - }, - } - - // Create a source handler to send messages through and a sink peer to receive them - p2pEthSrc, p2pEthSink := p2p.MsgPipe() - defer p2pEthSrc.Close() - defer p2pEthSink.Close() - - localEth := eth.NewPeer(protocol, p2p.NewPeerWithProtocols(enode.ID{1}, protos, "", caps), p2pEthSrc, nil) - remoteEth := eth.NewPeer(protocol, p2p.NewPeerWithProtocols(enode.ID{2}, protos, "", caps), p2pEthSink, nil) - defer localEth.Close() - defer remoteEth.Close() - - p2pBscSrc, p2pBscSink := p2p.MsgPipe() - defer p2pBscSrc.Close() - defer p2pBscSink.Close() - - localBsc := bsc.NewPeer(bsc.Bsc1, p2p.NewPeerWithProtocols(enode.ID{1}, protos, "", caps), p2pBscSrc) - remoteBsc := bsc.NewPeer(bsc.Bsc1, p2p.NewPeerWithProtocols(enode.ID{3}, protos, "", caps), p2pBscSink) - defer localBsc.Close() - defer remoteBsc.Close() - - go func(p *bsc.Peer) { - (*bscHandler)(handler.handler).RunPeer(p, func(peer *bsc.Peer) error { - return bsc.Handle((*bscHandler)(handler.handler), peer) - }) - }(localBsc) - - time.Sleep(200 * time.Millisecond) - remoteBsc.Handshake() - - time.Sleep(200 * time.Millisecond) - go func(p *eth.Peer) { - handler.handler.runEthPeer(p, func(peer *eth.Peer) error { - return eth.Handle((*ethHandler)(handler.handler), peer) - }) - }(localEth) - - // Run the handshake locally to avoid spinning up a source handler - var ( - genesis = handler.chain.Genesis() - head = handler.chain.CurrentBlock() - td = handler.chain.GetTd(head.Hash(), head.Number.Uint64()) - ) - time.Sleep(200 * time.Millisecond) - if err := remoteEth.Handshake(1, td, head.Hash(), genesis.Hash(), forkid.NewIDWithChain(handler.chain), forkid.NewFilter(handler.chain), nil); err != nil { - t.Fatalf("failed to run protocol handshake: %d", err) - } - - votesCh := make(chan core.NewVoteEvent) - sub := handler.votepool.SubscribeNewVoteEvent(votesCh) - defer sub.Unsubscribe() - // Send the vote to the sink and verify that it's added to the vote pool - vote := types.VoteEnvelope{ - VoteAddress: types.BLSPublicKey{}, - Signature: 
types.BLSSignature{}, - Data: &types.VoteData{ - SourceNumber: uint64(0), - SourceHash: common.BytesToHash(common.Hex2Bytes(string(rune(0)))), - TargetNumber: uint64(1), - TargetHash: common.BytesToHash(common.Hex2Bytes(string(rune(1)))), - }, - } - - remoteBsc.AsyncSendVotes([]*types.VoteEnvelope{&vote}) - time.Sleep(100 * time.Millisecond) - select { - case event := <-votesCh: - if event.Vote.Hash() != vote.Hash() { - t.Errorf("added wrong vote hash: got %v, want %v", event.Vote.Hash(), vote.Hash()) - } - case <-time.After(2 * time.Second): - t.Errorf("no NewVotesEvent received within 2 seconds") - } -} diff --git a/eth/handler_eth_test.go b/eth/handler_eth_test.go deleted file mode 100644 index c35bf0f4ce..0000000000 --- a/eth/handler_eth_test.go +++ /dev/null @@ -1,855 +0,0 @@ -// Copyright 2020 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package eth - -import ( - "fmt" - "math/big" - "strconv" - "sync" - "testing" - "time" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/consensus" - "github.com/ethereum/go-ethereum/consensus/ethash" - "github.com/ethereum/go-ethereum/core" - "github.com/ethereum/go-ethereum/core/forkid" - "github.com/ethereum/go-ethereum/core/rawdb" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/core/vm" - "github.com/ethereum/go-ethereum/eth/downloader" - "github.com/ethereum/go-ethereum/eth/protocols/bsc" - "github.com/ethereum/go-ethereum/eth/protocols/eth" - "github.com/ethereum/go-ethereum/event" - "github.com/ethereum/go-ethereum/p2p" - "github.com/ethereum/go-ethereum/p2p/enode" - "github.com/ethereum/go-ethereum/params" -) - -// testEthHandler is a mock event handler to listen for inbound network requests -// on the `eth` protocol and convert them into a more easily testable form. 
-type testEthHandler struct { - blockBroadcasts event.Feed - txAnnounces event.Feed - txBroadcasts event.Feed -} - -func (h *testEthHandler) Chain() *core.BlockChain { panic("no backing chain") } -func (h *testEthHandler) TxPool() eth.TxPool { panic("no backing tx pool") } -func (h *testEthHandler) AcceptTxs() bool { return true } -func (h *testEthHandler) RunPeer(*eth.Peer, eth.Handler) error { panic("not used in tests") } -func (h *testEthHandler) PeerInfo(enode.ID) interface{} { panic("not used in tests") } - -func (h *testEthHandler) Handle(peer *eth.Peer, packet eth.Packet) error { - switch packet := packet.(type) { - case *eth.NewBlockPacket: - h.blockBroadcasts.Send(packet.Block) - return nil - - case *eth.NewPooledTransactionHashesPacket: - h.txAnnounces.Send(packet.Hashes) - return nil - - case *eth.TransactionsPacket: - h.txBroadcasts.Send(([]*types.Transaction)(*packet)) - return nil - - case *eth.PooledTransactionsResponse: - h.txBroadcasts.Send(([]*types.Transaction)(*packet)) - return nil - - default: - panic(fmt.Sprintf("unexpected eth packet type in tests: %T", packet)) - } -} - -// Tests that peers are correctly accepted (or rejected) based on the advertised -// fork IDs in the protocol handshake. -func TestForkIDSplit68(t *testing.T) { testForkIDSplit(t, eth.ETH68) } - -func testForkIDSplit(t *testing.T, protocol uint) { - t.Parallel() - - var ( - engine = ethash.NewFaker() - - configNoFork = ¶ms.ChainConfig{HomesteadBlock: big.NewInt(1)} - configProFork = ¶ms.ChainConfig{ - HomesteadBlock: big.NewInt(1), - EIP150Block: big.NewInt(2), - EIP155Block: big.NewInt(2), - EIP158Block: big.NewInt(2), - ByzantiumBlock: big.NewInt(3), - ConstantinopleBlock: big.NewInt(4), - PetersburgBlock: big.NewInt(4), - IstanbulBlock: big.NewInt(4), - MuirGlacierBlock: big.NewInt(4), - RamanujanBlock: big.NewInt(4), - NielsBlock: big.NewInt(4), - MirrorSyncBlock: big.NewInt(4), - BrunoBlock: big.NewInt(4), - EulerBlock: big.NewInt(5), - GibbsBlock: big.NewInt(5), - NanoBlock: big.NewInt(5), - MoranBlock: big.NewInt(5), - LubanBlock: big.NewInt(6), - PlatoBlock: big.NewInt(6), - HertzBlock: big.NewInt(7), - } - dbNoFork = rawdb.NewMemoryDatabase() - dbProFork = rawdb.NewMemoryDatabase() - - gspecNoFork = &core.Genesis{Config: configNoFork} - gspecProFork = &core.Genesis{Config: configProFork} - - chainNoFork, _ = core.NewBlockChain(dbNoFork, nil, gspecNoFork, nil, engine, vm.Config{}, nil, nil) - chainProFork, _ = core.NewBlockChain(dbProFork, nil, gspecProFork, nil, engine, vm.Config{}, nil, nil) - - _, blocksNoFork, _ = core.GenerateChainWithGenesis(gspecNoFork, engine, 2, nil) - _, blocksProFork, _ = core.GenerateChainWithGenesis(gspecProFork, engine, 2, nil) - - ethNoFork, _ = newHandler(&handlerConfig{ - Database: dbNoFork, - Chain: chainNoFork, - TxPool: newTestTxPool(), - VotePool: newTestVotePool(), - Merger: consensus.NewMerger(rawdb.NewMemoryDatabase()), - Network: 1, - Sync: downloader.FullSync, - BloomCache: 1, - }) - ethProFork, _ = newHandler(&handlerConfig{ - Database: dbProFork, - Chain: chainProFork, - TxPool: newTestTxPool(), - VotePool: newTestVotePool(), - Merger: consensus.NewMerger(rawdb.NewMemoryDatabase()), - Network: 1, - Sync: downloader.FullSync, - BloomCache: 1, - }) - ) - ethNoFork.Start(1000, 1000) - ethProFork.Start(1000, 1000) - - // Clean up everything after ourselves - defer chainNoFork.Stop() - defer chainProFork.Stop() - - defer ethNoFork.Stop() - defer ethProFork.Stop() - - // Both nodes should allow the other to connect (same genesis, next fork is the same) 
- p2pNoFork, p2pProFork := p2p.MsgPipe() - defer p2pNoFork.Close() - defer p2pProFork.Close() - - peerNoFork := eth.NewPeer(protocol, p2p.NewPeerPipe(enode.ID{1}, "", nil, p2pNoFork), p2pNoFork, nil) - peerProFork := eth.NewPeer(protocol, p2p.NewPeerPipe(enode.ID{2}, "", nil, p2pProFork), p2pProFork, nil) - defer peerNoFork.Close() - defer peerProFork.Close() - - errc := make(chan error, 2) - go func(errc chan error) { - errc <- ethNoFork.runEthPeer(peerProFork, func(peer *eth.Peer) error { return nil }) - }(errc) - go func(errc chan error) { - errc <- ethProFork.runEthPeer(peerNoFork, func(peer *eth.Peer) error { return nil }) - }(errc) - - for i := 0; i < 2; i++ { - select { - case err := <-errc: - if err != nil { - t.Fatalf("frontier nofork <-> profork failed: %v", err) - } - case <-time.After(250 * time.Millisecond): - t.Fatalf("frontier nofork <-> profork handler timeout") - } - } - // Progress into Homestead. Fork's match, so we don't care what the future holds - chainNoFork.InsertChain(blocksNoFork[:1]) - chainProFork.InsertChain(blocksProFork[:1]) - - p2pNoFork, p2pProFork = p2p.MsgPipe() - defer p2pNoFork.Close() - defer p2pProFork.Close() - - peerNoFork = eth.NewPeer(protocol, p2p.NewPeer(enode.ID{1}, "", nil), p2pNoFork, nil) - peerProFork = eth.NewPeer(protocol, p2p.NewPeer(enode.ID{2}, "", nil), p2pProFork, nil) - defer peerNoFork.Close() - defer peerProFork.Close() - - errc = make(chan error, 2) - go func(errc chan error) { - errc <- ethNoFork.runEthPeer(peerProFork, func(peer *eth.Peer) error { return nil }) - }(errc) - go func(errc chan error) { - errc <- ethProFork.runEthPeer(peerNoFork, func(peer *eth.Peer) error { return nil }) - }(errc) - - for i := 0; i < 2; i++ { - select { - case err := <-errc: - if err != nil { - t.Fatalf("homestead nofork <-> profork failed: %v", err) - } - case <-time.After(250 * time.Millisecond): - t.Fatalf("homestead nofork <-> profork handler timeout") - } - } - // Progress into Spurious. Forks mismatch, signalling differing chains, reject - chainNoFork.InsertChain(blocksNoFork[1:2]) - chainProFork.InsertChain(blocksProFork[1:2]) - - p2pNoFork, p2pProFork = p2p.MsgPipe() - defer p2pNoFork.Close() - defer p2pProFork.Close() - - peerNoFork = eth.NewPeer(protocol, p2p.NewPeerPipe(enode.ID{1}, "", nil, p2pNoFork), p2pNoFork, nil) - peerProFork = eth.NewPeer(protocol, p2p.NewPeerPipe(enode.ID{2}, "", nil, p2pProFork), p2pProFork, nil) - defer peerNoFork.Close() - defer peerProFork.Close() - - errc = make(chan error, 2) - go func(errc chan error) { - errc <- ethNoFork.runEthPeer(peerProFork, func(peer *eth.Peer) error { return nil }) - }(errc) - go func(errc chan error) { - errc <- ethProFork.runEthPeer(peerNoFork, func(peer *eth.Peer) error { return nil }) - }(errc) - - var successes int - for i := 0; i < 2; i++ { - select { - case err := <-errc: - if err == nil { - successes++ - if successes == 2 { // Only one side disconnects - t.Fatalf("fork ID rejection didn't happen") - } - } - case <-time.After(10000 * time.Millisecond): - t.Fatalf("split peers not rejected") - } - } -} - -// Tests that received transactions are added to the local pool. 
-func TestRecvTransactions68(t *testing.T) { testRecvTransactions(t, eth.ETH68) } - -func testRecvTransactions(t *testing.T, protocol uint) { - t.Parallel() - - // Create a message handler, configure it to accept transactions and watch them - handler := newTestHandler() - defer handler.close() - - handler.handler.synced.Store(true) // mark synced to accept transactions - - txs := make(chan core.NewTxsEvent) - sub := handler.txpool.SubscribeTransactions(txs, false) - defer sub.Unsubscribe() - - // Create a source peer to send messages through and a sink handler to receive them - p2pSrc, p2pSink := p2p.MsgPipe() - defer p2pSrc.Close() - defer p2pSink.Close() - - src := eth.NewPeer(protocol, p2p.NewPeerPipe(enode.ID{1}, "", nil, p2pSrc), p2pSrc, handler.txpool) - sink := eth.NewPeer(protocol, p2p.NewPeerPipe(enode.ID{2}, "", nil, p2pSink), p2pSink, handler.txpool) - defer src.Close() - defer sink.Close() - - go handler.handler.runEthPeer(sink, func(peer *eth.Peer) error { - return eth.Handle((*ethHandler)(handler.handler), peer) - }) - // Run the handshake locally to avoid spinning up a source handler - var ( - genesis = handler.chain.Genesis() - head = handler.chain.CurrentBlock() - td = handler.chain.GetTd(head.Hash(), head.Number.Uint64()) - ) - if err := src.Handshake(1, td, head.Hash(), genesis.Hash(), forkid.NewIDWithChain(handler.chain), forkid.NewFilter(handler.chain), nil); err != nil { - t.Fatalf("failed to run protocol handshake") - } - // Send the transaction to the sink and verify that it's added to the tx pool - tx := types.NewTransaction(0, common.Address{}, big.NewInt(0), 100000, big.NewInt(0), nil) - tx, _ = types.SignTx(tx, types.HomesteadSigner{}, testKey) - - if err := src.SendTransactions([]*types.Transaction{tx}); err != nil { - t.Fatalf("failed to send transaction: %v", err) - } - select { - case event := <-txs: - if len(event.Txs) != 1 { - t.Errorf("wrong number of added transactions: got %d, want 1", len(event.Txs)) - } else if event.Txs[0].Hash() != tx.Hash() { - t.Errorf("added wrong tx hash: got %v, want %v", event.Txs[0].Hash(), tx.Hash()) - } - case <-time.After(2 * time.Second): - t.Errorf("no NewTxsEvent received within 2 seconds") - } -} - -func TestWaitSnapExtensionTimout68(t *testing.T) { testWaitSnapExtensionTimout(t, eth.ETH68) } - -func testWaitSnapExtensionTimout(t *testing.T, protocol uint) { - t.Parallel() - - // Create a message handler, configure it to accept transactions and watch them - handler := newTestHandler() - defer handler.close() - - // Create a source peer to send messages through and a sink handler to receive them - _, p2pSink := p2p.MsgPipe() - defer p2pSink.Close() - - protos := []p2p.Protocol{ - { - Name: "snap", - Version: 1, - }, - } - - sink := eth.NewPeer(protocol, p2p.NewPeerWithProtocols(enode.ID{2}, protos, "", []p2p.Cap{ - { - Name: "snap", - Version: 1, - }, - }), p2pSink, nil) - defer sink.Close() - - err := handler.handler.runEthPeer(sink, func(peer *eth.Peer) error { - return eth.Handle((*ethHandler)(handler.handler), peer) - }) - - if err == nil || err.Error() != "peer wait timeout" { - t.Fatalf("error should be `peer wait timeout`") - } -} - -func TestWaitBscExtensionTimout68(t *testing.T) { testWaitBscExtensionTimout(t, eth.ETH68) } - -func testWaitBscExtensionTimout(t *testing.T, protocol uint) { - t.Parallel() - - // Create a message handler, configure it to accept transactions and watch them - handler := newTestHandler() - defer handler.close() - - // Create a source peer to send messages through and a sink handler to 
receive them - _, p2pSink := p2p.MsgPipe() - defer p2pSink.Close() - - protos := []p2p.Protocol{ - { - Name: "bsc", - Version: bsc.Bsc1, - }, - } - - sink := eth.NewPeer(protocol, p2p.NewPeerWithProtocols(enode.ID{2}, protos, "", []p2p.Cap{ - { - Name: "bsc", - Version: bsc.Bsc1, - }, - }), p2pSink, nil) - defer sink.Close() - - err := handler.handler.runEthPeer(sink, func(peer *eth.Peer) error { - return eth.Handle((*ethHandler)(handler.handler), peer) - }) - - if err == nil || err.Error() != "peer wait timeout" { - t.Fatalf("error should be `peer wait timeout`") - } -} - -// This test checks that pending transactions are sent. -func TestSendTransactions68(t *testing.T) { testSendTransactions(t, eth.ETH68) } - -func testSendTransactions(t *testing.T, protocol uint) { - t.Parallel() - - // Create a message handler and fill the pool with big transactions - handler := newTestHandler() - defer handler.close() - - insert := make([]*types.Transaction, 100) - for nonce := range insert { - tx := types.NewTransaction(uint64(nonce), common.Address{}, big.NewInt(0), 100000, big.NewInt(0), make([]byte, 10240)) - tx, _ = types.SignTx(tx, types.HomesteadSigner{}, testKey) - insert[nonce] = tx - } - go handler.txpool.Add(insert, false, false) // Need goroutine to not block on feed - time.Sleep(250 * time.Millisecond) // Wait until tx events get out of the system (can't use events, tx broadcaster races with peer join) - - // Create a source handler to send messages through and a sink peer to receive them - p2pSrc, p2pSink := p2p.MsgPipe() - defer p2pSrc.Close() - defer p2pSink.Close() - - src := eth.NewPeer(protocol, p2p.NewPeerPipe(enode.ID{1}, "", nil, p2pSrc), p2pSrc, handler.txpool) - sink := eth.NewPeer(protocol, p2p.NewPeerPipe(enode.ID{2}, "", nil, p2pSink), p2pSink, handler.txpool) - defer src.Close() - defer sink.Close() - - go handler.handler.runEthPeer(src, func(peer *eth.Peer) error { - return eth.Handle((*ethHandler)(handler.handler), peer) - }) - // Run the handshake locally to avoid spinning up a source handler - var ( - genesis = handler.chain.Genesis() - head = handler.chain.CurrentBlock() - td = handler.chain.GetTd(head.Hash(), head.Number.Uint64()) - ) - if err := sink.Handshake(1, td, head.Hash(), genesis.Hash(), forkid.NewIDWithChain(handler.chain), forkid.NewFilter(handler.chain), nil); err != nil { - t.Fatalf("failed to run protocol handshake") - } - // After the handshake completes, the source handler should stream the sink - // the transactions, subscribe to all inbound network events - backend := new(testEthHandler) - - anns := make(chan []common.Hash) - annSub := backend.txAnnounces.Subscribe(anns) - defer annSub.Unsubscribe() - - bcasts := make(chan []*types.Transaction) - bcastSub := backend.txBroadcasts.Subscribe(bcasts) - defer bcastSub.Unsubscribe() - - go eth.Handle(backend, sink) - - // Make sure we get all the transactions on the correct channels - seen := make(map[common.Hash]struct{}) - for len(seen) < len(insert) { - switch protocol { - case 68: - select { - case hashes := <-anns: - for _, hash := range hashes { - if _, ok := seen[hash]; ok { - t.Errorf("duplicate transaction announced: %x", hash) - } - seen[hash] = struct{}{} - } - case <-bcasts: - t.Errorf("initial tx broadcast received on post eth/66") - } - - default: - panic("unsupported protocol, please extend test") - } - } - for _, tx := range insert { - if _, ok := seen[tx.Hash()]; !ok { - t.Errorf("missing transaction: %x", tx.Hash()) - } - } -} - -// Tests that transactions get propagated to all attached 
peers, either via direct -// broadcasts or via announcements/retrievals. -func TestTransactionPropagation68(t *testing.T) { testTransactionPropagation(t, eth.ETH68) } - -func testTransactionPropagation(t *testing.T, protocol uint) { - t.Parallel() - - // Create a source handler to send transactions from and a number of sinks - // to receive them. We need multiple sinks since a one-to-one peering would - // broadcast all transactions without announcement. - source := newTestHandler() - source.handler.snapSync.Store(false) // Avoid requiring snap, otherwise some will be dropped below - defer source.close() - - sinks := make([]*testHandler, 10) - for i := 0; i < len(sinks); i++ { - sinks[i] = newTestHandler() - defer sinks[i].close() - - sinks[i].handler.synced.Store(true) // mark synced to accept transactions - } - // Interconnect all the sink handlers with the source handler - for i, sink := range sinks { - sink := sink // Closure for gorotuine below - - sourcePipe, sinkPipe := p2p.MsgPipe() - defer sourcePipe.Close() - defer sinkPipe.Close() - - sourcePeer := eth.NewPeer(protocol, p2p.NewPeerPipe(enode.ID{byte(i + 1)}, "", nil, sourcePipe), sourcePipe, source.txpool) - sinkPeer := eth.NewPeer(protocol, p2p.NewPeerPipe(enode.ID{0}, "", nil, sinkPipe), sinkPipe, sink.txpool) - defer sourcePeer.Close() - defer sinkPeer.Close() - - go source.handler.runEthPeer(sourcePeer, func(peer *eth.Peer) error { - return eth.Handle((*ethHandler)(source.handler), peer) - }) - go sink.handler.runEthPeer(sinkPeer, func(peer *eth.Peer) error { - return eth.Handle((*ethHandler)(sink.handler), peer) - }) - } - // Subscribe to all the transaction pools - txChs := make([]chan core.NewTxsEvent, len(sinks)) - for i := 0; i < len(sinks); i++ { - txChs[i] = make(chan core.NewTxsEvent, 1024) - - sub := sinks[i].txpool.SubscribeTransactions(txChs[i], false) - defer sub.Unsubscribe() - } - // Fill the source pool with transactions and wait for them at the sinks - txs := make([]*types.Transaction, 1024) - for nonce := range txs { - tx := types.NewTransaction(uint64(nonce), common.Address{}, big.NewInt(0), 100000, big.NewInt(0), nil) - tx, _ = types.SignTx(tx, types.HomesteadSigner{}, testKey) - txs[nonce] = tx - } - source.txpool.Add(txs, false, false) - - // Iterate through all the sinks and ensure they all got the transactions - for i := range sinks { - for arrived, timeout := 0, false; arrived < len(txs) && !timeout; { - select { - case event := <-txChs[i]: - arrived += len(event.Txs) - case <-time.After(2 * time.Second): - t.Errorf("sink %d: transaction propagation timed out: have %d, want %d", i, arrived, len(txs)) - timeout = true - } - } - } -} - -// Tests that local pending transactions get propagated to peers. -func TestTransactionPendingReannounce(t *testing.T) { - t.Parallel() - - // Create a source handler to announce transactions from and a sink handler - // to receive them. 
- source := newTestHandler() - defer source.close() - - sink := newTestHandler() - defer sink.close() - sink.handler.synced.Store(true) // mark synced to accept transactions - - sourcePipe, sinkPipe := p2p.MsgPipe() - defer sourcePipe.Close() - defer sinkPipe.Close() - - sourcePeer := eth.NewPeer(eth.ETH68, p2p.NewPeer(enode.ID{0}, "", nil), sourcePipe, source.txpool) - sinkPeer := eth.NewPeer(eth.ETH68, p2p.NewPeer(enode.ID{0}, "", nil), sinkPipe, sink.txpool) - defer sourcePeer.Close() - defer sinkPeer.Close() - - go source.handler.runEthPeer(sourcePeer, func(peer *eth.Peer) error { - return eth.Handle((*ethHandler)(source.handler), peer) - }) - go sink.handler.runEthPeer(sinkPeer, func(peer *eth.Peer) error { - return eth.Handle((*ethHandler)(sink.handler), peer) - }) - - // Subscribe transaction pools - txCh := make(chan core.NewTxsEvent, 1024) - sub := sink.txpool.SubscribeTransactions(txCh, false) - defer sub.Unsubscribe() - - txs := make([]*types.Transaction, 64) - for nonce := range txs { - tx := types.NewTransaction(uint64(nonce), common.Address{}, big.NewInt(0), 100000, big.NewInt(0), nil) - tx, _ = types.SignTx(tx, types.HomesteadSigner{}, testKey) - - txs[nonce] = tx - } - source.txpool.ReannouceTransactions(txs) - - for arrived := 0; arrived < len(txs); { - select { - case event := <-txCh: - arrived += len(event.Txs) - case <-time.NewTimer(time.Second).C: - t.Errorf("sink: transaction propagation timed out: have %d, want %d", arrived, len(txs)) - } - } -} - -// Tests that blocks are broadcast to a sqrt number of peers only. -func TestBroadcastBlock1Peer(t *testing.T) { testBroadcastBlock(t, 1, 1) } -func TestBroadcastBlock2Peers(t *testing.T) { testBroadcastBlock(t, 2, 1) } -func TestBroadcastBlock3Peers(t *testing.T) { testBroadcastBlock(t, 3, 1) } -func TestBroadcastBlock4Peers(t *testing.T) { testBroadcastBlock(t, 4, 2) } -func TestBroadcastBlock5Peers(t *testing.T) { testBroadcastBlock(t, 5, 2) } -func TestBroadcastBlock8Peers(t *testing.T) { testBroadcastBlock(t, 9, 3) } -func TestBroadcastBlock12Peers(t *testing.T) { testBroadcastBlock(t, 12, 3) } -func TestBroadcastBlock16Peers(t *testing.T) { testBroadcastBlock(t, 16, 4) } -func TestBroadcastBloc26Peers(t *testing.T) { testBroadcastBlock(t, 26, 5) } -func TestBroadcastBlock100Peers(t *testing.T) { testBroadcastBlock(t, 100, 10) } - -func testBroadcastBlock(t *testing.T, peers, bcasts int) { - t.Parallel() - - // Create a source handler to broadcast blocks from and a number of sinks - // to receive them. 
- source := newTestHandlerWithBlocks(1) - defer source.close() - - sinks := make([]*testEthHandler, peers) - for i := 0; i < len(sinks); i++ { - sinks[i] = new(testEthHandler) - } - // Interconnect all the sink handlers with the source handler - var ( - genesis = source.chain.Genesis() - td = source.chain.GetTd(genesis.Hash(), genesis.NumberU64()) - ) - for i, sink := range sinks { - sink := sink // Closure for gorotuine below - - sourcePipe, sinkPipe := p2p.MsgPipe() - defer sourcePipe.Close() - defer sinkPipe.Close() - - sourcePeer := eth.NewPeer(eth.ETH68, p2p.NewPeerPipe(enode.ID{byte(i)}, "", nil, sourcePipe), sourcePipe, nil) - sinkPeer := eth.NewPeer(eth.ETH68, p2p.NewPeerPipe(enode.ID{0}, "", nil, sinkPipe), sinkPipe, nil) - defer sourcePeer.Close() - defer sinkPeer.Close() - - go source.handler.runEthPeer(sourcePeer, func(peer *eth.Peer) error { - return eth.Handle((*ethHandler)(source.handler), peer) - }) - // Wait a bit for the above handlers to start - time.Sleep(100 * time.Millisecond) - - if err := sinkPeer.Handshake(1, td, genesis.Hash(), genesis.Hash(), forkid.NewIDWithChain(source.chain), forkid.NewFilter(source.chain), nil); err != nil { - t.Fatalf("failed to run protocol handshake") - } - go eth.Handle(sink, sinkPeer) - } - // Subscribe to all the transaction pools - blockChs := make([]chan *types.Block, len(sinks)) - for i := 0; i < len(sinks); i++ { - blockChs[i] = make(chan *types.Block, 1) - defer close(blockChs[i]) - - sub := sinks[i].blockBroadcasts.Subscribe(blockChs[i]) - defer sub.Unsubscribe() - } - // Initiate a block propagation across the peers - time.Sleep(100 * time.Millisecond) - header := source.chain.CurrentBlock() - source.handler.BroadcastBlock(source.chain.GetBlock(header.Hash(), header.Number.Uint64()), true) - - // Iterate through all the sinks and ensure the correct number got the block - done := make(chan struct{}, peers) - for _, ch := range blockChs { - ch := ch - go func() { - <-ch - done <- struct{}{} - }() - } - var received int - for { - select { - case <-done: - received++ - - case <-time.After(100 * time.Millisecond): - if received != bcasts { - t.Errorf("broadcast count mismatch: have %d, want %d", received, bcasts) - } - return - } - } -} - -// Tests that a propagated malformed block (uncles or transactions don't match -// with the hashes in the header) gets discarded and not broadcast forward. -func TestBroadcastMalformedBlock68(t *testing.T) { testBroadcastMalformedBlock(t, eth.ETH68) } - -func testBroadcastMalformedBlock(t *testing.T, protocol uint) { - t.Parallel() - - // Create a source handler to broadcast blocks from and a number of sinks - // to receive them. 
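Editor's note on the deleted TestBroadcastBlock*Peers cases above: the peer-count/broadcast-count pairs encode go-ethereum's square-root propagation rule, where a freshly imported block is pushed in full to roughly sqrt(peers) peers and the remainder only receive a hash announcement. A minimal, self-contained sketch of that arithmetic (fullBroadcastCount is an illustrative helper, not a function from this codebase):

package main

import (
	"fmt"
	"math"
)

// fullBroadcastCount mirrors the expectation table of the deleted tests:
// floor(sqrt(peers)) peers receive the whole block, the rest only get the
// hash announcement.
func fullBroadcastCount(peers int) int {
	return int(math.Sqrt(float64(peers)))
}

func main() {
	for _, peers := range []int{1, 4, 9, 16, 26, 100} {
		fmt.Printf("%3d peers -> %2d full broadcasts\n", peers, fullBroadcastCount(peers))
	}
}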
- source := newTestHandlerWithBlocks(1) - defer source.close() - - // Create a source handler to send messages through and a sink peer to receive them - p2pSrc, p2pSink := p2p.MsgPipe() - defer p2pSrc.Close() - defer p2pSink.Close() - - src := eth.NewPeer(protocol, p2p.NewPeerPipe(enode.ID{1}, "", nil, p2pSrc), p2pSrc, source.txpool) - sink := eth.NewPeer(protocol, p2p.NewPeerPipe(enode.ID{2}, "", nil, p2pSink), p2pSink, source.txpool) - defer src.Close() - defer sink.Close() - - go source.handler.runEthPeer(src, func(peer *eth.Peer) error { - return eth.Handle((*ethHandler)(source.handler), peer) - }) - // Run the handshake locally to avoid spinning up a sink handler - var ( - genesis = source.chain.Genesis() - td = source.chain.GetTd(genesis.Hash(), genesis.NumberU64()) - ) - if err := sink.Handshake(1, td, genesis.Hash(), genesis.Hash(), forkid.NewIDWithChain(source.chain), forkid.NewFilter(source.chain), nil); err != nil { - t.Fatalf("failed to run protocol handshake") - } - // After the handshake completes, the source handler should stream the sink - // the blocks, subscribe to inbound network events - backend := new(testEthHandler) - - blocks := make(chan *types.Block, 1) - sub := backend.blockBroadcasts.Subscribe(blocks) - defer sub.Unsubscribe() - - go eth.Handle(backend, sink) - - // Create various combinations of malformed blocks - head := source.chain.CurrentBlock() - block := source.chain.GetBlock(head.Hash(), head.Number.Uint64()) - - malformedUncles := head - malformedUncles.UncleHash[0]++ - malformedTransactions := head - malformedTransactions.TxHash[0]++ - malformedEverything := head - malformedEverything.UncleHash[0]++ - malformedEverything.TxHash[0]++ - - // Try to broadcast all malformations and ensure they all get discarded - for _, header := range []*types.Header{malformedUncles, malformedTransactions, malformedEverything} { - block := types.NewBlockWithHeader(header).WithBody(block.Transactions(), block.Uncles()) - if err := src.SendNewBlock(block, big.NewInt(131136)); err != nil { - t.Fatalf("failed to broadcast block: %v", err) - } - select { - case <-blocks: - t.Fatalf("malformed block forwarded") - case <-time.After(100 * time.Millisecond): - } - } -} - -func TestOptionMaxPeersPerIP(t *testing.T) { - t.Parallel() - - handler := newTestHandler() - defer handler.close() - var ( - genesis = handler.chain.Genesis() - head = handler.chain.CurrentBlock() - td = handler.chain.GetTd(head.Hash(), head.Number.Uint64()) - wg = sync.WaitGroup{} - maxPeersPerIP = handler.handler.maxPeersPerIP - uniPort = 1000 - ) - - tryFunc := func(tryNum int, ip1 string, ip2 string, trust bool, doneCh chan struct{}) { - // Create a source peer to send messages through and a sink handler to receive them - p2pSrc, p2pSink := p2p.MsgPipe() - defer p2pSrc.Close() - defer p2pSink.Close() - - peer1 := p2p.NewPeerPipe(enode.ID{0}, "", nil, p2pSrc) - peer1.UpdateTestRemoteAddr(ip1 + strconv.Itoa(uniPort)) - peer2 := p2p.NewPeerPipe(enode.ID{byte(uniPort)}, "", nil, p2pSink) - peer2.UpdateTestRemoteAddr(ip2 + strconv.Itoa(uniPort)) - if trust { - peer2.UpdateTrustFlagTest() - } - uniPort++ - - src := eth.NewPeer(eth.ETH68, peer1, p2pSrc, handler.txpool) - sink := eth.NewPeer(eth.ETH68, peer2, p2pSink, handler.txpool) - defer src.Close() - defer sink.Close() - - wg.Add(1) - time.Sleep(time.Duration((tryNum-1)*200) * time.Millisecond) - go func(num int) { - err := handler.handler.runEthPeer(sink, func(peer *eth.Peer) error { - wg.Done() - <-doneCh - return nil - }) - // err is nil, connection ok and it 
is closed by the doneCh - if err == nil { - if trust || num <= maxPeersPerIP { - return - } - // if num > maxPeersPerIP and not trust, should report: p2p.DiscTooManyPeers - t.Errorf("current num is %d, maxPeersPerIP is %d, should failed", num, maxPeersPerIP) - return - } - wg.Done() - if trust { - t.Errorf("trust node should not failed, num is %d, maxPeersPerIP is %d, but failed:%s", num, maxPeersPerIP, err) - } - // err should be p2p.DiscTooManyPeers and num > maxPeersPerIP - if err == p2p.DiscTooManyPeers && num > maxPeersPerIP { - return - } - - t.Errorf("current num is %d, maxPeersPerIP is %d, but failed:%s", num, maxPeersPerIP, err) - }(tryNum) - - if err := src.Handshake(1, td, head.Hash(), genesis.Hash(), forkid.NewIDWithChain(handler.chain), forkid.NewFilter(handler.chain), nil); err != nil { - t.Fatalf("failed to run protocol handshake") - } - // make sure runEthPeer execute one by one. - wg.Wait() - } - - // case 1: normal case - doneCh1 := make(chan struct{}) - for tryNum := 1; tryNum <= maxPeersPerIP+2; tryNum++ { - tryFunc(tryNum, "1.2.3.11:", "1.2.3.22:", false, doneCh1) - } - close(doneCh1) - - // case 2: once the previous connection was unregisterred, new connections with same IP can be accepted. - doneCh2 := make(chan struct{}) - for tryNum := 1; tryNum <= maxPeersPerIP+2; tryNum++ { - tryFunc(tryNum, "1.2.3.11:", "1.2.3.22:", false, doneCh2) - } - close(doneCh2) - - // case 3: ipv6 address, like: [2001:db8::1]:80 - doneCh3 := make(chan struct{}) - for tryNum := 1; tryNum <= maxPeersPerIP+2; tryNum++ { - tryFunc(tryNum, "[2001:db8::11]:", "[2001:db8::22]:", false, doneCh3) - } - close(doneCh3) - - // case 4: same as case 2, but for ipv6 - doneCh4 := make(chan struct{}) - for tryNum := 1; tryNum <= maxPeersPerIP+2; tryNum++ { - tryFunc(tryNum, "[2001:db8::11]:", "[2001:db8::22]:", false, doneCh4) - } - close(doneCh4) - - // case 5: test trust node - doneCh5 := make(chan struct{}) - for tryNum := 1; tryNum <= maxPeersPerIP+2; tryNum++ { - tryFunc(tryNum, "[2001:db8::11]:", "[2001:db8::22]:", true, doneCh5) - } - close(doneCh5) -} diff --git a/eth/handler_test.go b/eth/handler_test.go deleted file mode 100644 index 5584afe857..0000000000 --- a/eth/handler_test.go +++ /dev/null @@ -1,400 +0,0 @@ -// Copyright 2015 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . 
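Editor's note on the deleted TestOptionMaxPeersPerIP above: it exercises a per-IP connection cap with an exemption for trusted peers, covering both IPv4 and bracketed IPv6 remote addresses. A rough, self-contained sketch of that policy (ipLimiter and its methods are illustrative bookkeeping, not the real handler's implementation):

package main

import (
	"fmt"
	"net"
)

// ipLimiter caps concurrent peers per remote IP, with trusted peers exempt.
type ipLimiter struct {
	max   int
	count map[string]int
}

func (l *ipLimiter) register(remoteAddr string, trusted bool) error {
	host, _, err := net.SplitHostPort(remoteAddr) // handles IPv4 and [IPv6]:port forms
	if err != nil {
		return err
	}
	if !trusted && l.count[host] >= l.max {
		return fmt.Errorf("too many peers from %s", host)
	}
	l.count[host]++
	return nil
}

func (l *ipLimiter) unregister(remoteAddr string) {
	if host, _, err := net.SplitHostPort(remoteAddr); err == nil && l.count[host] > 0 {
		l.count[host]--
	}
}

func main() {
	l := &ipLimiter{max: 2, count: map[string]int{}}
	for i := 0; i < 3; i++ {
		fmt.Println(l.register("1.2.3.11:1000", false)) // third attempt is rejected
	}
	fmt.Println(l.register("[2001:db8::11]:1000", true)) // trusted: always accepted
}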
- -package eth - -import ( - "crypto/ecdsa" - "math/big" - "sort" - "sync" - "testing" - - "github.com/ethereum/go-ethereum/consensus/misc/eip4844" - "github.com/ethereum/go-ethereum/core/state" - "github.com/ethereum/go-ethereum/crypto/kzg4844" - "github.com/ethereum/go-ethereum/trie" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/consensus" - "github.com/ethereum/go-ethereum/consensus/ethash" - "github.com/ethereum/go-ethereum/core" - "github.com/ethereum/go-ethereum/core/rawdb" - "github.com/ethereum/go-ethereum/core/txpool" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/core/vm" - "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/eth/downloader" - "github.com/ethereum/go-ethereum/ethdb" - "github.com/ethereum/go-ethereum/event" - "github.com/ethereum/go-ethereum/params" - "github.com/holiman/uint256" -) - -var ( - // testKey is a private key to use for funding a tester account. - testKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") - - // testAddr is the Ethereum address of the tester account. - testAddr = crypto.PubkeyToAddress(testKey.PublicKey) -) - -// testTxPool is a mock transaction pool that blindly accepts all transactions. -// Its goal is to get around setting up a valid statedb for the balance and nonce -// checks. -type testTxPool struct { - pool map[common.Hash]*types.Transaction // Hash map of collected transactions - - txFeed event.Feed // Notification feed to allow waiting for inclusion - reannoTxFeed event.Feed // Notification feed to trigger reannouce - lock sync.RWMutex // Protects the transaction pool -} - -// newTestTxPool creates a mock transaction pool. -func newTestTxPool() *testTxPool { - return &testTxPool{ - pool: make(map[common.Hash]*types.Transaction), - } -} - -// Has returns an indicator whether txpool has a transaction -// cached with the given hash. -func (p *testTxPool) Has(hash common.Hash) bool { - p.lock.Lock() - defer p.lock.Unlock() - - return p.pool[hash] != nil -} - -// Get retrieves the transaction from local txpool with given -// tx hash. -func (p *testTxPool) Get(hash common.Hash) *types.Transaction { - p.lock.Lock() - defer p.lock.Unlock() - return p.pool[hash] -} - -// Add appends a batch of transactions to the pool, and notifies any -// listeners if the addition channel is non nil -func (p *testTxPool) Add(txs []*types.Transaction, local bool, sync bool) []error { - p.lock.Lock() - defer p.lock.Unlock() - - for _, tx := range txs { - p.pool[tx.Hash()] = tx - } - p.txFeed.Send(core.NewTxsEvent{Txs: txs}) - return make([]error, len(txs)) -} - -// ReannouceTransactions announce the transactions to some peers. 
-func (p *testTxPool) ReannouceTransactions(txs []*types.Transaction) []error { - p.lock.Lock() - defer p.lock.Unlock() - - for _, tx := range txs { - p.pool[tx.Hash()] = tx - } - p.reannoTxFeed.Send(core.ReannoTxsEvent{Txs: txs}) - return make([]error, len(txs)) -} - -// Pending returns all the transactions known to the pool -func (p *testTxPool) Pending(filter txpool.PendingFilter) map[common.Address][]*txpool.LazyTransaction { - p.lock.RLock() - defer p.lock.RUnlock() - - batches := make(map[common.Address][]*types.Transaction) - for _, tx := range p.pool { - from, _ := types.Sender(types.HomesteadSigner{}, tx) - batches[from] = append(batches[from], tx) - } - for _, batch := range batches { - sort.Sort(types.TxByNonce(batch)) - } - pending := make(map[common.Address][]*txpool.LazyTransaction) - for addr, batch := range batches { - for _, tx := range batch { - pending[addr] = append(pending[addr], &txpool.LazyTransaction{ - Hash: tx.Hash(), - Tx: tx, - Time: tx.Time(), - GasFeeCap: uint256.MustFromBig(tx.GasFeeCap()), - GasTipCap: uint256.MustFromBig(tx.GasTipCap()), - Gas: tx.Gas(), - BlobGas: tx.BlobGas(), - }) - } - } - return pending -} - -// SubscribeTransactions should return an event subscription of NewTxsEvent and -// send events to the given channel. -func (p *testTxPool) SubscribeTransactions(ch chan<- core.NewTxsEvent, reorgs bool) event.Subscription { - return p.txFeed.Subscribe(ch) -} - -// SubscribeReannoTxsEvent should return an event subscription of ReannoTxsEvent and -// send events to the given channel. -func (p *testTxPool) SubscribeReannoTxsEvent(ch chan<- core.ReannoTxsEvent) event.Subscription { - return p.reannoTxFeed.Subscribe(ch) -} - -// testHandler is a live implementation of the Ethereum protocol handler, just -// preinitialized with some sane testing defaults and the transaction pool mocked -// out. -type testHandler struct { - db ethdb.Database - chain *core.BlockChain - txpool *testTxPool - votepool *testVotePool - handler *handler -} - -// newTestHandler creates a new handler for testing purposes with no blocks. -func newTestHandler() *testHandler { - return newTestHandlerWithBlocks(0) -} - -// newTestHandlerWithBlocks creates a new handler for testing purposes, with a -// given number of initial blocks. 
-func newTestHandlerWithBlocks(blocks int) *testHandler { - // Create a database pre-initialize with a genesis block - db := rawdb.NewMemoryDatabase() - gspec := &core.Genesis{ - Config: params.TestChainConfig, - Alloc: types.GenesisAlloc{testAddr: {Balance: big.NewInt(1000000)}}, - } - chain, _ := core.NewBlockChain(db, nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil) - - _, bs, _ := core.GenerateChainWithGenesis(gspec, ethash.NewFaker(), blocks, nil) - if _, err := chain.InsertChain(bs); err != nil { - panic(err) - } - txpool := newTestTxPool() - votepool := newTestVotePool() - - handler, _ := newHandler(&handlerConfig{ - Database: db, - Chain: chain, - TxPool: txpool, - Merger: consensus.NewMerger(rawdb.NewMemoryDatabase()), - VotePool: votepool, - Network: 1, - Sync: downloader.SnapSync, - BloomCache: 1, - }) - handler.Start(1000, 3) - - return &testHandler{ - db: db, - chain: chain, - txpool: txpool, - votepool: votepool, - handler: handler, - } -} - -type mockParlia struct { - consensus.Engine -} - -func (c *mockParlia) Author(header *types.Header) (common.Address, error) { - return header.Coinbase, nil -} - -func (c *mockParlia) VerifyUncles(chain consensus.ChainReader, block *types.Block) error { - return nil -} - -func (c *mockParlia) VerifyHeader(chain consensus.ChainHeaderReader, header *types.Header) error { - return nil -} - -func (c *mockParlia) VerifyHeaders(chain consensus.ChainHeaderReader, headers []*types.Header) (chan<- struct{}, <-chan error) { - abort := make(chan<- struct{}) - results := make(chan error, len(headers)) - for i := 0; i < len(headers); i++ { - results <- nil - } - return abort, results -} - -func (c *mockParlia) Finalize(chain consensus.ChainHeaderReader, header *types.Header, state *state.StateDB, _ *[]*types.Transaction, uncles []*types.Header, withdrawals []*types.Withdrawal, - _ *[]*types.Receipt, _ *[]*types.Transaction, _ *uint64) (err error) { - return -} - -func (c *mockParlia) FinalizeAndAssemble(chain consensus.ChainHeaderReader, header *types.Header, state *state.StateDB, txs []*types.Transaction, - uncles []*types.Header, receipts []*types.Receipt, withdrawals []*types.Withdrawal) (*types.Block, []*types.Receipt, error) { - // Finalize block - c.Finalize(chain, header, state, &txs, uncles, nil, nil, nil, nil) - - // Assign the final state root to header. 
- header.Root = state.IntermediateRoot(chain.Config().IsEIP158(header.Number)) - - // Header seems complete, assemble into a block and return - return types.NewBlock(header, txs, uncles, receipts, trie.NewStackTrie(nil)), receipts, nil -} - -func (c *mockParlia) CalcDifficulty(chain consensus.ChainHeaderReader, time uint64, parent *types.Header) *big.Int { - return big.NewInt(1) -} - -func newTestParliaHandlerAfterCancun(t *testing.T, config *params.ChainConfig, mode downloader.SyncMode, preCancunBlks, postCancunBlks uint64) *testHandler { - // Have N headers in the freezer - frdir := t.TempDir() - db, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), frdir, "", false, false, false, false, false) - if err != nil { - t.Fatalf("failed to create database with ancient backend") - } - gspec := &core.Genesis{ - Config: config, - Alloc: types.GenesisAlloc{testAddr: {Balance: new(big.Int).SetUint64(10 * params.Ether)}}, - } - engine := &mockParlia{} - chain, _ := core.NewBlockChain(db, nil, gspec, nil, engine, vm.Config{}, nil, nil) - signer := types.LatestSigner(config) - - _, bs, _ := core.GenerateChainWithGenesis(gspec, engine, int(preCancunBlks+postCancunBlks), func(i int, gen *core.BlockGen) { - if !config.IsCancun(gen.Number(), gen.Timestamp()) { - tx, _ := makeMockTx(config, signer, testKey, gen.TxNonce(testAddr), gen.BaseFee().Uint64(), eip4844.CalcBlobFee(gen.ExcessBlobGas()).Uint64(), false) - gen.AddTxWithChain(chain, tx) - return - } - tx, sidecar := makeMockTx(config, signer, testKey, gen.TxNonce(testAddr), gen.BaseFee().Uint64(), eip4844.CalcBlobFee(gen.ExcessBlobGas()).Uint64(), true) - gen.AddTxWithChain(chain, tx) - gen.AddBlobSidecar(&types.BlobSidecar{ - BlobTxSidecar: *sidecar, - TxIndex: 0, - TxHash: tx.Hash(), - }) - }) - if _, err := chain.InsertChain(bs); err != nil { - panic(err) - } - txpool := newTestTxPool() - votepool := newTestVotePool() - - handler, _ := newHandler(&handlerConfig{ - Database: db, - Chain: chain, - TxPool: txpool, - Merger: consensus.NewMerger(rawdb.NewMemoryDatabase()), - VotePool: votepool, - Network: 1, - Sync: mode, - BloomCache: 1, - }) - handler.Start(1000, 3) - - return &testHandler{ - db: db, - chain: chain, - txpool: txpool, - votepool: votepool, - handler: handler, - } -} - -// close tears down the handler and all its internal constructs. -func (b *testHandler) close() { - b.handler.Stop() - b.chain.Stop() -} - -// newTestVotePool creates a mock vote pool. -type testVotePool struct { - pool map[common.Hash]*types.VoteEnvelope // Hash map of collected votes - - voteFeed event.Feed // Notification feed to allow waiting for inclusion - lock sync.RWMutex // Protects the vote pool -} - -// newTestVotePool creates a mock vote pool. 
-func newTestVotePool() *testVotePool { - return &testVotePool{ - pool: make(map[common.Hash]*types.VoteEnvelope), - } -} - -func (t *testVotePool) PutVote(vote *types.VoteEnvelope) { - t.lock.Lock() - defer t.lock.Unlock() - - t.pool[vote.Hash()] = vote - t.voteFeed.Send(core.NewVoteEvent{Vote: vote}) -} - -func (t *testVotePool) FetchVoteByBlockHash(blockHash common.Hash) []*types.VoteEnvelope { - panic("implement me") -} - -func (t *testVotePool) GetVotes() []*types.VoteEnvelope { - t.lock.RLock() - defer t.lock.RUnlock() - - votes := make([]*types.VoteEnvelope, 0, len(t.pool)) - for _, vote := range t.pool { - votes = append(votes, vote) - } - return votes -} - -func (t *testVotePool) SubscribeNewVoteEvent(ch chan<- core.NewVoteEvent) event.Subscription { - return t.voteFeed.Subscribe(ch) -} - -var ( - emptyBlob = kzg4844.Blob{} - emptyBlobCommit, _ = kzg4844.BlobToCommitment(emptyBlob) - emptyBlobProof, _ = kzg4844.ComputeBlobProof(emptyBlob, emptyBlobCommit) -) - -func makeMockTx(config *params.ChainConfig, signer types.Signer, key *ecdsa.PrivateKey, nonce uint64, baseFee uint64, blobBaseFee uint64, isBlobTx bool) (*types.Transaction, *types.BlobTxSidecar) { - if !isBlobTx { - raw := &types.DynamicFeeTx{ - ChainID: config.ChainID, - Nonce: nonce, - GasTipCap: big.NewInt(10), - GasFeeCap: new(big.Int).SetUint64(baseFee + 10), - Gas: params.TxGas, - To: &common.Address{0x00}, - Value: big.NewInt(0), - } - tx, _ := types.SignTx(types.NewTx(raw), signer, key) - return tx, nil - } - sidecar := &types.BlobTxSidecar{ - Blobs: []kzg4844.Blob{emptyBlob, emptyBlob}, - Commitments: []kzg4844.Commitment{emptyBlobCommit, emptyBlobCommit}, - Proofs: []kzg4844.Proof{emptyBlobProof, emptyBlobProof}, - } - raw := &types.BlobTx{ - ChainID: uint256.MustFromBig(config.ChainID), - Nonce: nonce, - GasTipCap: uint256.NewInt(10), - GasFeeCap: uint256.NewInt(baseFee + 10), - Gas: params.TxGas, - To: common.Address{0x00}, - Value: uint256.NewInt(0), - BlobFeeCap: uint256.NewInt(blobBaseFee), - BlobHashes: sidecar.BlobHashes(), - } - tx, _ := types.SignTx(types.NewTx(raw), signer, key) - return tx, sidecar -} diff --git a/eth/protocols/bsc/handler.go b/eth/protocols/bsc/handler.go index e993f255f3..ee03597832 100644 --- a/eth/protocols/bsc/handler.go +++ b/eth/protocols/bsc/handler.go @@ -2,10 +2,8 @@ package bsc import ( "fmt" - "time" "github.com/ethereum/go-ethereum/core" - "github.com/ethereum/go-ethereum/metrics" "github.com/ethereum/go-ethereum/p2p" "github.com/ethereum/go-ethereum/p2p/enode" "github.com/ethereum/go-ethereum/p2p/enr" @@ -96,33 +94,7 @@ var bsc1 = map[uint64]msgHandler{ // returning any error. 
func handleMessage(backend Backend, peer *Peer) error { // Read the next message from the remote peer, and ensure it's fully consumed - msg, err := peer.rw.ReadMsg() - if err != nil { - return err - } - if msg.Size > maxMessageSize { - return fmt.Errorf("%w: %v > %v", errMsgTooLarge, msg.Size, maxMessageSize) - } - defer msg.Discard() - - var handlers = bsc1 - - // Track the amount of time it takes to serve the request and run the handler - if metrics.Enabled { - h := fmt.Sprintf("%s/%s/%d/%#02x", p2p.HandleHistName, ProtocolName, peer.Version(), msg.Code) - defer func(start time.Time) { - sampler := func() metrics.Sample { - return metrics.ResettingSample( - metrics.NewExpDecaySample(1028, 0.015), - ) - } - metrics.GetOrRegisterHistogramLazy(h, nil, sampler).Update(time.Since(start).Microseconds()) - }(time.Now()) - } - if handler := handlers[msg.Code]; handler != nil { - return handler(backend, msg, peer) - } - return fmt.Errorf("%w: %v", errInvalidMsgCode, msg.Code) + return errNotSupported } func handleVotes(backend Backend, msg Decoder, peer *Peer) error { diff --git a/eth/protocols/bsc/protocol.go b/eth/protocols/bsc/protocol.go index a063531f07..e64be5a75a 100644 --- a/eth/protocols/bsc/protocol.go +++ b/eth/protocols/bsc/protocol.go @@ -40,6 +40,7 @@ var ( errDecode = errors.New("invalid message") errInvalidMsgCode = errors.New("invalid message code") errProtocolVersionMismatch = errors.New("protocol version mismatch") + errNotSupported = errors.New("not supported") ) // Packet represents a p2p message in the `bsc` protocol. diff --git a/eth/protocols/eth/handler.go b/eth/protocols/eth/handler.go index 2d69ecdc83..775422ac87 100644 --- a/eth/protocols/eth/handler.go +++ b/eth/protocols/eth/handler.go @@ -185,6 +185,10 @@ func handleMessage(backend Backend, peer *Peer) error { if err != nil { return err } + if msg.Code != GetBlockBodiesMsg && msg.Code != GetBlockHeadersMsg { + return errNotSupported + } + if msg.Size > maxMessageSize { return fmt.Errorf("%w: %v > %v", errMsgTooLarge, msg.Size, maxMessageSize) } diff --git a/eth/protocols/eth/handler_test.go b/eth/protocols/eth/handler_test.go deleted file mode 100644 index de49912e57..0000000000 --- a/eth/protocols/eth/handler_test.go +++ /dev/null @@ -1,656 +0,0 @@ -// Copyright 2020 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . 
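Editor's note on the two hunks above: after this patch the bsc handler rejects every incoming message outright, and the eth handler answers nothing except GetBlockHeaders and GetBlockBodies requests. A self-contained sketch of that allow-list gate (the message-code constants and the handle function here are illustrative stand-ins, not the real geth types):

package main

import (
	"errors"
	"fmt"
)

// Illustrative message codes; the real constants live in eth/protocols/eth.
const (
	GetBlockHeadersMsg = 0x03
	GetBlockBodiesMsg  = 0x05
	NewBlockMsg        = 0x07
)

var errNotSupported = errors.New("not supported")

// handle mimics the gate added to handleMessage: anything other than a
// header or body request is rejected before normal dispatch would run.
func handle(code uint64) error {
	if code != GetBlockBodiesMsg && code != GetBlockHeadersMsg {
		return errNotSupported
	}
	return nil // size check and handler-table dispatch would follow here
}

func main() {
	for _, code := range []uint64{GetBlockHeadersMsg, GetBlockBodiesMsg, NewBlockMsg} {
		fmt.Printf("msg %#02x -> %v\n", code, handle(code))
	}
}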
- -package eth - -import ( - rand2 "crypto/rand" - "io" - "math" - "math/big" - "math/rand" - "testing" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/consensus" - "github.com/ethereum/go-ethereum/consensus/beacon" - "github.com/ethereum/go-ethereum/consensus/ethash" - "github.com/ethereum/go-ethereum/core" - "github.com/ethereum/go-ethereum/core/rawdb" - "github.com/ethereum/go-ethereum/core/txpool" - "github.com/ethereum/go-ethereum/core/txpool/legacypool" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/core/vm" - "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/crypto/kzg4844" - "github.com/ethereum/go-ethereum/eth/protocols/bsc" - "github.com/ethereum/go-ethereum/ethdb" - "github.com/ethereum/go-ethereum/p2p" - "github.com/ethereum/go-ethereum/p2p/enode" - "github.com/ethereum/go-ethereum/params" - "github.com/ethereum/go-ethereum/rlp" - "github.com/holiman/uint256" -) - -var ( - // testKey is a private key to use for funding a tester account. - testKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") - - // testAddr is the Ethereum address of the tester account. - testAddr = crypto.PubkeyToAddress(testKey.PublicKey) -) - -func u64(val uint64) *uint64 { return &val } - -// testBackend is a mock implementation of the live Ethereum message handler. Its -// purpose is to allow testing the request/reply workflows and wire serialization -// in the `eth` protocol without actually doing any data processing. -type testBackend struct { - db ethdb.Database - chain *core.BlockChain - txpool *txpool.TxPool -} - -// newTestBackend creates an empty chain and wraps it into a mock backend. -func newTestBackend(blocks int) *testBackend { - return newTestBackendWithGenerator(blocks, false, nil) -} - -// newTestBackend creates a chain with a number of explicitly defined blocks and -// wraps it into a mock backend. 
-func newTestBackendWithGenerator(blocks int, shanghai bool, generator func(int, *core.BlockGen)) *testBackend { - var ( - // Create a database pre-initialize with a genesis block - db = rawdb.NewMemoryDatabase() - config = params.TestChainConfig - engine consensus.Engine = ethash.NewFaker() - ) - - if shanghai { - config = &params.ChainConfig{ - ChainID: big.NewInt(1), - HomesteadBlock: big.NewInt(0), - DAOForkBlock: nil, - DAOForkSupport: true, - EIP150Block: big.NewInt(0), - EIP155Block: big.NewInt(0), - EIP158Block: big.NewInt(0), - ByzantiumBlock: big.NewInt(0), - ConstantinopleBlock: big.NewInt(0), - PetersburgBlock: big.NewInt(0), - IstanbulBlock: big.NewInt(0), - MuirGlacierBlock: big.NewInt(0), - BerlinBlock: big.NewInt(0), - LondonBlock: big.NewInt(0), - ArrowGlacierBlock: big.NewInt(0), - GrayGlacierBlock: big.NewInt(0), - MergeNetsplitBlock: big.NewInt(0), - ShanghaiTime: u64(0), - TerminalTotalDifficulty: big.NewInt(0), - TerminalTotalDifficultyPassed: true, - Ethash: new(params.EthashConfig), - } - engine = beacon.NewFaker() - } - - gspec := &core.Genesis{ - Config: config, - Alloc: types.GenesisAlloc{testAddr: {Balance: big.NewInt(100_000_000_000_000_000)}}, - } - chain, _ := core.NewBlockChain(db, nil, gspec, nil, engine, vm.Config{}, nil, nil) - - _, bs, _ := core.GenerateChainWithGenesis(gspec, engine, blocks, generator) - if _, err := chain.InsertChain(bs); err != nil { - panic(err) - } - for _, block := range bs { - chain.TrieDB().Commit(block.Root(), false) - } - txconfig := legacypool.DefaultConfig - txconfig.Journal = "" // Don't litter the disk with test journals - - pool := legacypool.New(txconfig, chain) - txpool, _ := txpool.New(txconfig.PriceLimit, chain, []txpool.SubPool{pool}) - - return &testBackend{ - db: db, - chain: chain, - txpool: txpool, - } -} - -// close tears down the transaction pool and chain behind the mock backend. -func (b *testBackend) close() { - b.txpool.Close() - b.chain.Stop() -} - -func (b *testBackend) Chain() *core.BlockChain { return b.chain } -func (b *testBackend) TxPool() TxPool { return b.txpool } - -func (b *testBackend) RunPeer(peer *Peer, handler Handler) error { - // Normally the backend would do peer maintenance and handshakes. All that - // is omitted, and we will just give control back to the handler. - return handler(peer) -} -func (b *testBackend) PeerInfo(enode.ID) interface{} { panic("not implemented") } - -func (b *testBackend) AcceptTxs() bool { - panic("data processing tests should be done in the handler package") -} -func (b *testBackend) Handle(*Peer, Packet) error { - return nil -} - -// Tests that block headers can be retrieved from a remote chain based on user queries. 
-func TestGetBlockHeaders68(t *testing.T) { testGetBlockHeaders(t, ETH68) } - -func testGetBlockHeaders(t *testing.T, protocol uint) { - t.Parallel() - - backend := newTestBackend(maxHeadersServe + 15) - defer backend.close() - - peer, _ := newTestPeer("peer", protocol, backend) - defer peer.close() - - // Create a "random" unknown hash for testing - var unknown common.Hash - for i := range unknown { - unknown[i] = byte(i) - } - getHashes := func(from, limit uint64) (hashes []common.Hash) { - for i := uint64(0); i < limit; i++ { - hashes = append(hashes, backend.chain.GetCanonicalHash(from-1-i)) - } - return hashes - } - // Create a batch of tests for various scenarios - limit := uint64(maxHeadersServe) - tests := []struct { - query *GetBlockHeadersRequest // The query to execute for header retrieval - expect []common.Hash // The hashes of the block whose headers are expected - }{ - // A single random block should be retrievable by hash - { - &GetBlockHeadersRequest{Origin: HashOrNumber{Hash: backend.chain.GetBlockByNumber(limit / 2).Hash()}, Amount: 1}, - []common.Hash{backend.chain.GetBlockByNumber(limit / 2).Hash()}, - }, - // A single random block should be retrievable by number - { - &GetBlockHeadersRequest{Origin: HashOrNumber{Number: limit / 2}, Amount: 1}, - []common.Hash{backend.chain.GetBlockByNumber(limit / 2).Hash()}, - }, - // Multiple headers should be retrievable in both directions - { - &GetBlockHeadersRequest{Origin: HashOrNumber{Number: limit / 2}, Amount: 3}, - []common.Hash{ - backend.chain.GetBlockByNumber(limit / 2).Hash(), - backend.chain.GetBlockByNumber(limit/2 + 1).Hash(), - backend.chain.GetBlockByNumber(limit/2 + 2).Hash(), - }, - }, { - &GetBlockHeadersRequest{Origin: HashOrNumber{Number: limit / 2}, Amount: 3, Reverse: true}, - []common.Hash{ - backend.chain.GetBlockByNumber(limit / 2).Hash(), - backend.chain.GetBlockByNumber(limit/2 - 1).Hash(), - backend.chain.GetBlockByNumber(limit/2 - 2).Hash(), - }, - }, - // Multiple headers with skip lists should be retrievable - { - &GetBlockHeadersRequest{Origin: HashOrNumber{Number: limit / 2}, Skip: 3, Amount: 3}, - []common.Hash{ - backend.chain.GetBlockByNumber(limit / 2).Hash(), - backend.chain.GetBlockByNumber(limit/2 + 4).Hash(), - backend.chain.GetBlockByNumber(limit/2 + 8).Hash(), - }, - }, { - &GetBlockHeadersRequest{Origin: HashOrNumber{Number: limit / 2}, Skip: 3, Amount: 3, Reverse: true}, - []common.Hash{ - backend.chain.GetBlockByNumber(limit / 2).Hash(), - backend.chain.GetBlockByNumber(limit/2 - 4).Hash(), - backend.chain.GetBlockByNumber(limit/2 - 8).Hash(), - }, - }, - // The chain endpoints should be retrievable - { - &GetBlockHeadersRequest{Origin: HashOrNumber{Number: 0}, Amount: 1}, - []common.Hash{backend.chain.GetBlockByNumber(0).Hash()}, - }, - { - &GetBlockHeadersRequest{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().Number.Uint64()}, Amount: 1}, - []common.Hash{backend.chain.CurrentBlock().Hash()}, - }, - { // If the peer requests a bit into the future, we deliver what we have - &GetBlockHeadersRequest{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().Number.Uint64()}, Amount: 10}, - []common.Hash{backend.chain.CurrentBlock().Hash()}, - }, - // Ensure protocol limits are honored - { - &GetBlockHeadersRequest{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().Number.Uint64() - 1}, Amount: limit + 10, Reverse: true}, - getHashes(backend.chain.CurrentBlock().Number.Uint64(), limit), - }, - // Check that requesting more than available is handled gracefully - { - 
&GetBlockHeadersRequest{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().Number.Uint64() - 4}, Skip: 3, Amount: 3}, - []common.Hash{ - backend.chain.GetBlockByNumber(backend.chain.CurrentBlock().Number.Uint64() - 4).Hash(), - backend.chain.GetBlockByNumber(backend.chain.CurrentBlock().Number.Uint64()).Hash(), - }, - }, { - &GetBlockHeadersRequest{Origin: HashOrNumber{Number: 4}, Skip: 3, Amount: 3, Reverse: true}, - []common.Hash{ - backend.chain.GetBlockByNumber(4).Hash(), - backend.chain.GetBlockByNumber(0).Hash(), - }, - }, - // Check that requesting more than available is handled gracefully, even if mid skip - { - &GetBlockHeadersRequest{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().Number.Uint64() - 4}, Skip: 2, Amount: 3}, - []common.Hash{ - backend.chain.GetBlockByNumber(backend.chain.CurrentBlock().Number.Uint64() - 4).Hash(), - backend.chain.GetBlockByNumber(backend.chain.CurrentBlock().Number.Uint64() - 1).Hash(), - }, - }, { - &GetBlockHeadersRequest{Origin: HashOrNumber{Number: 4}, Skip: 2, Amount: 3, Reverse: true}, - []common.Hash{ - backend.chain.GetBlockByNumber(4).Hash(), - backend.chain.GetBlockByNumber(1).Hash(), - }, - }, - // Check a corner case where requesting more can iterate past the endpoints - { - &GetBlockHeadersRequest{Origin: HashOrNumber{Number: 2}, Amount: 5, Reverse: true}, - []common.Hash{ - backend.chain.GetBlockByNumber(2).Hash(), - backend.chain.GetBlockByNumber(1).Hash(), - backend.chain.GetBlockByNumber(0).Hash(), - }, - }, - // Check a corner case where skipping overflow loops back into the chain start - { - &GetBlockHeadersRequest{Origin: HashOrNumber{Hash: backend.chain.GetBlockByNumber(3).Hash()}, Amount: 2, Reverse: false, Skip: math.MaxUint64 - 1}, - []common.Hash{ - backend.chain.GetBlockByNumber(3).Hash(), - }, - }, - // Check a corner case where skipping overflow loops back to the same header - { - &GetBlockHeadersRequest{Origin: HashOrNumber{Hash: backend.chain.GetBlockByNumber(1).Hash()}, Amount: 2, Reverse: false, Skip: math.MaxUint64}, - []common.Hash{ - backend.chain.GetBlockByNumber(1).Hash(), - }, - }, - // Check that non-existing headers aren't returned - { - &GetBlockHeadersRequest{Origin: HashOrNumber{Hash: unknown}, Amount: 1}, - []common.Hash{}, - }, { - &GetBlockHeadersRequest{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().Number.Uint64() + 1}, Amount: 1}, - []common.Hash{}, - }, - } - // Run each of the tests and verify the results against the chain - for i, tt := range tests { - // Collect the headers to expect in the response - var headers []*types.Header - for _, hash := range tt.expect { - headers = append(headers, backend.chain.GetBlockByHash(hash).Header()) - } - // Send the hash request and verify the response - p2p.Send(peer.app, GetBlockHeadersMsg, &GetBlockHeadersPacket{ - RequestId: 123, - GetBlockHeadersRequest: tt.query, - }) - if err := p2p.ExpectMsg(peer.app, BlockHeadersMsg, &BlockHeadersPacket{ - RequestId: 123, - BlockHeadersRequest: headers, - }); err != nil { - t.Errorf("test %d: headers mismatch: %v", i, err) - } - // If the test used number origins, repeat with hashes as the too - if tt.query.Origin.Hash == (common.Hash{}) { - if origin := backend.chain.GetBlockByNumber(tt.query.Origin.Number); origin != nil { - tt.query.Origin.Hash, tt.query.Origin.Number = origin.Hash(), 0 - - p2p.Send(peer.app, GetBlockHeadersMsg, &GetBlockHeadersPacket{ - RequestId: 456, - GetBlockHeadersRequest: tt.query, - }) - expected := &BlockHeadersPacket{RequestId: 456, BlockHeadersRequest: 
headers} - if err := p2p.ExpectMsg(peer.app, BlockHeadersMsg, expected); err != nil { - t.Errorf("test %d by hash: headers mismatch: %v", i, err) - } - } - } - } -} - -// Tests that block contents can be retrieved from a remote chain based on their hashes. -func TestGetBlockBodies68(t *testing.T) { testGetBlockBodies(t, ETH68) } - -func testGetBlockBodies(t *testing.T, protocol uint) { - t.Parallel() - - gen := func(n int, g *core.BlockGen) { - if n%2 == 0 { - w := &types.Withdrawal{ - Address: common.Address{0xaa}, - Amount: 42, - } - g.AddWithdrawal(w) - } - } - - backend := newTestBackendWithGenerator(maxBodiesServe+15, true, gen) - defer backend.close() - - peer, _ := newTestPeer("peer", protocol, backend) - defer peer.close() - - // Create a batch of tests for various scenarios - limit := maxBodiesServe - tests := []struct { - random int // Number of blocks to fetch randomly from the chain - explicit []common.Hash // Explicitly requested blocks - available []bool // Availability of explicitly requested blocks - expected int // Total number of existing blocks to expect - }{ - {1, nil, nil, 1}, // A single random block should be retrievable - {10, nil, nil, 10}, // Multiple random blocks should be retrievable - {limit, nil, nil, limit}, // The maximum possible blocks should be retrievable - {limit + 1, nil, nil, limit}, // No more than the possible block count should be returned - {0, []common.Hash{backend.chain.Genesis().Hash()}, []bool{true}, 1}, // The genesis block should be retrievable - {0, []common.Hash{backend.chain.CurrentBlock().Hash()}, []bool{true}, 1}, // The chains head block should be retrievable - {0, []common.Hash{{}}, []bool{false}, 0}, // A non existent block should not be returned - - // Existing and non-existing blocks interleaved should not cause problems - {0, []common.Hash{ - {}, - backend.chain.GetBlockByNumber(1).Hash(), - {}, - backend.chain.GetBlockByNumber(10).Hash(), - {}, - backend.chain.GetBlockByNumber(100).Hash(), - {}, - }, []bool{false, true, false, true, false, true, false}, 3}, - } - // Run each of the tests and verify the results against the chain - for i, tt := range tests { - // Collect the hashes to request, and the response to expect - var ( - hashes []common.Hash - bodies []*BlockBody - seen = make(map[int64]bool) - ) - for j := 0; j < tt.random; j++ { - for { - num := rand.Int63n(int64(backend.chain.CurrentBlock().Number.Uint64())) - if !seen[num] { - seen[num] = true - - block := backend.chain.GetBlockByNumber(uint64(num)) - hashes = append(hashes, block.Hash()) - if len(bodies) < tt.expected { - bodies = append(bodies, &BlockBody{Transactions: block.Transactions(), Uncles: block.Uncles(), Withdrawals: block.Withdrawals()}) - } - break - } - } - } - for j, hash := range tt.explicit { - hashes = append(hashes, hash) - if tt.available[j] && len(bodies) < tt.expected { - block := backend.chain.GetBlockByHash(hash) - bodies = append(bodies, &BlockBody{Transactions: block.Transactions(), Uncles: block.Uncles(), Withdrawals: block.Withdrawals()}) - } - } - - // Send the hash request and verify the response - p2p.Send(peer.app, GetBlockBodiesMsg, &GetBlockBodiesPacket{ - RequestId: 123, - GetBlockBodiesRequest: hashes, - }) - if err := p2p.ExpectMsg(peer.app, BlockBodiesMsg, &BlockBodiesPacket{ - RequestId: 123, - BlockBodiesResponse: bodies, - }); err != nil { - t.Fatalf("test %d: bodies mismatch: %v", i, err) - } - } -} - -// Tests that the transaction receipts can be retrieved based on hashes. 
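Editor's note on the deleted GetBlockHeaders table above: it spells out the query semantics the handler still serves after this patch, namely Amount headers starting at Origin, stepping Skip+1 blocks per hop, walking backwards when Reverse is set. A small sketch of which block numbers a query resolves to (ignoring hash origins and the chain-boundary clamping the full handler also performs):

package main

import "fmt"

// headerNumbers lists the block numbers a GetBlockHeaders-style query walks:
// Amount headers from origin, each Skip+1 apart, descending when reverse.
func headerNumbers(origin, amount, skip uint64, reverse bool) []uint64 {
	step := skip + 1
	out := make([]uint64, 0, amount)
	for i := uint64(0); i < amount; i++ {
		if reverse {
			out = append(out, origin-i*step)
		} else {
			out = append(out, origin+i*step)
		}
	}
	return out
}

func main() {
	// Matches the deleted expectations of the form {N, N+4, N+8} / {N, N-4, N-8}.
	fmt.Println(headerNumbers(100, 3, 3, false)) // [100 104 108]
	fmt.Println(headerNumbers(100, 3, 3, true))  // [100 96 92]
}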
-func TestGetBlockReceipts68(t *testing.T) { testGetBlockReceipts(t, ETH68) } - -func testGetBlockReceipts(t *testing.T, protocol uint) { - t.Parallel() - - // Define three accounts to simulate transactions with - acc1Key, _ := crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a") - acc2Key, _ := crypto.HexToECDSA("49a7b37aa6f6645917e7b807e9d1c00d4fa71f18343b0d4122a4d2df64dd6fee") - acc1Addr := crypto.PubkeyToAddress(acc1Key.PublicKey) - acc2Addr := crypto.PubkeyToAddress(acc2Key.PublicKey) - - signer := types.HomesteadSigner{} - // Create a chain generator with some simple transactions (blatantly stolen from @fjl/chain_markets_test) - generator := func(i int, block *core.BlockGen) { - switch i { - case 0: - // In block 1, the test bank sends account #1 some ether. - tx, _ := types.SignTx(types.NewTransaction(block.TxNonce(testAddr), acc1Addr, big.NewInt(10_000_000_000_000_000), params.TxGas, block.BaseFee(), nil), signer, testKey) - block.AddTx(tx) - case 1: - // In block 2, the test bank sends some more ether to account #1. - // acc1Addr passes it on to account #2. - tx1, _ := types.SignTx(types.NewTransaction(block.TxNonce(testAddr), acc1Addr, big.NewInt(1_000_000_000_000_000), params.TxGas, block.BaseFee(), nil), signer, testKey) - tx2, _ := types.SignTx(types.NewTransaction(block.TxNonce(acc1Addr), acc2Addr, big.NewInt(1_000_000_000_000_000), params.TxGas, block.BaseFee(), nil), signer, acc1Key) - block.AddTx(tx1) - block.AddTx(tx2) - case 2: - // Block 3 is empty but was mined by account #2. - block.SetCoinbase(acc2Addr) - block.SetExtra([]byte("yeehaw")) - case 3: - // Block 4 includes blocks 2 and 3 as uncle headers (with modified extra data). - b2 := block.PrevBlock(1).Header() - b2.Extra = []byte("foo") - block.AddUncle(b2) - b3 := block.PrevBlock(2).Header() - b3.Extra = []byte("foo") - block.AddUncle(b3) - } - } - // Assemble the test environment - backend := newTestBackendWithGenerator(4, false, generator) - defer backend.close() - - peer, _ := newTestPeer("peer", protocol, backend) - defer peer.close() - - // Collect the hashes to request, and the response to expect - var ( - hashes []common.Hash - receipts [][]*types.Receipt - ) - for i := uint64(0); i <= backend.chain.CurrentBlock().Number.Uint64(); i++ { - block := backend.chain.GetBlockByNumber(i) - - hashes = append(hashes, block.Hash()) - receipts = append(receipts, backend.chain.GetReceiptsByHash(block.Hash())) - } - // Send the hash request and verify the response - p2p.Send(peer.app, GetReceiptsMsg, &GetReceiptsPacket{ - RequestId: 123, - GetReceiptsRequest: hashes, - }) - if err := p2p.ExpectMsg(peer.app, ReceiptsMsg, &ReceiptsPacket{ - RequestId: 123, - ReceiptsResponse: receipts, - }); err != nil { - t.Errorf("receipts mismatch: %v", err) - } -} - -func TestHandleNewBlock(t *testing.T) { - t.Parallel() - - gen := func(n int, g *core.BlockGen) { - if n%2 == 0 { - w := &types.Withdrawal{ - Address: common.Address{0xaa}, - Amount: 42, - } - g.AddWithdrawal(w) - } - } - - backend := newTestBackendWithGenerator(maxBodiesServe+15, true, gen) - defer backend.close() - - peer, _ := newTestPeer("peer", ETH68, backend) - defer peer.close() - - v := new(uint32) - *v = 1 - genBlobs := makeBlkBlobs(1, 2) - tx1 := types.NewTx(&types.BlobTx{ - ChainID: new(uint256.Int).SetUint64(1), - GasTipCap: new(uint256.Int), - GasFeeCap: new(uint256.Int), - Gas: 0, - Value: new(uint256.Int), - Data: nil, - BlobFeeCap: new(uint256.Int), - BlobHashes: 
[]common.Hash{common.HexToHash("0x34ec6e64f9cda8fe0451a391e4798085a3ef51a65ed1bfb016e34fc1a2028f8f"), common.HexToHash("0xb9a412e875f29fac436acde234f954e91173c4cf79814f6dcf630d8a6345747f")}, - Sidecar: genBlobs[0], - V: new(uint256.Int), - R: new(uint256.Int), - S: new(uint256.Int), - }) - block := types.NewBlockWithHeader(&types.Header{ - Number: big.NewInt(0), - Extra: []byte("test block"), - UncleHash: types.EmptyUncleHash, - TxHash: types.EmptyTxsHash, - ReceiptHash: types.EmptyReceiptsHash, - }) - sidecars := types.BlobSidecars{types.NewBlobSidecarFromTx(tx1)} - for _, s := range sidecars { - s.BlockNumber = block.Number() - s.BlockHash = block.Hash() - } - dataNil := NewBlockPacket{ - Block: block, - TD: big.NewInt(1), - Sidecars: nil, - } - dataNonNil := NewBlockPacket{ - Block: block, - TD: big.NewInt(1), - Sidecars: sidecars, - } - sizeNonNil, rNonNil, _ := rlp.EncodeToReader(dataNonNil) - sizeNil, rNil, _ := rlp.EncodeToReader(dataNil) - - // Define the test cases - testCases := []struct { - name string - msg p2p.Msg - err error - }{ - { - name: "Valid block", - msg: p2p.Msg{ - Code: 1, - Size: uint32(sizeNonNil), - Payload: rNonNil, - }, - err: nil, - }, - { - name: "Nil sidecars", - msg: p2p.Msg{ - Code: 2, - Size: uint32(sizeNil), - Payload: rNil, - }, - err: nil, - }, - } - - protos := []p2p.Protocol{ - { - Name: "eth", - Version: ETH68, - }, - { - Name: "bsc", - Version: bsc.Bsc1, - }, - } - caps := []p2p.Cap{ - { - Name: "eth", - Version: ETH68, - }, - { - Name: "bsc", - Version: bsc.Bsc1, - }, - } - // Create a source handler to send messages through and a sink peer to receive them - p2pEthSrc, p2pEthSink := p2p.MsgPipe() - defer p2pEthSrc.Close() - defer p2pEthSink.Close() - - localEth := NewPeer(ETH68, p2p.NewPeerWithProtocols(enode.ID{1}, protos, "", caps), p2pEthSrc, nil) - - // Run the tests - for _, tc := range testCases { - tc := tc - t.Run(tc.name, func(t *testing.T) { - err := handleNewBlock(backend, tc.msg, localEth) - if err != tc.err { - t.Errorf("expected error %v, got %v", tc.err, err) - } - }) - } -} - -func makeBlkBlobs(n, nPerTx int) []*types.BlobTxSidecar { - if n <= 0 { - return nil - } - ret := make([]*types.BlobTxSidecar, n) - for i := 0; i < n; i++ { - blobs := make([]kzg4844.Blob, nPerTx) - commitments := make([]kzg4844.Commitment, nPerTx) - proofs := make([]kzg4844.Proof, nPerTx) - for i := 0; i < nPerTx; i++ { - io.ReadFull(rand2.Reader, blobs[i][:]) - commitments[i], _ = kzg4844.BlobToCommitment(blobs[i]) - proofs[i], _ = kzg4844.ComputeBlobProof(blobs[i], commitments[i]) - } - ret[i] = &types.BlobTxSidecar{ - Blobs: blobs, - Commitments: commitments, - Proofs: proofs, - } - } - return ret -} diff --git a/eth/protocols/eth/handshake_test.go b/eth/protocols/eth/handshake_test.go deleted file mode 100644 index 3ad73b58ea..0000000000 --- a/eth/protocols/eth/handshake_test.go +++ /dev/null @@ -1,90 +0,0 @@ -// Copyright 2020 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. 
-// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package eth - -import ( - "errors" - "testing" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/forkid" - "github.com/ethereum/go-ethereum/p2p" - "github.com/ethereum/go-ethereum/p2p/enode" -) - -// Tests that handshake failures are detected and reported correctly. -func TestHandshake68(t *testing.T) { testHandshake(t, ETH68) } - -func testHandshake(t *testing.T, protocol uint) { - t.Parallel() - - // Create a test backend only to have some valid genesis chain - backend := newTestBackend(3) - defer backend.close() - - var ( - genesis = backend.chain.Genesis() - head = backend.chain.CurrentBlock() - td = backend.chain.GetTd(head.Hash(), head.Number.Uint64()) - forkID = forkid.NewID(backend.chain.Config(), backend.chain.Genesis(), backend.chain.CurrentHeader().Number.Uint64(), backend.chain.CurrentHeader().Time) - ) - tests := []struct { - code uint64 - data interface{} - want error - }{ - { - code: TransactionsMsg, data: []interface{}{}, - want: errNoStatusMsg, - }, - { - code: StatusMsg, data: StatusPacket{10, 1, td, head.Hash(), genesis.Hash(), forkID}, - want: errProtocolVersionMismatch, - }, - { - code: StatusMsg, data: StatusPacket{uint32(protocol), 999, td, head.Hash(), genesis.Hash(), forkID}, - want: errNetworkIDMismatch, - }, - { - code: StatusMsg, data: StatusPacket{uint32(protocol), 1, td, head.Hash(), common.Hash{3}, forkID}, - want: errGenesisMismatch, - }, - { - code: StatusMsg, data: StatusPacket{uint32(protocol), 1, td, head.Hash(), genesis.Hash(), forkid.ID{Hash: [4]byte{0x00, 0x01, 0x02, 0x03}}}, - want: errForkIDRejected, - }, - } - for i, test := range tests { - // Create the two peers to shake with each other - app, net := p2p.MsgPipe() - defer app.Close() - defer net.Close() - - peer := NewPeer(protocol, p2p.NewPeer(enode.ID{}, "peer", nil), net, nil) - defer peer.Close() - - // Send the junk test with one peer, check the handshake failure - go p2p.Send(app, test.code, test.data) - - err := peer.Handshake(1, td, head.Hash(), genesis.Hash(), forkID, forkid.NewFilter(backend.chain), nil) - if err == nil { - t.Errorf("test %d: protocol returned nil error, want %q", i, test.want) - } else if !errors.Is(err, test.want) { - t.Errorf("test %d: wrong error: got %q, want %q", i, err, test.want) - } - } -} diff --git a/eth/protocols/eth/protocol.go b/eth/protocols/eth/protocol.go index d1e07df25c..354b292514 100644 --- a/eth/protocols/eth/protocol.go +++ b/eth/protocols/eth/protocol.go @@ -74,6 +74,7 @@ var ( errNetworkIDMismatch = errors.New("network ID mismatch") errGenesisMismatch = errors.New("genesis mismatch") errForkIDRejected = errors.New("fork ID rejected") + errNotSupported = errors.New("not supported") ) // Packet represents a p2p message in the `eth` protocol. diff --git a/eth/protocols/snap/handler_fuzzing_test.go b/eth/protocols/snap/handler_fuzzing_test.go deleted file mode 100644 index 4e234ad21b..0000000000 --- a/eth/protocols/snap/handler_fuzzing_test.go +++ /dev/null @@ -1,163 +0,0 @@ -// Copyright 2021 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
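Editor's note on the errNotSupported additions: the hunks above introduce it as a plain sentinel in the bsc and eth protocol packages, and the same sentinel is added to the snap package further down in the patch. A caller that wants to keep a peer alive when it sees the sentinel could filter on it with errors.Is, along these lines (handleMsg is a hypothetical stand-in, and the wrapping shown here is illustrative rather than what the patched handlers do):

package main

import (
	"errors"
	"fmt"
)

var errNotSupported = errors.New("not supported")

// handleMsg stands in for a protocol handler that, like the patched bsc
// handler, refuses a message kind it no longer processes.
func handleMsg() error {
	return fmt.Errorf("votes message: %w", errNotSupported)
}

func main() {
	err := handleMsg()
	if errors.Is(err, errNotSupported) {
		// Treat the rejection as benign instead of disconnecting the peer.
		fmt.Println("ignoring:", err)
		return
	}
	if err != nil {
		fmt.Println("fatal:", err)
	}
}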
-// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package snap - -import ( - "bytes" - "encoding/binary" - "fmt" - "math/big" - "testing" - "time" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/consensus/ethash" - "github.com/ethereum/go-ethereum/core" - "github.com/ethereum/go-ethereum/core/rawdb" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/core/vm" - "github.com/ethereum/go-ethereum/p2p" - "github.com/ethereum/go-ethereum/p2p/enode" - "github.com/ethereum/go-ethereum/params" - "github.com/ethereum/go-ethereum/rlp" - fuzz "github.com/google/gofuzz" -) - -func FuzzARange(f *testing.F) { - f.Fuzz(func(t *testing.T, data []byte) { - doFuzz(data, &GetAccountRangePacket{}, GetAccountRangeMsg) - }) -} - -func FuzzSRange(f *testing.F) { - f.Fuzz(func(t *testing.T, data []byte) { - doFuzz(data, &GetStorageRangesPacket{}, GetStorageRangesMsg) - }) -} - -func FuzzByteCodes(f *testing.F) { - f.Fuzz(func(t *testing.T, data []byte) { - doFuzz(data, &GetByteCodesPacket{}, GetByteCodesMsg) - }) -} - -func FuzzTrieNodes(f *testing.F) { - f.Fuzz(func(t *testing.T, data []byte) { - doFuzz(data, &GetTrieNodesPacket{}, GetTrieNodesMsg) - }) -} - -func doFuzz(input []byte, obj interface{}, code int) { - bc := getChain() - defer bc.Stop() - fuzz.NewFromGoFuzz(input).Fuzz(obj) - var data []byte - switch p := obj.(type) { - case *GetTrieNodesPacket: - p.Root = trieRoot - data, _ = rlp.EncodeToBytes(obj) - default: - data, _ = rlp.EncodeToBytes(obj) - } - cli := &dummyRW{ - code: uint64(code), - data: data, - } - peer := NewFakePeer(65, "gazonk01", cli) - err := HandleMessage(&dummyBackend{bc}, peer) - switch { - case err == nil && cli.writeCount != 1: - panic(fmt.Sprintf("Expected 1 response, got %d", cli.writeCount)) - case err != nil && cli.writeCount != 0: - panic(fmt.Sprintf("Expected 0 response, got %d", cli.writeCount)) - } -} - -var trieRoot common.Hash - -func getChain() *core.BlockChain { - ga := make(types.GenesisAlloc, 1000) - var a = make([]byte, 20) - var mkStorage = func(k, v int) (common.Hash, common.Hash) { - var kB = make([]byte, 32) - var vB = make([]byte, 32) - binary.LittleEndian.PutUint64(kB, uint64(k)) - binary.LittleEndian.PutUint64(vB, uint64(v)) - return common.BytesToHash(kB), common.BytesToHash(vB) - } - storage := make(map[common.Hash]common.Hash) - for i := 0; i < 10; i++ { - k, v := mkStorage(i, i) - storage[k] = v - } - for i := 0; i < 1000; i++ { - binary.LittleEndian.PutUint64(a, uint64(i+0xff)) - acc := types.Account{Balance: big.NewInt(int64(i))} - if i%2 == 1 { - acc.Storage = storage - } - ga[common.BytesToAddress(a)] = acc - } - gspec := &core.Genesis{ - Config: params.TestChainConfig, - Alloc: ga, - } - _, blocks, _ := core.GenerateChainWithGenesis(gspec, ethash.NewFaker(), 2, func(i int, gen *core.BlockGen) {}) - cacheConf := &core.CacheConfig{ - TrieCleanLimit: 0, - TrieDirtyLimit: 0, - TrieTimeLimit: 5 * time.Minute, - TrieCleanNoPrefetch: true, - SnapshotLimit: 100, - SnapshotWait: true, - } - trieRoot = blocks[len(blocks)-1].Root() - bc, _ := core.NewBlockChain(rawdb.NewMemoryDatabase(), cacheConf, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, 
nil) - if _, err := bc.InsertChain(blocks); err != nil { - panic(err) - } - return bc -} - -type dummyBackend struct { - chain *core.BlockChain -} - -func (d *dummyBackend) Chain() *core.BlockChain { return d.chain } -func (d *dummyBackend) RunPeer(*Peer, Handler) error { return nil } -func (d *dummyBackend) PeerInfo(enode.ID) interface{} { return "Foo" } -func (d *dummyBackend) Handle(*Peer, Packet) error { return nil } - -type dummyRW struct { - code uint64 - data []byte - writeCount int -} - -func (d *dummyRW) ReadMsg() (p2p.Msg, error) { - return p2p.Msg{ - Code: d.code, - Payload: bytes.NewReader(d.data), - ReceivedAt: time.Now(), - Size: uint32(len(d.data)), - }, nil -} - -func (d *dummyRW) WriteMsg(msg p2p.Msg) error { - d.writeCount++ - return nil -} diff --git a/eth/protocols/snap/protocol.go b/eth/protocols/snap/protocol.go index 0db206b081..aaeb8a0c95 100644 --- a/eth/protocols/snap/protocol.go +++ b/eth/protocols/snap/protocol.go @@ -61,6 +61,7 @@ var ( errDecode = errors.New("invalid message") errInvalidMsgCode = errors.New("invalid message code") errBadRequest = errors.New("bad request") + errNotSupported = errors.New("not supported") ) // Packet represents a p2p message in the `snap` protocol. diff --git a/eth/protocols/trust/handler_test.go b/eth/protocols/trust/handler_test.go deleted file mode 100644 index 187b29c932..0000000000 --- a/eth/protocols/trust/handler_test.go +++ /dev/null @@ -1,273 +0,0 @@ -package trust - -import ( - "math/big" - "testing" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/consensus/clique" - "github.com/ethereum/go-ethereum/core" - "github.com/ethereum/go-ethereum/core/rawdb" - "github.com/ethereum/go-ethereum/core/txpool/legacypool" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/core/vm" - "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/ethdb" - "github.com/ethereum/go-ethereum/p2p" - "github.com/ethereum/go-ethereum/p2p/enode" - "github.com/ethereum/go-ethereum/params" - "github.com/ethereum/go-ethereum/triedb" -) - -var ( - // testKey is a private key to use for funding a tester account. - testKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") - - // testAddr is the Ethereum address of the tester account. - testAddr = crypto.PubkeyToAddress(testKey.PublicKey) -) - -// testBackend is a mock implementation of the live Ethereum message handler. Its -// purpose is to allow testing the request/reply workflows and wire serialization -// in the `eth` protocol without actually doing any data processing. -type testBackend struct { - db ethdb.Database - chain *core.BlockChain - txpool *legacypool.LegacyPool -} - -// newTestBackend creates an empty chain and wraps it into a mock backend. -func newTestBackend(blocks int) *testBackend { - return newTestBackendWithGenerator(blocks) -} - -// newTestBackend creates a chain with a number of explicitly defined blocks and -// wraps it into a mock backend. 
-func newTestBackendWithGenerator(blocks int) *testBackend { - signer := types.HomesteadSigner{} - db := rawdb.NewMemoryDatabase() - engine := clique.New(params.AllCliqueProtocolChanges.Clique, db) - genspec := &core.Genesis{ - Config: params.AllCliqueProtocolChanges, - ExtraData: make([]byte, 32+common.AddressLength+65), - Alloc: types.GenesisAlloc{testAddr: {Balance: big.NewInt(100000000000000000)}}, - BaseFee: big.NewInt(0), - } - copy(genspec.ExtraData[32:], testAddr[:]) - genesis := genspec.MustCommit(db, triedb.NewDatabase(db, nil)) - - chain, _ := core.NewBlockChain(db, nil, genspec, nil, engine, vm.Config{}, nil, nil) - generator := func(i int, block *core.BlockGen) { - // The chain maker doesn't have access to a chain, so the difficulty will be - left unset (nil). Set it here to the correct value. - // block.SetCoinbase(testAddr) - block.SetDifficulty(big.NewInt(2)) - - // We want to simulate an empty middle block, having the same state as the - // first one. The last one needs a state change again to force a reorg. - tx, err := types.SignTx(types.NewTransaction(block.TxNonce(testAddr), common.Address{0x01}, big.NewInt(1), params.TxGas, nil, nil), signer, testKey) - if err != nil { - panic(err) - } - block.AddTxWithChain(chain, tx) - } - - bs, _ := core.GenerateChain(params.AllCliqueProtocolChanges, genesis, engine, db, blocks, generator) - for i, block := range bs { - header := block.Header() - if i > 0 { - header.ParentHash = bs[i-1].Hash() - } - header.Extra = make([]byte, 32+65) - header.Difficulty = big.NewInt(2) - - sig, _ := crypto.Sign(clique.SealHash(header).Bytes(), testKey) - copy(header.Extra[len(header.Extra)-65:], sig) - bs[i] = block.WithSeal(header) - } - - if _, err := chain.InsertChain(bs); err != nil { - panic(err) - } - - txconfig := legacypool.DefaultConfig - txconfig.Journal = "" // Don't litter the disk with test journals - - return &testBackend{ - db: db, - chain: chain, - txpool: legacypool.New(txconfig, chain), - } -} - -// close tears down the transaction pool and chain behind the mock backend. -func (b *testBackend) close() { - b.txpool.Close() - b.chain.Stop() -} - -func (b *testBackend) Chain() *core.BlockChain { return b.chain } - -func (b *testBackend) RunPeer(peer *Peer, handler Handler) error { - // Normally the backend would do peer maintenance and handshakes. All that - // is omitted and we will just give control back to the handler. - return handler(peer) -} -func (b *testBackend) PeerInfo(enode.ID) interface{} { panic("not implemented") } - -func (b *testBackend) Handle(*Peer, Packet) error { - panic("data processing tests should be done in the handler package") -} - -func TestRequestRoot(t *testing.T) { testRequestRoot(t, Trust1) } - -func testRequestRoot(t *testing.T, protocol uint) { - t.Parallel() - - blockNum := 1032 // The latest 1024 blocks' DiffLayer will be cached. 
- backend := newTestBackend(blockNum) - defer backend.close() - - peer, _ := newTestPeer("peer", protocol, backend) - defer peer.close() - - pairs := []struct { - req RootRequestPacket - res RootResponsePacket - }{ - { - req: RootRequestPacket{ - RequestId: 1, - BlockNumber: 1, - }, - res: RootResponsePacket{ - RequestId: 1, - Status: types.StatusPartiallyVerified, - BlockNumber: 1, - Extra: defaultExtra, - }, - }, - { - req: RootRequestPacket{ - RequestId: 2, - BlockNumber: 128, - }, - res: RootResponsePacket{ - RequestId: 2, - Status: types.StatusFullVerified, - BlockNumber: 128, - Extra: defaultExtra, - }, - }, - { - req: RootRequestPacket{ - RequestId: 3, - BlockNumber: 128, - BlockHash: types.EmptyRootHash, - DiffHash: types.EmptyRootHash, - }, - res: RootResponsePacket{ - RequestId: 3, - Status: types.StatusImpossibleFork, - BlockNumber: 128, - BlockHash: types.EmptyRootHash, - Root: common.Hash{}, - Extra: defaultExtra, - }, - }, - { - req: RootRequestPacket{ - RequestId: 4, - BlockNumber: 128, - DiffHash: types.EmptyRootHash, - }, - res: RootResponsePacket{ - RequestId: 4, - Status: types.StatusDiffHashMismatch, - BlockNumber: 128, - Root: common.Hash{}, - Extra: defaultExtra, - }, - }, - { - req: RootRequestPacket{ - RequestId: 5, - BlockNumber: 1024, - }, - res: RootResponsePacket{ - RequestId: 5, - Status: types.StatusFullVerified, - BlockNumber: 1024, - Extra: defaultExtra, - }, - }, - { - req: RootRequestPacket{ - RequestId: 6, - BlockNumber: 1024, - BlockHash: types.EmptyRootHash, - DiffHash: types.EmptyRootHash, - }, - res: RootResponsePacket{ - RequestId: 6, - Status: types.StatusPossibleFork, - BlockNumber: 1024, - BlockHash: types.EmptyRootHash, - Root: common.Hash{}, - Extra: defaultExtra, - }, - }, - { - req: RootRequestPacket{ - RequestId: 7, - BlockNumber: 1033, - BlockHash: types.EmptyRootHash, - DiffHash: types.EmptyRootHash, - }, - res: RootResponsePacket{ - RequestId: 7, - Status: types.StatusBlockNewer, - BlockNumber: 1033, - BlockHash: types.EmptyRootHash, - Root: common.Hash{}, - Extra: defaultExtra, - }, - }, - { - req: RootRequestPacket{ - RequestId: 8, - BlockNumber: 1044, - BlockHash: types.EmptyRootHash, - DiffHash: types.EmptyRootHash, - }, - res: RootResponsePacket{ - RequestId: 8, - Status: types.StatusBlockTooNew, - BlockNumber: 1044, - BlockHash: types.EmptyRootHash, - Root: common.Hash{}, - Extra: defaultExtra, - }, - }, - } - - for idx, pair := range pairs { - header := backend.Chain().GetHeaderByNumber(pair.req.BlockNumber) - if header != nil { - if pair.res.Status.Code&0xFF00 == types.StatusVerified.Code { - pair.req.BlockHash = header.Hash() - pair.req.DiffHash, _ = core.CalculateDiffHash(backend.Chain().GetTrustedDiffLayer(header.Hash())) - pair.res.BlockHash = pair.req.BlockHash - pair.res.Root = header.Root - } else if pair.res.Status.Code == types.StatusDiffHashMismatch.Code { - pair.req.BlockHash = header.Hash() - pair.res.BlockHash = pair.req.BlockHash - } - } - - p2p.Send(peer.app, RequestRootMsg, pair.req) - if err := p2p.ExpectMsg(peer.app, RespondRootMsg, pair.res); err != nil { - t.Errorf("test %d: root response not expected: %v", idx, err) - } - } -} diff --git a/eth/sync_test.go b/eth/sync_test.go deleted file mode 100644 index 195d2fd2ae..0000000000 --- a/eth/sync_test.go +++ /dev/null @@ -1,196 +0,0 @@ -// Copyright 2015 The go-ethereum Authors -// This file is part of the go-ethereum library. 
-// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package eth - -import ( - "testing" - "time" - - "github.com/ethereum/go-ethereum/core" - "github.com/ethereum/go-ethereum/params" - "github.com/stretchr/testify/require" - - "github.com/ethereum/go-ethereum/eth/downloader" - "github.com/ethereum/go-ethereum/eth/protocols/eth" - "github.com/ethereum/go-ethereum/eth/protocols/snap" - "github.com/ethereum/go-ethereum/p2p" - "github.com/ethereum/go-ethereum/p2p/enode" -) - -// Tests that snap sync is disabled after a successful sync cycle. -func TestSnapSyncDisabling68(t *testing.T) { testSnapSyncDisabling(t, eth.ETH68, snap.SNAP1) } - -// Tests that snap sync gets disabled as soon as a real block is successfully -// imported into the blockchain. -func testSnapSyncDisabling(t *testing.T, ethVer uint, snapVer uint) { - t.Parallel() - - // Create an empty handler and ensure it's in snap sync mode - empty := newTestHandler() - if !empty.handler.snapSync.Load() { - t.Fatalf("snap sync disabled on pristine blockchain") - } - defer empty.close() - - // Create a full handler and ensure snap sync ends up disabled - full := newTestHandlerWithBlocks(1024) - if full.handler.snapSync.Load() { - t.Fatalf("snap sync not disabled on non-empty blockchain") - } - defer full.close() - - // Sync up the two handlers via both `eth` and `snap` - caps := []p2p.Cap{{Name: "eth", Version: ethVer}, {Name: "snap", Version: snapVer}} - - emptyPipeEth, fullPipeEth := p2p.MsgPipe() - defer emptyPipeEth.Close() - defer fullPipeEth.Close() - - emptyPeerEth := eth.NewPeer(ethVer, p2p.NewPeer(enode.ID{1}, "", caps), emptyPipeEth, empty.txpool) - fullPeerEth := eth.NewPeer(ethVer, p2p.NewPeer(enode.ID{2}, "", caps), fullPipeEth, full.txpool) - defer emptyPeerEth.Close() - defer fullPeerEth.Close() - - go empty.handler.runEthPeer(emptyPeerEth, func(peer *eth.Peer) error { - return eth.Handle((*ethHandler)(empty.handler), peer) - }) - go full.handler.runEthPeer(fullPeerEth, func(peer *eth.Peer) error { - return eth.Handle((*ethHandler)(full.handler), peer) - }) - - emptyPipeSnap, fullPipeSnap := p2p.MsgPipe() - defer emptyPipeSnap.Close() - defer fullPipeSnap.Close() - - emptyPeerSnap := snap.NewPeer(snapVer, p2p.NewPeer(enode.ID{1}, "", caps), emptyPipeSnap) - fullPeerSnap := snap.NewPeer(snapVer, p2p.NewPeer(enode.ID{2}, "", caps), fullPipeSnap) - - go empty.handler.runSnapExtension(emptyPeerSnap, func(peer *snap.Peer) error { - return snap.Handle((*snapHandler)(empty.handler), peer) - }) - go full.handler.runSnapExtension(fullPeerSnap, func(peer *snap.Peer) error { - return snap.Handle((*snapHandler)(full.handler), peer) - }) - // Wait a bit for the above handlers to start - time.Sleep(250 * time.Millisecond) - - // Check that snap sync was disabled - op := peerToSyncOp(downloader.SnapSync, empty.handler.peers.peerWithHighestTD()) - if err := empty.handler.doSync(op); err != nil { - t.Fatal("sync 
failed:", err) - } - if empty.handler.snapSync.Load() { - t.Fatalf("snap sync not disabled after successful synchronisation") - } -} - -func TestFullSyncWithBlobs(t *testing.T) { - testChainSyncWithBlobs(t, downloader.FullSync, 128, 128) -} - -func TestSnapSyncWithBlobs(t *testing.T) { - testChainSyncWithBlobs(t, downloader.SnapSync, 128, 128) -} - -func testChainSyncWithBlobs(t *testing.T, mode downloader.SyncMode, preCancunBlks, postCancunBlks uint64) { - t.Parallel() - config := *params.ParliaTestChainConfig - cancunTime := (preCancunBlks + 1) * 10 - config.CancunTime = &cancunTime - - // Create a full handler and ensure snap sync ends up disabled - full := newTestParliaHandlerAfterCancun(t, &config, mode, preCancunBlks, postCancunBlks) - defer full.close() - if downloader.SnapSync == mode && full.handler.snapSync.Load() { - t.Fatalf("snap sync not disabled on non-empty blockchain") - } - - // check blocks and blobs - checkChainWithBlobs(t, full.chain, preCancunBlks, postCancunBlks) - - // Create an empty handler and ensure it's in snap sync mode - empty := newTestParliaHandlerAfterCancun(t, &config, mode, 0, 0) - defer empty.close() - if downloader.SnapSync == mode && !empty.handler.snapSync.Load() { - t.Fatalf("snap sync disabled on pristine blockchain") - } - - // Sync up the two handlers via both `eth` and `snap` - ethVer := uint(eth.ETH68) - snapVer := uint(snap.SNAP1) - - // Sync up the two handlers via both `eth` and `snap` - caps := []p2p.Cap{{Name: "eth", Version: ethVer}, {Name: "snap", Version: snapVer}} - - emptyPipeEth, fullPipeEth := p2p.MsgPipe() - defer emptyPipeEth.Close() - defer fullPipeEth.Close() - - emptyPeerEth := eth.NewPeer(ethVer, p2p.NewPeer(enode.ID{1}, "", caps), emptyPipeEth, empty.txpool) - fullPeerEth := eth.NewPeer(ethVer, p2p.NewPeer(enode.ID{2}, "", caps), fullPipeEth, full.txpool) - defer emptyPeerEth.Close() - defer fullPeerEth.Close() - - go empty.handler.runEthPeer(emptyPeerEth, func(peer *eth.Peer) error { - return eth.Handle((*ethHandler)(empty.handler), peer) - }) - go full.handler.runEthPeer(fullPeerEth, func(peer *eth.Peer) error { - return eth.Handle((*ethHandler)(full.handler), peer) - }) - // Wait a bit for the above handlers to start - time.Sleep(250 * time.Millisecond) - - emptyPipeSnap, fullPipeSnap := p2p.MsgPipe() - defer emptyPipeSnap.Close() - defer fullPipeSnap.Close() - - emptyPeerSnap := snap.NewPeer(snapVer, p2p.NewPeer(enode.ID{1}, "", caps), emptyPipeSnap) - fullPeerSnap := snap.NewPeer(snapVer, p2p.NewPeer(enode.ID{2}, "", caps), fullPipeSnap) - - go empty.handler.runSnapExtension(emptyPeerSnap, func(peer *snap.Peer) error { - return snap.Handle((*snapHandler)(empty.handler), peer) - }) - go full.handler.runSnapExtension(fullPeerSnap, func(peer *snap.Peer) error { - return snap.Handle((*snapHandler)(full.handler), peer) - }) - // Wait a bit for the above handlers to start - time.Sleep(250 * time.Millisecond) - - // Check that snap sync was disabled - op := peerToSyncOp(mode, empty.handler.peers.peerWithHighestTD()) - if err := empty.handler.doSync(op); err != nil { - t.Fatal("sync failed:", err) - } - if !empty.handler.synced.Load() { - t.Fatalf("full sync not done after successful synchronisation") - } - - // check blocks and blobs - checkChainWithBlobs(t, empty.chain, preCancunBlks, postCancunBlks) -} - -func checkChainWithBlobs(t *testing.T, chain *core.BlockChain, preCancunBlks, postCancunBlks uint64) { - block := chain.GetBlockByNumber(preCancunBlks) - require.NotNil(t, block, preCancunBlks) - require.Nil(t, 
chain.GetSidecarsByHash(block.Hash()), preCancunBlks) - block = chain.GetBlockByNumber(preCancunBlks + 1) - require.NotNil(t, block, preCancunBlks+1) - require.NotNil(t, chain.GetSidecarsByHash(block.Hash()), preCancunBlks+1) - block = chain.GetBlockByNumber(preCancunBlks + postCancunBlks) - require.NotNil(t, block, preCancunBlks+postCancunBlks) - require.NotNil(t, chain.GetSidecarsByHash(block.Hash()), preCancunBlks+postCancunBlks) -} diff --git a/eth/tracers/api_test.go b/eth/tracers/api_test.go deleted file mode 100644 index cd07b0638b..0000000000 --- a/eth/tracers/api_test.go +++ /dev/null @@ -1,998 +0,0 @@ -// Copyright 2021 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package tracers - -import ( - "context" - "crypto/ecdsa" - "encoding/json" - "errors" - "fmt" - "math/big" - "reflect" - "sync/atomic" - "testing" - "time" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/common/hexutil" - "github.com/ethereum/go-ethereum/consensus" - "github.com/ethereum/go-ethereum/consensus/ethash" - "github.com/ethereum/go-ethereum/core" - "github.com/ethereum/go-ethereum/core/rawdb" - "github.com/ethereum/go-ethereum/core/state" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/core/vm" - "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/eth/tracers/logger" - "github.com/ethereum/go-ethereum/ethdb" - "github.com/ethereum/go-ethereum/internal/ethapi" - "github.com/ethereum/go-ethereum/params" - "github.com/ethereum/go-ethereum/rpc" - "golang.org/x/exp/slices" -) - -var ( - errStateNotFound = errors.New("state not found") - errBlockNotFound = errors.New("block not found") -) - -type testBackend struct { - chainConfig *params.ChainConfig - engine consensus.Engine - chaindb ethdb.Database - chain *core.BlockChain - - refHook func() // Hook is invoked when the requested state is referenced - relHook func() // Hook is invoked when the requested state is released -} - -// testBackend creates a new test backend. OBS: After test is done, teardown must be -// invoked in order to release associated resources. 
-func newTestBackend(t *testing.T, n int, gspec *core.Genesis, generator func(i int, b *core.BlockGen)) *testBackend { - backend := &testBackend{ - chainConfig: gspec.Config, - engine: ethash.NewFaker(), - chaindb: rawdb.NewMemoryDatabase(), - } - // Generate blocks for testing - _, blocks, _ := core.GenerateChainWithGenesis(gspec, backend.engine, n, generator) - - // Import the canonical chain - cacheConfig := &core.CacheConfig{ - TrieCleanLimit: 256, - TrieDirtyLimit: 256, - TrieTimeLimit: 5 * time.Minute, - SnapshotLimit: 0, - TriesInMemory: 128, - TrieDirtyDisabled: true, // Archive mode - } - chain, err := core.NewBlockChain(backend.chaindb, cacheConfig, gspec, nil, backend.engine, vm.Config{}, nil, nil) - if err != nil { - t.Fatalf("failed to create tester chain: %v", err) - } - if n, err := chain.InsertChain(blocks); err != nil { - t.Fatalf("block %d: failed to insert into chain: %v", n, err) - } - backend.chain = chain - return backend -} - -func (b *testBackend) HeaderByHash(ctx context.Context, hash common.Hash) (*types.Header, error) { - return b.chain.GetHeaderByHash(hash), nil -} - -func (b *testBackend) HeaderByNumber(ctx context.Context, number rpc.BlockNumber) (*types.Header, error) { - if number == rpc.PendingBlockNumber || number == rpc.LatestBlockNumber { - return b.chain.CurrentHeader(), nil - } - return b.chain.GetHeaderByNumber(uint64(number)), nil -} - -func (b *testBackend) BlockByHash(ctx context.Context, hash common.Hash) (*types.Block, error) { - return b.chain.GetBlockByHash(hash), nil -} - -func (b *testBackend) BlockByNumber(ctx context.Context, number rpc.BlockNumber) (*types.Block, error) { - if number == rpc.PendingBlockNumber || number == rpc.LatestBlockNumber { - return b.chain.GetBlockByNumber(b.chain.CurrentBlock().Number.Uint64()), nil - } - return b.chain.GetBlockByNumber(uint64(number)), nil -} - -func (b *testBackend) GetTransaction(ctx context.Context, txHash common.Hash) (bool, *types.Transaction, common.Hash, uint64, uint64, error) { - tx, hash, blockNumber, index := rawdb.ReadTransaction(b.chaindb, txHash) - return tx != nil, tx, hash, blockNumber, index, nil -} - -func (b *testBackend) RPCGasCap() uint64 { - return 25000000 -} - -func (b *testBackend) ChainConfig() *params.ChainConfig { - return b.chainConfig -} - -func (b *testBackend) Engine() consensus.Engine { - return b.engine -} - -func (b *testBackend) ChainDb() ethdb.Database { - return b.chaindb -} - -// teardown releases the associated resources. 
-func (b *testBackend) teardown() { - b.chain.Stop() -} - -func (b *testBackend) StateAtBlock(ctx context.Context, block *types.Block, reexec uint64, base *state.StateDB, readOnly bool, preferDisk bool) (*state.StateDB, StateReleaseFunc, error) { - statedb, err := b.chain.StateAt(block.Root()) - if err != nil { - return nil, nil, errStateNotFound - } - if b.refHook != nil { - b.refHook() - } - release := func() { - if b.relHook != nil { - b.relHook() - } - } - return statedb, release, nil -} - -func (b *testBackend) StateAtTransaction(ctx context.Context, block *types.Block, txIndex int, reexec uint64) (*core.Message, vm.BlockContext, *state.StateDB, StateReleaseFunc, error) { - parent := b.chain.GetBlock(block.ParentHash(), block.NumberU64()-1) - if parent == nil { - return nil, vm.BlockContext{}, nil, nil, errBlockNotFound - } - statedb, release, err := b.StateAtBlock(ctx, parent, reexec, nil, true, false) - if err != nil { - return nil, vm.BlockContext{}, nil, nil, errStateNotFound - } - if txIndex == 0 && len(block.Transactions()) == 0 { - return nil, vm.BlockContext{}, statedb, release, nil - } - // Recompute transactions up to the target index. - signer := types.MakeSigner(b.chainConfig, block.Number(), block.Time()) - for idx, tx := range block.Transactions() { - msg, _ := core.TransactionToMessage(tx, signer, block.BaseFee()) - txContext := core.NewEVMTxContext(msg) - context := core.NewEVMBlockContext(block.Header(), b.chain, nil) - if idx == txIndex { - return msg, context, statedb, release, nil - } - vmenv := vm.NewEVM(context, txContext, statedb, b.chainConfig, vm.Config{}) - if _, err := core.ApplyMessage(vmenv, msg, new(core.GasPool).AddGas(tx.Gas())); err != nil { - return nil, vm.BlockContext{}, nil, nil, fmt.Errorf("transaction %#x failed: %v", tx.Hash(), err) - } - statedb.Finalise(vmenv.ChainConfig().IsEIP158(block.Number())) - } - return nil, vm.BlockContext{}, nil, nil, fmt.Errorf("transaction index %d out of range for block %#x", txIndex, block.Hash()) -} - -func TestTraceCall(t *testing.T) { - t.Parallel() - - // Initialize test accounts - accounts := newAccounts(3) - genesis := &core.Genesis{ - Config: params.TestChainConfig, - Alloc: types.GenesisAlloc{ - accounts[0].addr: {Balance: big.NewInt(params.Ether)}, - accounts[1].addr: {Balance: big.NewInt(params.Ether)}, - accounts[2].addr: {Balance: big.NewInt(params.Ether)}, - }, - } - genBlocks := 10 - signer := types.HomesteadSigner{} - nonce := uint64(0) - backend := newTestBackend(t, genBlocks, genesis, func(i int, b *core.BlockGen) { - // Transfer from account[0] to account[1] - // value: 1000 wei - // fee: 0 wei - tx, _ := types.SignTx(types.NewTx(&types.LegacyTx{ - Nonce: nonce, - To: &accounts[1].addr, - Value: big.NewInt(1000), - Gas: params.TxGas, - GasPrice: b.BaseFee(), - Data: nil}), - signer, accounts[0].key) - b.AddTx(tx) - nonce++ - - if i == genBlocks-2 { - // Transfer from account[0] to account[2] - tx, _ = types.SignTx(types.NewTx(&types.LegacyTx{ - Nonce: nonce, - To: &accounts[2].addr, - Value: big.NewInt(1000), - Gas: params.TxGas, - GasPrice: b.BaseFee(), - Data: nil}), - signer, accounts[0].key) - b.AddTx(tx) - nonce++ - - // Transfer from account[0] to account[1] again - tx, _ = types.SignTx(types.NewTx(&types.LegacyTx{ - Nonce: nonce, - To: &accounts[1].addr, - Value: big.NewInt(1000), - Gas: params.TxGas, - GasPrice: b.BaseFee(), - Data: nil}), - signer, accounts[0].key) - b.AddTx(tx) - nonce++ - } - }) - - uintPtr := func(i int) *hexutil.Uint { x := hexutil.Uint(i); return &x } - - defer 
backend.teardown() - api := NewAPI(backend) - var testSuite = []struct { - blockNumber rpc.BlockNumber - call ethapi.TransactionArgs - config *TraceCallConfig - expectErr error - expect string - }{ - // Standard JSON trace upon the genesis, plain transfer. - { - blockNumber: rpc.BlockNumber(0), - call: ethapi.TransactionArgs{ - From: &accounts[0].addr, - To: &accounts[1].addr, - Value: (*hexutil.Big)(big.NewInt(1000)), - }, - config: nil, - expectErr: nil, - expect: `{"gas":21000,"failed":false,"returnValue":"","structLogs":[]}`, - }, - // Standard JSON trace upon the head, plain transfer. - { - blockNumber: rpc.BlockNumber(genBlocks), - call: ethapi.TransactionArgs{ - From: &accounts[0].addr, - To: &accounts[1].addr, - Value: (*hexutil.Big)(big.NewInt(1000)), - }, - config: nil, - expectErr: nil, - expect: `{"gas":21000,"failed":false,"returnValue":"","structLogs":[]}`, - }, - // Upon the last state, default to the post block's state - { - blockNumber: rpc.BlockNumber(genBlocks - 1), - call: ethapi.TransactionArgs{ - From: &accounts[2].addr, - To: &accounts[0].addr, - Value: (*hexutil.Big)(new(big.Int).Add(big.NewInt(params.Ether), big.NewInt(100))), - }, - config: nil, - expect: `{"gas":21000,"failed":false,"returnValue":"","structLogs":[]}`, - }, - // Before the first transaction, should be failed - { - blockNumber: rpc.BlockNumber(genBlocks - 1), - call: ethapi.TransactionArgs{ - From: &accounts[2].addr, - To: &accounts[0].addr, - Value: (*hexutil.Big)(new(big.Int).Add(big.NewInt(params.Ether), big.NewInt(100))), - }, - config: &TraceCallConfig{TxIndex: uintPtr(0)}, - expectErr: fmt.Errorf("tracing failed: insufficient funds for gas * price + value: address %s have 1000000000000000000 want 1000000000000000100", accounts[2].addr), - }, - // Before the target transaction, should be failed - { - blockNumber: rpc.BlockNumber(genBlocks - 1), - call: ethapi.TransactionArgs{ - From: &accounts[2].addr, - To: &accounts[0].addr, - Value: (*hexutil.Big)(new(big.Int).Add(big.NewInt(params.Ether), big.NewInt(100))), - }, - config: &TraceCallConfig{TxIndex: uintPtr(1)}, - expectErr: fmt.Errorf("tracing failed: insufficient funds for gas * price + value: address %s have 1000000000000000000 want 1000000000000000100", accounts[2].addr), - }, - // After the target transaction, should be succeed - { - blockNumber: rpc.BlockNumber(genBlocks - 1), - call: ethapi.TransactionArgs{ - From: &accounts[2].addr, - To: &accounts[0].addr, - Value: (*hexutil.Big)(new(big.Int).Add(big.NewInt(params.Ether), big.NewInt(100))), - }, - config: &TraceCallConfig{TxIndex: uintPtr(2)}, - expectErr: nil, - expect: `{"gas":21000,"failed":false,"returnValue":"","structLogs":[]}`, - }, - // Standard JSON trace upon the non-existent block, error expects - { - blockNumber: rpc.BlockNumber(genBlocks + 1), - call: ethapi.TransactionArgs{ - From: &accounts[0].addr, - To: &accounts[1].addr, - Value: (*hexutil.Big)(big.NewInt(1000)), - }, - config: nil, - expectErr: fmt.Errorf("block #%d not found", genBlocks+1), - // expect: nil, - }, - // Standard JSON trace upon the latest block - { - blockNumber: rpc.LatestBlockNumber, - call: ethapi.TransactionArgs{ - From: &accounts[0].addr, - To: &accounts[1].addr, - Value: (*hexutil.Big)(big.NewInt(1000)), - }, - config: nil, - expectErr: nil, - expect: `{"gas":21000,"failed":false,"returnValue":"","structLogs":[]}`, - }, - // Tracing on 'pending' should fail: - { - blockNumber: rpc.PendingBlockNumber, - call: ethapi.TransactionArgs{ - From: &accounts[0].addr, - To: &accounts[1].addr, - Value: 
(*hexutil.Big)(big.NewInt(1000)), - }, - config: nil, - expectErr: errors.New("tracing on top of pending is not supported"), - }, - { - blockNumber: rpc.LatestBlockNumber, - call: ethapi.TransactionArgs{ - From: &accounts[0].addr, - Input: &hexutil.Bytes{0x43}, // blocknumber - }, - config: &TraceCallConfig{ - BlockOverrides: ðapi.BlockOverrides{Number: (*hexutil.Big)(big.NewInt(0x1337))}, - }, - expectErr: nil, - expect: ` {"gas":53018,"failed":false,"returnValue":"","structLogs":[ - {"pc":0,"op":"NUMBER","gas":24946984,"gasCost":2,"depth":1,"stack":[]}, - {"pc":1,"op":"STOP","gas":24946982,"gasCost":0,"depth":1,"stack":["0x1337"]}]}`, - }, - } - for i, testspec := range testSuite { - result, err := api.TraceCall(context.Background(), testspec.call, rpc.BlockNumberOrHash{BlockNumber: &testspec.blockNumber}, testspec.config) - if testspec.expectErr != nil { - if err == nil { - t.Errorf("test %d: expect error %v, got nothing", i, testspec.expectErr) - continue - } - if !reflect.DeepEqual(err.Error(), testspec.expectErr.Error()) { - t.Errorf("test %d: error mismatch, want '%v', got '%v'", i, testspec.expectErr, err) - } - } else { - if err != nil { - t.Errorf("test %d: expect no error, got %v", i, err) - continue - } - var have *logger.ExecutionResult - if err := json.Unmarshal(result.(json.RawMessage), &have); err != nil { - t.Errorf("test %d: failed to unmarshal result %v", i, err) - } - var want *logger.ExecutionResult - if err := json.Unmarshal([]byte(testspec.expect), &want); err != nil { - t.Errorf("test %d: failed to unmarshal result %v", i, err) - } - if !reflect.DeepEqual(have, want) { - t.Errorf("test %d: result mismatch, want %v, got %v", i, testspec.expect, string(result.(json.RawMessage))) - } - } - } -} - -func TestTraceTransaction(t *testing.T) { - t.Parallel() - - // Initialize test accounts - accounts := newAccounts(2) - genesis := &core.Genesis{ - Config: params.TestChainConfig, - Alloc: types.GenesisAlloc{ - accounts[0].addr: {Balance: big.NewInt(params.Ether)}, - accounts[1].addr: {Balance: big.NewInt(params.Ether)}, - }, - } - target := common.Hash{} - signer := types.HomesteadSigner{} - backend := newTestBackend(t, 1, genesis, func(i int, b *core.BlockGen) { - // Transfer from account[0] to account[1] - // value: 1000 wei - // fee: 0 wei - tx, _ := types.SignTx(types.NewTx(&types.LegacyTx{ - Nonce: uint64(i), - To: &accounts[1].addr, - Value: big.NewInt(1000), - Gas: params.TxGas, - GasPrice: b.BaseFee(), - Data: nil}), - signer, accounts[0].key) - b.AddTx(tx) - target = tx.Hash() - }) - defer backend.chain.Stop() - api := NewAPI(backend) - result, err := api.TraceTransaction(context.Background(), target, nil) - if err != nil { - t.Errorf("Failed to trace transaction %v", err) - } - var have *logger.ExecutionResult - if err := json.Unmarshal(result.(json.RawMessage), &have); err != nil { - t.Errorf("failed to unmarshal result %v", err) - } - if !reflect.DeepEqual(have, &logger.ExecutionResult{ - Gas: params.TxGas, - Failed: false, - ReturnValue: "", - StructLogs: []logger.StructLogRes{}, - }) { - t.Error("Transaction tracing result is different") - } - - // Test non-existent transaction - _, err = api.TraceTransaction(context.Background(), common.Hash{42}, nil) - if !errors.Is(err, errTxNotFound) { - t.Fatalf("want %v, have %v", errTxNotFound, err) - } -} - -func TestTraceBlock(t *testing.T) { - t.Parallel() - - // Initialize test accounts - accounts := newAccounts(3) - genesis := &core.Genesis{ - Config: params.TestChainConfig, - Alloc: types.GenesisAlloc{ - 
accounts[0].addr: {Balance: big.NewInt(params.Ether)}, - accounts[1].addr: {Balance: big.NewInt(params.Ether)}, - accounts[2].addr: {Balance: big.NewInt(params.Ether)}, - }, - } - genBlocks := 10 - signer := types.HomesteadSigner{} - var txHash common.Hash - backend := newTestBackend(t, genBlocks, genesis, func(i int, b *core.BlockGen) { - // Transfer from account[0] to account[1] - // value: 1000 wei - // fee: 0 wei - tx, _ := types.SignTx(types.NewTx(&types.LegacyTx{ - Nonce: uint64(i), - To: &accounts[1].addr, - Value: big.NewInt(1000), - Gas: params.TxGas, - GasPrice: b.BaseFee(), - Data: nil}), - signer, accounts[0].key) - b.AddTx(tx) - txHash = tx.Hash() - }) - defer backend.chain.Stop() - api := NewAPI(backend) - - var testSuite = []struct { - blockNumber rpc.BlockNumber - config *TraceConfig - want string - expectErr error - }{ - // Trace genesis block, expect error - { - blockNumber: rpc.BlockNumber(0), - expectErr: errors.New("genesis is not traceable"), - }, - // Trace head block - { - blockNumber: rpc.BlockNumber(genBlocks), - want: fmt.Sprintf(`[{"txHash":"%v","result":{"gas":21000,"failed":false,"returnValue":"","structLogs":[]}}]`, txHash), - }, - // Trace non-existent block - { - blockNumber: rpc.BlockNumber(genBlocks + 1), - expectErr: fmt.Errorf("block #%d not found", genBlocks+1), - }, - // Trace latest block - { - blockNumber: rpc.LatestBlockNumber, - want: fmt.Sprintf(`[{"txHash":"%v","result":{"gas":21000,"failed":false,"returnValue":"","structLogs":[]}}]`, txHash), - }, - // Trace pending block - { - blockNumber: rpc.PendingBlockNumber, - want: fmt.Sprintf(`[{"txHash":"%v","result":{"gas":21000,"failed":false,"returnValue":"","structLogs":[]}}]`, txHash), - }, - } - for i, tc := range testSuite { - result, err := api.TraceBlockByNumber(context.Background(), tc.blockNumber, tc.config) - if tc.expectErr != nil { - if err == nil { - t.Errorf("test %d, want error %v", i, tc.expectErr) - continue - } - if !reflect.DeepEqual(err, tc.expectErr) { - t.Errorf("test %d: error mismatch, want %v, get %v", i, tc.expectErr, err) - } - continue - } - if err != nil { - t.Errorf("test %d, want no error, have %v", i, err) - continue - } - have, _ := json.Marshal(result) - want := tc.want - if string(have) != want { - t.Errorf("test %d, result mismatch, have\n%v\n, want\n%v\n", i, string(have), want) - } - } -} - -func TestTracingWithOverrides(t *testing.T) { - t.Parallel() - // Initialize test accounts - accounts := newAccounts(3) - storageAccount := common.Address{0x13, 37} - genesis := &core.Genesis{ - Config: params.TestChainConfig, - Alloc: types.GenesisAlloc{ - accounts[0].addr: {Balance: big.NewInt(params.Ether)}, - accounts[1].addr: {Balance: big.NewInt(params.Ether)}, - accounts[2].addr: {Balance: big.NewInt(params.Ether)}, - // An account with existing storage - storageAccount: { - Balance: new(big.Int), - Storage: map[common.Hash]common.Hash{ - common.HexToHash("0x03"): common.HexToHash("0x33"), - common.HexToHash("0x04"): common.HexToHash("0x44"), - }, - }, - }, - } - genBlocks := 10 - signer := types.HomesteadSigner{} - backend := newTestBackend(t, genBlocks, genesis, func(i int, b *core.BlockGen) { - // Transfer from account[0] to account[1] - // value: 1000 wei - // fee: 0 wei - tx, _ := types.SignTx(types.NewTx(&types.LegacyTx{ - Nonce: uint64(i), - To: &accounts[1].addr, - Value: big.NewInt(1000), - Gas: params.TxGas, - GasPrice: b.BaseFee(), - Data: nil}), - signer, accounts[0].key) - b.AddTx(tx) - }) - defer backend.chain.Stop() - api := NewAPI(backend) - 
randomAccounts := newAccounts(3) - type res struct { - Gas int - Failed bool - ReturnValue string - } - var testSuite = []struct { - blockNumber rpc.BlockNumber - call ethapi.TransactionArgs - config *TraceCallConfig - expectErr error - want string - }{ - // Call which can only succeed if state is state overridden - { - blockNumber: rpc.LatestBlockNumber, - call: ethapi.TransactionArgs{ - From: &randomAccounts[0].addr, - To: &randomAccounts[1].addr, - Value: (*hexutil.Big)(big.NewInt(1000)), - }, - config: &TraceCallConfig{ - StateOverrides: ðapi.StateOverride{ - randomAccounts[0].addr: ethapi.OverrideAccount{Balance: newRPCBalance(new(big.Int).Mul(big.NewInt(1), big.NewInt(params.Ether)))}, - }, - }, - want: `{"gas":21000,"failed":false,"returnValue":""}`, - }, - // Invalid call without state overriding - { - blockNumber: rpc.LatestBlockNumber, - call: ethapi.TransactionArgs{ - From: &randomAccounts[0].addr, - To: &randomAccounts[1].addr, - Value: (*hexutil.Big)(big.NewInt(1000)), - }, - config: &TraceCallConfig{}, - expectErr: core.ErrInsufficientFunds, - }, - // Successful simple contract call - // - // // SPDX-License-Identifier: GPL-3.0 - // - // pragma solidity >=0.7.0 <0.8.0; - // - // /** - // * @title Storage - // * @dev Store & retrieve value in a variable - // */ - // contract Storage { - // uint256 public number; - // constructor() { - // number = block.number; - // } - // } - { - blockNumber: rpc.LatestBlockNumber, - call: ethapi.TransactionArgs{ - From: &randomAccounts[0].addr, - To: &randomAccounts[2].addr, - Data: newRPCBytes(common.Hex2Bytes("8381f58a")), // call number() - }, - config: &TraceCallConfig{ - // Tracer: &tracer, - StateOverrides: ðapi.StateOverride{ - randomAccounts[2].addr: ethapi.OverrideAccount{ - Code: newRPCBytes(common.Hex2Bytes("6080604052348015600f57600080fd5b506004361060285760003560e01c80638381f58a14602d575b600080fd5b60336049565b6040518082815260200191505060405180910390f35b6000548156fea2646970667358221220eab35ffa6ab2adfe380772a48b8ba78e82a1b820a18fcb6f59aa4efb20a5f60064736f6c63430007040033")), - StateDiff: newStates([]common.Hash{{}}, []common.Hash{common.BigToHash(big.NewInt(123))}), - }, - }, - }, - want: `{"gas":23347,"failed":false,"returnValue":"000000000000000000000000000000000000000000000000000000000000007b"}`, - }, - { // Override blocknumber - blockNumber: rpc.LatestBlockNumber, - call: ethapi.TransactionArgs{ - From: &accounts[0].addr, - // BLOCKNUMBER PUSH1 MSTORE - Input: newRPCBytes(common.Hex2Bytes("4360005260206000f3")), - // &hexutil.Bytes{0x43}, // blocknumber - }, - config: &TraceCallConfig{ - BlockOverrides: ðapi.BlockOverrides{Number: (*hexutil.Big)(big.NewInt(0x1337))}, - }, - want: `{"gas":59537,"failed":false,"returnValue":"0000000000000000000000000000000000000000000000000000000000001337"}`, - }, - { // Override blocknumber, and query a blockhash - blockNumber: rpc.LatestBlockNumber, - call: ethapi.TransactionArgs{ - From: &accounts[0].addr, - Input: &hexutil.Bytes{ - 0x60, 0x00, 0x40, // BLOCKHASH(0) - 0x60, 0x00, 0x52, // STORE memory offset 0 - 0x61, 0x13, 0x36, 0x40, // BLOCKHASH(0x1336) - 0x60, 0x20, 0x52, // STORE memory offset 32 - 0x61, 0x13, 0x37, 0x40, // BLOCKHASH(0x1337) - 0x60, 0x40, 0x52, // STORE memory offset 64 - 0x60, 0x60, 0x60, 0x00, 0xf3, // RETURN (0-96) - - }, // blocknumber - }, - config: &TraceCallConfig{ - BlockOverrides: ðapi.BlockOverrides{Number: (*hexutil.Big)(big.NewInt(0x1337))}, - }, - want: 
`{"gas":72666,"failed":false,"returnValue":"000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"}`, - }, - /* - pragma solidity =0.8.12; - - contract Test { - uint private x; - - function test2() external { - x = 1337; - revert(); - } - - function test() external returns (uint) { - x = 1; - try this.test2() {} catch (bytes memory) {} - return x; - } - } - */ - { // First with only code override, not storage override - blockNumber: rpc.LatestBlockNumber, - call: ethapi.TransactionArgs{ - From: &randomAccounts[0].addr, - To: &randomAccounts[2].addr, - Data: newRPCBytes(common.Hex2Bytes("f8a8fd6d")), // - }, - config: &TraceCallConfig{ - StateOverrides: ðapi.StateOverride{ - randomAccounts[2].addr: ethapi.OverrideAccount{ - Code: newRPCBytes(common.Hex2Bytes("6080604052348015600f57600080fd5b506004361060325760003560e01c806366e41cb7146037578063f8a8fd6d14603f575b600080fd5b603d6057565b005b60456062565b60405190815260200160405180910390f35b610539600090815580fd5b60006001600081905550306001600160a01b03166366e41cb76040518163ffffffff1660e01b8152600401600060405180830381600087803b15801560a657600080fd5b505af192505050801560b6575060015b60e9573d80801560e1576040519150601f19603f3d011682016040523d82523d6000602084013e60e6565b606091505b50505b506000549056fea26469706673582212205ce45de745a5308f713cb2f448589177ba5a442d1a2eff945afaa8915961b4d064736f6c634300080c0033")), - }, - }, - }, - want: `{"gas":44100,"failed":false,"returnValue":"0000000000000000000000000000000000000000000000000000000000000001"}`, - }, - { // Same again, this time with storage override - blockNumber: rpc.LatestBlockNumber, - call: ethapi.TransactionArgs{ - From: &randomAccounts[0].addr, - To: &randomAccounts[2].addr, - Data: newRPCBytes(common.Hex2Bytes("f8a8fd6d")), // - }, - config: &TraceCallConfig{ - StateOverrides: ðapi.StateOverride{ - randomAccounts[2].addr: ethapi.OverrideAccount{ - Code: newRPCBytes(common.Hex2Bytes("6080604052348015600f57600080fd5b506004361060325760003560e01c806366e41cb7146037578063f8a8fd6d14603f575b600080fd5b603d6057565b005b60456062565b60405190815260200160405180910390f35b610539600090815580fd5b60006001600081905550306001600160a01b03166366e41cb76040518163ffffffff1660e01b8152600401600060405180830381600087803b15801560a657600080fd5b505af192505050801560b6575060015b60e9573d80801560e1576040519150601f19603f3d011682016040523d82523d6000602084013e60e6565b606091505b50505b506000549056fea26469706673582212205ce45de745a5308f713cb2f448589177ba5a442d1a2eff945afaa8915961b4d064736f6c634300080c0033")), - State: newStates([]common.Hash{{}}, []common.Hash{{}}), - }, - }, - }, - // want: `{"gas":46900,"failed":false,"returnValue":"0000000000000000000000000000000000000000000000000000000000000539"}`, - want: `{"gas":44100,"failed":false,"returnValue":"0000000000000000000000000000000000000000000000000000000000000001"}`, - }, - { // No state override - blockNumber: rpc.LatestBlockNumber, - call: ethapi.TransactionArgs{ - From: &randomAccounts[0].addr, - To: &storageAccount, - Data: newRPCBytes(common.Hex2Bytes("f8a8fd6d")), // - }, - config: &TraceCallConfig{ - StateOverrides: ðapi.StateOverride{ - storageAccount: ethapi.OverrideAccount{ - Code: newRPCBytes([]byte{ - // SLOAD(3) + SLOAD(4) (which is 0x77) - byte(vm.PUSH1), 0x04, - byte(vm.SLOAD), - byte(vm.PUSH1), 0x03, - byte(vm.SLOAD), - byte(vm.ADD), - // 0x77 -> MSTORE(0) - byte(vm.PUSH1), 0x00, - byte(vm.MSTORE), - // RETURN (0, 32) - byte(vm.PUSH1), 32, - 
byte(vm.PUSH1), 00, - byte(vm.RETURN), - }), - }, - }, - }, - want: `{"gas":25288,"failed":false,"returnValue":"0000000000000000000000000000000000000000000000000000000000000077"}`, - }, - { // Full state override - // The original storage is - // 3: 0x33 - // 4: 0x44 - // With a full override, where we set 3:0x11, the slot 4 should be - // removed. So SLOT(3)+SLOT(4) should be 0x11. - blockNumber: rpc.LatestBlockNumber, - call: ethapi.TransactionArgs{ - From: &randomAccounts[0].addr, - To: &storageAccount, - Data: newRPCBytes(common.Hex2Bytes("f8a8fd6d")), // - }, - config: &TraceCallConfig{ - StateOverrides: &ethapi.StateOverride{ - storageAccount: ethapi.OverrideAccount{ - Code: newRPCBytes([]byte{ - // SLOAD(3) + SLOAD(4) (which is now 0x11 + 0x00) - byte(vm.PUSH1), 0x04, - byte(vm.SLOAD), - byte(vm.PUSH1), 0x03, - byte(vm.SLOAD), - byte(vm.ADD), - // 0x11 -> MSTORE(0) - byte(vm.PUSH1), 0x00, - byte(vm.MSTORE), - // RETURN (0, 32) - byte(vm.PUSH1), 32, - byte(vm.PUSH1), 00, - byte(vm.RETURN), - }), - State: newStates( - []common.Hash{common.HexToHash("0x03")}, - []common.Hash{common.HexToHash("0x11")}), - }, - }, - }, - want: `{"gas":25288,"failed":false,"returnValue":"0000000000000000000000000000000000000000000000000000000000000011"}`, - }, - { // Partial state override - // The original storage is - // 3: 0x33 - // 4: 0x44 - // With a partial override, where we set 3:0x11, slot 4 stays as before. - // So SLOT(3)+SLOT(4) should be 0x55. - blockNumber: rpc.LatestBlockNumber, - call: ethapi.TransactionArgs{ - From: &randomAccounts[0].addr, - To: &storageAccount, - Data: newRPCBytes(common.Hex2Bytes("f8a8fd6d")), // - }, - config: &TraceCallConfig{ - StateOverrides: &ethapi.StateOverride{ - storageAccount: ethapi.OverrideAccount{ - Code: newRPCBytes([]byte{ - // SLOAD(3) + SLOAD(4) (which is now 0x11 + 0x44) - byte(vm.PUSH1), 0x04, - byte(vm.SLOAD), - byte(vm.PUSH1), 0x03, - byte(vm.SLOAD), - byte(vm.ADD), - // 0x55 -> MSTORE(0) - byte(vm.PUSH1), 0x00, - byte(vm.MSTORE), - // RETURN (0, 32) - byte(vm.PUSH1), 32, - byte(vm.PUSH1), 00, - byte(vm.RETURN), - }), - StateDiff: &map[common.Hash]common.Hash{ - common.HexToHash("0x03"): common.HexToHash("0x11"), - }, - }, - }, - }, - want: `{"gas":25288,"failed":false,"returnValue":"0000000000000000000000000000000000000000000000000000000000000055"}`, - }, - } - for i, tc := range testSuite { - result, err := api.TraceCall(context.Background(), tc.call, rpc.BlockNumberOrHash{BlockNumber: &tc.blockNumber}, tc.config) - if tc.expectErr != nil { - if err == nil { - t.Errorf("test %d: want error %v, have nothing", i, tc.expectErr) - continue - } - if !errors.Is(err, tc.expectErr) { - t.Errorf("test %d: error mismatch, want %v, have %v", i, tc.expectErr, err) - } - continue - } - if err != nil { - t.Errorf("test %d: want no error, have %v", i, err) - continue - } - // Turn result into res-struct - var ( - have res - want res - ) - resBytes, _ := json.Marshal(result) - json.Unmarshal(resBytes, &have) - json.Unmarshal([]byte(tc.want), &want) - if !reflect.DeepEqual(have, want) { - t.Logf("result: %v\n", string(resBytes)) - t.Errorf("test %d, result mismatch, have\n%v\n, want\n%v\n", i, have, want) - } - } -} - -type Account struct { - key *ecdsa.PrivateKey - addr common.Address -} - -func newAccounts(n int) (accounts []Account) { - for i := 0; i < n; i++ { - key, _ := crypto.GenerateKey() - addr := crypto.PubkeyToAddress(key.PublicKey) - accounts = append(accounts, Account{key: key, addr: addr}) - } - slices.SortFunc(accounts, func(a, b Account) int { return 
a.addr.Cmp(b.addr) }) - return accounts -} - -func newRPCBalance(balance *big.Int) **hexutil.Big { - rpcBalance := (*hexutil.Big)(balance) - return &rpcBalance -} - -func newRPCBytes(bytes []byte) *hexutil.Bytes { - rpcBytes := hexutil.Bytes(bytes) - return &rpcBytes -} - -func newStates(keys []common.Hash, vals []common.Hash) *map[common.Hash]common.Hash { - if len(keys) != len(vals) { - panic("invalid input") - } - m := make(map[common.Hash]common.Hash) - for i := 0; i < len(keys); i++ { - m[keys[i]] = vals[i] - } - return &m -} - -func TestTraceChain(t *testing.T) { - // Initialize test accounts - accounts := newAccounts(3) - genesis := &core.Genesis{ - Config: params.TestChainConfig, - Alloc: types.GenesisAlloc{ - accounts[0].addr: {Balance: big.NewInt(params.Ether)}, - accounts[1].addr: {Balance: big.NewInt(params.Ether)}, - accounts[2].addr: {Balance: big.NewInt(params.Ether)}, - }, - } - genBlocks := 50 - signer := types.HomesteadSigner{} - - var ( - ref atomic.Uint32 // total refs has made - rel atomic.Uint32 // total rels has made - nonce uint64 - ) - backend := newTestBackend(t, genBlocks, genesis, func(i int, b *core.BlockGen) { - // Transfer from account[0] to account[1] - // value: 1000 wei - // fee: 0 wei - for j := 0; j < i+1; j++ { - tx, _ := types.SignTx(types.NewTransaction(nonce, accounts[1].addr, big.NewInt(1000), params.TxGas, b.BaseFee(), nil), signer, accounts[0].key) - b.AddTx(tx) - nonce += 1 - } - }) - backend.refHook = func() { ref.Add(1) } - backend.relHook = func() { rel.Add(1) } - api := NewAPI(backend) - - single := `{"txHash":"0x0000000000000000000000000000000000000000000000000000000000000000","result":{"gas":21000,"failed":false,"returnValue":"","structLogs":[]}}` - var cases = []struct { - start uint64 - end uint64 - config *TraceConfig - }{ - {0, 50, nil}, // the entire chain range, blocks [1, 50] - {10, 20, nil}, // the middle chain range, blocks [11, 20] - } - for _, c := range cases { - ref.Store(0) - rel.Store(0) - - from, _ := api.blockByNumber(context.Background(), rpc.BlockNumber(c.start)) - to, _ := api.blockByNumber(context.Background(), rpc.BlockNumber(c.end)) - resCh := api.traceChain(from, to, c.config, nil) - - next := c.start + 1 - for result := range resCh { - if have, want := uint64(result.Block), next; have != want { - t.Fatalf("unexpected tracing block, have %d want %d", have, want) - } - if have, want := len(result.Traces), int(next); have != want { - t.Fatalf("unexpected result length, have %d want %d", have, want) - } - for _, trace := range result.Traces { - trace.TxHash = common.Hash{} - blob, _ := json.Marshal(trace) - if have, want := string(blob), single; have != want { - t.Fatalf("unexpected tracing result, have\n%v\nwant:\n%v", have, want) - } - } - next += 1 - } - if next != c.end+1 { - t.Error("Missing tracing block") - } - - if nref, nrel := ref.Load(), rel.Load(); nref != nrel { - t.Errorf("Ref and deref actions are not equal, ref %d rel %d", nref, nrel) - } - } -} diff --git a/ethclient/ethclient_test.go b/ethclient/ethclient_test.go deleted file mode 100644 index 54ce597b09..0000000000 --- a/ethclient/ethclient_test.go +++ /dev/null @@ -1,780 +0,0 @@ -// Copyright 2016 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
-// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package ethclient - -import ( - "context" - "errors" - "math/big" - "reflect" - "testing" - "time" - - "github.com/ethereum/go-ethereum" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/consensus/ethash" - "github.com/ethereum/go-ethereum/core" - "github.com/ethereum/go-ethereum/core/rawdb" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/core/vm" - "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/eth" - "github.com/ethereum/go-ethereum/eth/ethconfig" - "github.com/ethereum/go-ethereum/internal/ethapi" - "github.com/ethereum/go-ethereum/node" - "github.com/ethereum/go-ethereum/params" - "github.com/ethereum/go-ethereum/rlp" - "github.com/ethereum/go-ethereum/rpc" - "github.com/ethereum/go-ethereum/triedb" -) - -// Verify that Client implements the ethereum interfaces. -var ( - _ = ethereum.ChainReader(&Client{}) - _ = ethereum.TransactionReader(&Client{}) - _ = ethereum.ChainStateReader(&Client{}) - _ = ethereum.ChainSyncReader(&Client{}) - _ = ethereum.ContractCaller(&Client{}) - _ = ethereum.GasEstimator(&Client{}) - _ = ethereum.GasPricer(&Client{}) - _ = ethereum.LogFilterer(&Client{}) - _ = ethereum.PendingStateReader(&Client{}) - // _ = ethereum.PendingStateEventer(&Client{}) - _ = ethereum.PendingContractCaller(&Client{}) -) - -func TestToFilterArg(t *testing.T) { - blockHashErr := errors.New("cannot specify both BlockHash and FromBlock/ToBlock") - addresses := []common.Address{ - common.HexToAddress("0xD36722ADeC3EdCB29c8e7b5a47f352D701393462"), - } - blockHash := common.HexToHash( - "0xeb94bb7d78b73657a9d7a99792413f50c0a45c51fc62bdcb08a53f18e9a2b4eb", - ) - - for _, testCase := range []struct { - name string - input ethereum.FilterQuery - output interface{} - err error - }{ - { - "without BlockHash", - ethereum.FilterQuery{ - Addresses: addresses, - FromBlock: big.NewInt(1), - ToBlock: big.NewInt(2), - Topics: [][]common.Hash{}, - }, - map[string]interface{}{ - "address": addresses, - "fromBlock": "0x1", - "toBlock": "0x2", - "topics": [][]common.Hash{}, - }, - nil, - }, - { - "with nil fromBlock and nil toBlock", - ethereum.FilterQuery{ - Addresses: addresses, - Topics: [][]common.Hash{}, - }, - map[string]interface{}{ - "address": addresses, - "fromBlock": "0x0", - "toBlock": "latest", - "topics": [][]common.Hash{}, - }, - nil, - }, - { - "with negative fromBlock and negative toBlock", - ethereum.FilterQuery{ - Addresses: addresses, - FromBlock: big.NewInt(-1), - ToBlock: big.NewInt(-1), - Topics: [][]common.Hash{}, - }, - map[string]interface{}{ - "address": addresses, - "fromBlock": "pending", - "toBlock": "pending", - "topics": [][]common.Hash{}, - }, - nil, - }, - { - "with blockhash", - ethereum.FilterQuery{ - Addresses: addresses, - BlockHash: &blockHash, - Topics: [][]common.Hash{}, - }, - map[string]interface{}{ - "address": addresses, - "blockHash": blockHash, - "topics": [][]common.Hash{}, - }, - nil, - }, - { - "with blockhash and from block", - ethereum.FilterQuery{ - Addresses: addresses, - BlockHash: &blockHash, - FromBlock: big.NewInt(1), - Topics: [][]common.Hash{}, - }, - 
nil, - blockHashErr, - }, - { - "with blockhash and to block", - ethereum.FilterQuery{ - Addresses: addresses, - BlockHash: &blockHash, - ToBlock: big.NewInt(1), - Topics: [][]common.Hash{}, - }, - nil, - blockHashErr, - }, - { - "with blockhash and both from / to block", - ethereum.FilterQuery{ - Addresses: addresses, - BlockHash: &blockHash, - FromBlock: big.NewInt(1), - ToBlock: big.NewInt(2), - Topics: [][]common.Hash{}, - }, - nil, - blockHashErr, - }, - } { - t.Run(testCase.name, func(t *testing.T) { - output, err := toFilterArg(testCase.input) - if (testCase.err == nil) != (err == nil) { - t.Fatalf("expected error %v but got %v", testCase.err, err) - } - if testCase.err != nil { - if testCase.err.Error() != err.Error() { - t.Fatalf("expected error %v but got %v", testCase.err, err) - } - } else if !reflect.DeepEqual(testCase.output, output) { - t.Fatalf("expected filter arg %v but got %v", testCase.output, output) - } - }) - } -} - -var ( - testKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") - testAddr = crypto.PubkeyToAddress(testKey.PublicKey) - testBalance = big.NewInt(2e18) - testGasPrice = big.NewInt(3e9) // 3Gwei - testBlockNum = 128 - testBlocks = []testBlockParam{ - { - blockNr: 1, - txs: []testTransactionParam{ - { - to: common.Address{0x10}, - value: big.NewInt(0), - gasPrice: testGasPrice, - data: nil, - }, - { - to: common.Address{0x11}, - value: big.NewInt(0), - gasPrice: testGasPrice, - data: nil, - }, - }, - }, - { - // This txs params also used to default block. - blockNr: 10, - txs: []testTransactionParam{}, - }, - { - blockNr: 11, - txs: []testTransactionParam{ - { - to: common.Address{0x01}, - value: big.NewInt(1), - gasPrice: big.NewInt(params.InitialBaseFee), - data: nil, - }, - }, - }, - { - blockNr: 12, - txs: []testTransactionParam{ - { - to: common.Address{0x01}, - value: big.NewInt(1), - gasPrice: big.NewInt(params.InitialBaseFee), - data: nil, - }, - { - to: common.Address{0x02}, - value: big.NewInt(2), - gasPrice: big.NewInt(params.InitialBaseFee), - data: nil, - }, - }, - }, - { - blockNr: 13, - txs: []testTransactionParam{ - { - to: common.Address{0x01}, - value: big.NewInt(1), - gasPrice: big.NewInt(params.InitialBaseFee), - data: nil, - }, - { - to: common.Address{0x02}, - value: big.NewInt(2), - gasPrice: big.NewInt(params.InitialBaseFee), - data: nil, - }, - { - to: common.Address{0x03}, - value: big.NewInt(3), - gasPrice: big.NewInt(params.InitialBaseFee), - data: nil, - }, - }, - }, - } -) - -var genesis = &core.Genesis{ - Config: params.AllEthashProtocolChanges, - Alloc: types.GenesisAlloc{testAddr: {Balance: testBalance}}, - ExtraData: []byte("test genesis"), - Timestamp: 9000, - BaseFee: big.NewInt(params.InitialBaseFeeForBSC), -} - -var testTx1 = types.MustSignNewTx(testKey, types.LatestSigner(genesis.Config), &types.LegacyTx{ - Nonce: 254, - Value: big.NewInt(12), - GasPrice: testGasPrice, - Gas: params.TxGas, - To: &common.Address{2}, -}) - -var testTx2 = types.MustSignNewTx(testKey, types.LatestSigner(genesis.Config), &types.LegacyTx{ - Nonce: 255, - Value: big.NewInt(8), - GasPrice: testGasPrice, - Gas: params.TxGas, - To: &common.Address{2}, -}) - -type testTransactionParam struct { - to common.Address - value *big.Int - gasPrice *big.Int - data []byte -} - -type testBlockParam struct { - blockNr int - txs []testTransactionParam -} - -func newTestBackend(t *testing.T) (*node.Node, []*types.Block) { - // Generate test chain. 
-	blocks := generateTestChain()
-
-	// Create node
-	n, err := node.New(&node.Config{})
-	if err != nil {
-		t.Fatalf("can't create new node: %v", err)
-	}
-	// Create Ethereum Service
-	config := &ethconfig.Config{Genesis: genesis}
-	config.SnapshotCache = 256
-	config.TriesInMemory = 128
-	ethservice, err := eth.New(n, config)
-	if err != nil {
-		t.Fatalf("can't create new ethereum service: %v", err)
-	}
-	// Import the test chain.
-	if err := n.Start(); err != nil {
-		t.Fatalf("can't start test node: %v", err)
-	}
-	if _, err := ethservice.BlockChain().InsertChain(blocks[1:]); err != nil {
-		t.Fatalf("can't import test blocks: %v", err)
-	}
-	// Ensure the tx indexing is fully generated
-	for ; ; time.Sleep(time.Millisecond * 100) {
-		progress, err := ethservice.BlockChain().TxIndexProgress()
-		if err == nil && progress.Done() {
-			break
-		}
-	}
-	return n, blocks
-}
-
-func generateTestChain() []*types.Block {
-	signer := types.HomesteadSigner{}
-	// Create a database pre-initialize with a genesis block
-	db := rawdb.NewMemoryDatabase()
-	genesis.MustCommit(db, triedb.NewDatabase(db, nil))
-	chain, _ := core.NewBlockChain(db, nil, genesis, nil, ethash.NewFaker(), vm.Config{}, nil, nil, core.EnablePersistDiff(860000))
-	generate := func(i int, block *core.BlockGen) {
-		block.OffsetTime(5)
-		block.SetExtra([]byte("test"))
-		//block.SetCoinbase(testAddr)
-
-		for idx, testBlock := range testBlocks {
-			// Specific block setting, the index in this generator has 1 diff from specified blockNr.
-			if i+1 == testBlock.blockNr {
-				for _, testTransaction := range testBlock.txs {
-					tx, err := types.SignTx(types.NewTransaction(block.TxNonce(testAddr), testTransaction.to,
-						testTransaction.value, params.TxGas, testTransaction.gasPrice, testTransaction.data), signer, testKey)
-					if err != nil {
-						panic(err)
-					}
-					block.AddTxWithChain(chain, tx)
-				}
-				break
-			}
-
-			// Default block setting.
-			if idx == len(testBlocks)-1 {
-				// We want to simulate an empty middle block, having the same state as the
-				// first one. The last is needs a state change again to force a reorg.
-				for _, testTransaction := range testBlocks[0].txs {
-					tx, err := types.SignTx(types.NewTransaction(block.TxNonce(testAddr), testTransaction.to,
-						testTransaction.value, params.TxGas, testTransaction.gasPrice, testTransaction.data), signer, testKey)
-					if err != nil {
-						panic(err)
-					}
-					block.AddTxWithChain(chain, tx)
-				}
-			}
-		}
-		// for testTransactionInBlock
-		if i+1 == testBlockNum {
-			block.AddTxWithChain(chain, testTx1)
-			block.AddTxWithChain(chain, testTx2)
-		}
-	}
-	gblock := genesis.MustCommit(db, triedb.NewDatabase(db, nil))
-	engine := ethash.NewFaker()
-	blocks, _ := core.GenerateChain(genesis.Config, gblock, engine, db, testBlockNum, generate)
-	blocks = append([]*types.Block{gblock}, blocks...)
- return blocks -} - -func TestEthClient(t *testing.T) { - backend, chain := newTestBackend(t) - client := backend.Attach() - defer backend.Close() - defer client.Close() - - tests := map[string]struct { - test func(t *testing.T) - }{ - "Header": { - func(t *testing.T) { testHeader(t, chain, client) }, - }, - "BalanceAt": { - func(t *testing.T) { testBalanceAt(t, client) }, - }, - "TxInBlockInterrupted": { - func(t *testing.T) { testTransactionInBlock(t, client) }, - }, - "ChainID": { - func(t *testing.T) { testChainID(t, client) }, - }, - "GetBlock": { - func(t *testing.T) { testGetBlock(t, client) }, - }, - "StatusFunctions": { - func(t *testing.T) { testStatusFunctions(t, client) }, - }, - "CallContract": { - func(t *testing.T) { testCallContract(t, client) }, - }, - "CallContractAtHash": { - func(t *testing.T) { testCallContractAtHash(t, client) }, - }, - // DO not have TestAtFunctions now, because we do not have pending block now - // "AtFunctions": { - // func(t *testing.T) { testAtFunctions(t, client) }, - // }, - "TestSendTransactionConditional": { - func(t *testing.T) { testSendTransactionConditional(t, client) }, - }, - } - - t.Parallel() - for name, tt := range tests { - t.Run(name, tt.test) - } -} - -func testHeader(t *testing.T, chain []*types.Block, client *rpc.Client) { - tests := map[string]struct { - block *big.Int - want *types.Header - wantErr error - }{ - "genesis": { - block: big.NewInt(0), - want: chain[0].Header(), - }, - "first_block": { - block: big.NewInt(1), - want: chain[1].Header(), - }, - "future_block": { - block: big.NewInt(1000000000), - want: nil, - wantErr: ethereum.NotFound, - }, - } - for name, tt := range tests { - t.Run(name, func(t *testing.T) { - ec := NewClient(client) - ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond) - defer cancel() - - got, err := ec.HeaderByNumber(ctx, tt.block) - if !errors.Is(err, tt.wantErr) { - t.Fatalf("HeaderByNumber(%v) error = %q, want %q", tt.block, err, tt.wantErr) - } - - gotBytes, err := rlp.EncodeToBytes(got) - if err != nil { - t.Fatalf("Error serializing received block header.") - } - wantBytes, err := rlp.EncodeToBytes(tt.want) - if err != nil { - t.Fatalf("Error serializing wanted block header.") - } - - // Instead of comparing the Header's compare the serialized bytes, - // because reflect.DeepEqual(*types.Header, *types.Header) sometimes - // returns false even though the underlying field values are exactly the same. 
- if !reflect.DeepEqual(gotBytes, wantBytes) { - t.Fatalf("HeaderByNumber(%v) got = %v, want %v", tt.block, got, tt.want) - } - }) - } -} - -func testBalanceAt(t *testing.T, client *rpc.Client) { - tests := map[string]struct { - account common.Address - block *big.Int - want *big.Int - wantErr error - }{ - "valid_account_genesis": { - account: testAddr, - block: big.NewInt(0), - want: testBalance, - }, - "valid_account": { - account: testAddr, - block: big.NewInt(1), - want: big.NewInt(0).Sub(testBalance, big.NewInt(0).Mul(big.NewInt(2*21000), testGasPrice)), - }, - "non_existent_account": { - account: common.Address{1}, - block: big.NewInt(1), - want: big.NewInt(0), - }, - "future_block": { - account: testAddr, - block: big.NewInt(1000000000), - want: big.NewInt(0), - wantErr: errors.New("header not found"), - }, - } - for name, tt := range tests { - t.Run(name, func(t *testing.T) { - ec := NewClient(client) - ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond) - defer cancel() - - got, err := ec.BalanceAt(ctx, tt.account, tt.block) - if tt.wantErr != nil && (err == nil || err.Error() != tt.wantErr.Error()) { - t.Fatalf("BalanceAt(%x, %v) error = %q, want %q", tt.account, tt.block, err, tt.wantErr) - } - if got.Cmp(tt.want) != 0 { - t.Fatalf("BalanceAt(%x, %v) = %v, want %v", tt.account, tt.block, got, tt.want) - } - }) - } -} - -func testTransactionInBlock(t *testing.T, client *rpc.Client) { - ec := NewClient(client) - - // Get current block by number. - block, err := ec.BlockByNumber(context.Background(), nil) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - // Test tx in block not found. - if _, err := ec.TransactionInBlock(context.Background(), block.Hash(), 20); err != ethereum.NotFound { - t.Fatal("error should be ethereum.NotFound") - } - - // Test tx in block found. 
- tx, err := ec.TransactionInBlock(context.Background(), block.Hash(), 2) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - if tx.Hash() != testTx1.Hash() { - t.Fatalf("unexpected transaction: %v", tx) - } - - tx, err = ec.TransactionInBlock(context.Background(), block.Hash(), 3) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - if tx.Hash() != testTx2.Hash() { - t.Fatalf("unexpected transaction: %v", tx) - } -} - -func testChainID(t *testing.T, client *rpc.Client) { - ec := NewClient(client) - id, err := ec.ChainID(context.Background()) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - if id == nil || id.Cmp(params.AllEthashProtocolChanges.ChainID) != 0 { - t.Fatalf("ChainID returned wrong number: %+v", id) - } -} - -func testGetBlock(t *testing.T, client *rpc.Client) { - ec := NewClient(client) - - // Get current block number - blockNumber, err := ec.BlockNumber(context.Background()) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - if blockNumber != uint64(testBlockNum) { - t.Fatalf("BlockNumber returned wrong number: %d", blockNumber) - } - // Get current block by number - block, err := ec.BlockByNumber(context.Background(), new(big.Int).SetUint64(blockNumber)) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - if block.NumberU64() != blockNumber { - t.Fatalf("BlockByNumber returned wrong block: want %d got %d", blockNumber, block.NumberU64()) - } - // Get current block by hash - blockH, err := ec.BlockByHash(context.Background(), block.Hash()) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - if block.Hash() != blockH.Hash() { - t.Fatalf("BlockByHash returned wrong block: want %v got %v", block.Hash().Hex(), blockH.Hash().Hex()) - } - // Get header by number - header, err := ec.HeaderByNumber(context.Background(), new(big.Int).SetUint64(blockNumber)) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - if block.Header().Hash() != header.Hash() { - t.Fatalf("HeaderByNumber returned wrong header: want %v got %v", block.Header().Hash().Hex(), header.Hash().Hex()) - } - // Get header by hash - headerH, err := ec.HeaderByHash(context.Background(), block.Hash()) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - if block.Header().Hash() != headerH.Hash() { - t.Fatalf("HeaderByHash returned wrong header: want %v got %v", block.Header().Hash().Hex(), headerH.Hash().Hex()) - } -} - -func testStatusFunctions(t *testing.T, client *rpc.Client) { - ec := NewClient(client) - - // Sync progress - progress, err := ec.SyncProgress(context.Background()) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - if progress != nil { - t.Fatalf("unexpected progress: %v", progress) - } - - // NetworkID - networkID, err := ec.NetworkID(context.Background()) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - if networkID.Cmp(big.NewInt(1337)) != 0 { - t.Fatalf("unexpected networkID: %v", networkID) - } - - // SuggestGasPrice - gasPrice, err := ec.SuggestGasPrice(context.Background()) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - if gasPrice.Cmp(testGasPrice) != 0 { - t.Fatalf("unexpected gas price: %v", gasPrice) - } - - // SuggestGasTipCap - gasTipCap, err := ec.SuggestGasTipCap(context.Background()) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - if gasTipCap.Cmp(testGasPrice) != 0 { - t.Fatalf("unexpected gas tip cap: %v", gasTipCap) - } - - // FeeHistory - history, err := ec.FeeHistory(context.Background(), 1, big.NewInt(2), []float64{95, 99}) - 
if err != nil {
-		t.Fatalf("unexpected error: %v", err)
-	}
-	want := &ethereum.FeeHistory{
-		OldestBlock: big.NewInt(2),
-		Reward: [][]*big.Int{
-			{
-				testGasPrice,
-				testGasPrice,
-			},
-		},
-		BaseFee: []*big.Int{
-			big.NewInt(params.InitialBaseFeeForBSC),
-			big.NewInt(params.InitialBaseFeeForBSC),
-		},
-		GasUsedRatio: []float64{0.008912678667376286},
-	}
-	if !reflect.DeepEqual(history, want) {
-		t.Fatalf("FeeHistory result doesn't match expected: (got: %v, want: %v)", history, want)
-	}
-}
-
-func testCallContractAtHash(t *testing.T, client *rpc.Client) {
-	ec := NewClient(client)
-
-	// EstimateGas
-	msg := ethereum.CallMsg{
-		From:  testAddr,
-		To:    &common.Address{},
-		Gas:   21000,
-		Value: big.NewInt(1),
-	}
-	gas, err := ec.EstimateGas(context.Background(), msg)
-	if err != nil {
-		t.Fatalf("unexpected error: %v", err)
-	}
-	if gas != 21000 {
-		t.Fatalf("unexpected gas price: %v", gas)
-	}
-	block, err := ec.HeaderByNumber(context.Background(), big.NewInt(1))
-	if err != nil {
-		t.Fatalf("BlockByNumber error: %v", err)
-	}
-	// CallContract
-	if _, err := ec.CallContractAtHash(context.Background(), msg, block.Hash()); err != nil {
-		t.Fatalf("unexpected error: %v", err)
-	}
-}
-
-func testCallContract(t *testing.T, client *rpc.Client) {
-	ec := NewClient(client)
-
-	// EstimateGas
-	msg := ethereum.CallMsg{
-		From:  testAddr,
-		To:    &common.Address{},
-		Gas:   21000,
-		Value: big.NewInt(1),
-	}
-	gas, err := ec.EstimateGas(context.Background(), msg)
-	if err != nil {
-		t.Fatalf("unexpected error: %v", err)
-	}
-	if gas != 21000 {
-		t.Fatalf("unexpected gas price: %v", gas)
-	}
-	// CallContract
-	if _, err := ec.CallContract(context.Background(), msg, big.NewInt(1)); err != nil {
-		t.Fatalf("unexpected error: %v", err)
-	}
-	// PendingCallContract
-	if _, err := ec.PendingCallContract(context.Background(), msg); err != nil {
-		t.Fatalf("unexpected error: %v", err)
-	}
-}
-
-func testSendTransactionConditional(t *testing.T, client *rpc.Client) {
-	ec := NewClient(client)
-
-	if err := sendTransactionConditional(ec); err != nil {
-		t.Fatalf("error: %v", err)
-	}
-}
-
-func sendTransactionConditional(ec *Client) error {
-	chainID, err := ec.ChainID(context.Background())
-	if err != nil {
-		return err
-	}
-
-	nonce, err := ec.PendingNonceAt(context.Background(), testAddr)
-	if err != nil {
-		return err
-	}
-
-	signer := types.LatestSignerForChainID(chainID)
-
-	tx, err := types.SignNewTx(testKey, signer, &types.LegacyTx{
-		Nonce:    nonce,
-		To:       &common.Address{2},
-		Value:    big.NewInt(1),
-		Gas:      22000,
-		GasPrice: big.NewInt(params.InitialBaseFee),
-	})
-	if err != nil {
-		return err
-	}
-
-	root := common.HexToHash("0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421")
-	return ec.SendTransactionConditional(context.Background(), tx, ethapi.TransactionOpts{
-		KnownAccounts: map[common.Address]ethapi.AccountStorage{
-			testAddr: ethapi.AccountStorage{
-				StorageRoot: &root,
-			},
-		},
-	})
-}
diff --git a/ethclient/gethclient/gethclient_test.go b/ethclient/gethclient/gethclient_test.go
deleted file mode 100644
index 158886475e..0000000000
--- a/ethclient/gethclient/gethclient_test.go
+++ /dev/null
@@ -1,570 +0,0 @@
-// Copyright 2021 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see .
-
-package gethclient
-
-import (
-	"bytes"
-	"context"
-	"encoding/json"
-	"math/big"
-	"testing"
-
-	"github.com/ethereum/go-ethereum"
-	"github.com/ethereum/go-ethereum/common"
-	"github.com/ethereum/go-ethereum/consensus/ethash"
-	"github.com/ethereum/go-ethereum/core"
-	"github.com/ethereum/go-ethereum/core/types"
-	"github.com/ethereum/go-ethereum/crypto"
-	"github.com/ethereum/go-ethereum/eth"
-	"github.com/ethereum/go-ethereum/eth/ethconfig"
-	"github.com/ethereum/go-ethereum/eth/filters"
-	"github.com/ethereum/go-ethereum/ethclient"
-	"github.com/ethereum/go-ethereum/node"
-	"github.com/ethereum/go-ethereum/params"
-	"github.com/ethereum/go-ethereum/rpc"
-)
-
-var (
-	testKey, _   = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
-	testAddr     = crypto.PubkeyToAddress(testKey.PublicKey)
-	testContract = common.HexToAddress("0xbeef")
-	testEmpty    = common.HexToAddress("0xeeee")
-	testSlot     = common.HexToHash("0xdeadbeef")
-	testValue    = crypto.Keccak256Hash(testSlot[:])
-	testBalance  = big.NewInt(2e15)
-)
-
-func newTestBackend(t *testing.T) (*node.Node, []*types.Block) {
-	// Generate test chain.
-	genesis, blocks := generateTestChain()
-	// Create node
-	n, err := node.New(&node.Config{})
-	if err != nil {
-		t.Fatalf("can't create new node: %v", err)
-	}
-	// Create Ethereum Service
-	config := &ethconfig.Config{Genesis: genesis}
-	ethservice, err := eth.New(n, config)
-	if err != nil {
-		t.Fatalf("can't create new ethereum service: %v", err)
-	}
-	filterSystem := filters.NewFilterSystem(ethservice.APIBackend, filters.Config{})
-	n.RegisterAPIs([]rpc.API{{
-		Namespace: "eth",
-		Service:   filters.NewFilterAPI(filterSystem, false),
-	}})
-
-	// Import the test chain.
-	if err := n.Start(); err != nil {
-		t.Fatalf("can't start test node: %v", err)
-	}
-	if _, err := ethservice.BlockChain().InsertChain(blocks[1:]); err != nil {
-		t.Fatalf("can't import test blocks: %v", err)
-	}
-	return n, blocks
-}
-
-func generateTestChain() (*core.Genesis, []*types.Block) {
-	genesis := &core.Genesis{
-		Config: params.AllEthashProtocolChanges,
-		Alloc: types.GenesisAlloc{
-			testAddr:     {Balance: testBalance, Storage: map[common.Hash]common.Hash{testSlot: testValue}},
-			testContract: {Nonce: 1, Code: []byte{0x13, 0x37}},
-			testEmpty:    {Balance: big.NewInt(1)},
-		},
-		ExtraData: []byte("test genesis"),
-		Timestamp: 9000,
-	}
-	generate := func(i int, g *core.BlockGen) {
-		g.OffsetTime(5)
-		g.SetExtra([]byte("test"))
-	}
-	_, blocks, _ := core.GenerateChainWithGenesis(genesis, ethash.NewFaker(), 1, generate)
-	blocks = append([]*types.Block{genesis.ToBlock()}, blocks...)
- return genesis, blocks -} - -func TestGethClient(t *testing.T) { - backend, _ := newTestBackend(t) - client := backend.Attach() - defer backend.Close() - defer client.Close() - - tests := []struct { - name string - test func(t *testing.T) - }{ - { - "TestGetProof1", - func(t *testing.T) { testGetProof(t, client, testAddr) }, - }, { - "TestGetProof2", - func(t *testing.T) { testGetProof(t, client, testContract) }, - }, { - "TestGetProofEmpty", - func(t *testing.T) { testGetProof(t, client, testEmpty) }, - }, { - "TestGetProofNonExistent", - func(t *testing.T) { testGetProofNonExistent(t, client) }, - }, { - "TestGetProofCanonicalizeKeys", - func(t *testing.T) { testGetProofCanonicalizeKeys(t, client) }, - }, { - "TestGCStats", - func(t *testing.T) { testGCStats(t, client) }, - }, { - "TestMemStats", - func(t *testing.T) { testMemStats(t, client) }, - }, { - "TestGetNodeInfo", - func(t *testing.T) { testGetNodeInfo(t, client) }, - }, { - "TestSubscribePendingTxHashes", - func(t *testing.T) { testSubscribePendingTransactions(t, client) }, - }, { - "TestSubscribePendingTxs", - func(t *testing.T) { testSubscribeFullPendingTransactions(t, client) }, - }, { - "TestCallContract", - func(t *testing.T) { testCallContract(t, client) }, - }, { - "TestCallContractWithBlockOverrides", - func(t *testing.T) { testCallContractWithBlockOverrides(t, client) }, - }, - // The testaccesslist is a bit time-sensitive: the newTestBackend imports - // one block. The `testAcessList` fails if the miner has not yet created a - // new pending-block after the import event. - // Hence: this test should be last, execute the tests serially. - { - "TestAccessList", - func(t *testing.T) { testAccessList(t, client) }, - }, { - "TestSetHead", - func(t *testing.T) { testSetHead(t, client) }, - }, - } - for _, tt := range tests { - t.Run(tt.name, tt.test) - } -} - -func testAccessList(t *testing.T, client *rpc.Client) { - ec := New(client) - // Test transfer - msg := ethereum.CallMsg{ - From: testAddr, - To: &common.Address{}, - Gas: 21000, - GasPrice: big.NewInt(875000000), - Value: big.NewInt(1), - } - al, gas, vmErr, err := ec.CreateAccessList(context.Background(), msg) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - if vmErr != "" { - t.Fatalf("unexpected vm error: %v", vmErr) - } - if gas != 21000 { - t.Fatalf("unexpected gas used: %v", gas) - } - if len(*al) != 0 { - t.Fatalf("unexpected length of accesslist: %v", len(*al)) - } - // Test reverting transaction - msg = ethereum.CallMsg{ - From: testAddr, - To: nil, - Gas: 100000, - GasPrice: big.NewInt(1000000000), - Value: big.NewInt(1), - Data: common.FromHex("0x608060806080608155fd"), - } - al, gas, vmErr, err = ec.CreateAccessList(context.Background(), msg) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - if vmErr == "" { - t.Fatalf("wanted vmErr, got none") - } - if gas == 21000 { - t.Fatalf("unexpected gas used: %v", gas) - } - if len(*al) != 1 || al.StorageKeys() != 1 { - t.Fatalf("unexpected length of accesslist: %v", len(*al)) - } - // address changes between calls, so we can't test for it. 
- if (*al)[0].Address == common.HexToAddress("0x0") { - t.Fatalf("unexpected address: %v", (*al)[0].Address) - } - if (*al)[0].StorageKeys[0] != common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000081") { - t.Fatalf("unexpected storage key: %v", (*al)[0].StorageKeys[0]) - } -} - -func testGetProof(t *testing.T, client *rpc.Client, addr common.Address) { - ec := New(client) - ethcl := ethclient.NewClient(client) - result, err := ec.GetProof(context.Background(), addr, []string{testSlot.String()}, nil) - if err != nil { - t.Fatal(err) - } - if result.Address != addr { - t.Fatalf("unexpected address, have: %v want: %v", result.Address, addr) - } - // test nonce - if nonce, _ := ethcl.NonceAt(context.Background(), addr, nil); result.Nonce != nonce { - t.Fatalf("invalid nonce, want: %v got: %v", nonce, result.Nonce) - } - // test balance - if balance, _ := ethcl.BalanceAt(context.Background(), addr, nil); result.Balance.Cmp(balance) != 0 { - t.Fatalf("invalid balance, want: %v got: %v", balance, result.Balance) - } - // test storage - if len(result.StorageProof) != 1 { - t.Fatalf("invalid storage proof, want 1 proof, got %v proof(s)", len(result.StorageProof)) - } - for _, proof := range result.StorageProof { - if proof.Key != testSlot.String() { - t.Fatalf("invalid storage proof key, want: %q, got: %q", testSlot.String(), proof.Key) - } - slotValue, _ := ethcl.StorageAt(context.Background(), addr, common.HexToHash(proof.Key), nil) - if have, want := common.BigToHash(proof.Value), common.BytesToHash(slotValue); have != want { - t.Fatalf("addr %x, invalid storage proof value: have: %v, want: %v", addr, have, want) - } - } - // test code - code, _ := ethcl.CodeAt(context.Background(), addr, nil) - if have, want := result.CodeHash, crypto.Keccak256Hash(code); have != want { - t.Fatalf("codehash wrong, have %v want %v ", have, want) - } -} - -func testGetProofCanonicalizeKeys(t *testing.T, client *rpc.Client) { - ec := New(client) - - // Tests with non-canon input for storage keys. - // Here we check that the storage key is canonicalized. - result, err := ec.GetProof(context.Background(), testAddr, []string{"0x0dEadbeef"}, nil) - if err != nil { - t.Fatal(err) - } - if result.StorageProof[0].Key != "0xdeadbeef" { - t.Fatalf("wrong storage key encoding in proof: %q", result.StorageProof[0].Key) - } - if result, err = ec.GetProof(context.Background(), testAddr, []string{"0x000deadbeef"}, nil); err != nil { - t.Fatal(err) - } - if result.StorageProof[0].Key != "0xdeadbeef" { - t.Fatalf("wrong storage key encoding in proof: %q", result.StorageProof[0].Key) - } - - // If the requested storage key is 32 bytes long, it will be returned as is. 
- hashSizedKey := "0x00000000000000000000000000000000000000000000000000000000deadbeef" - result, err = ec.GetProof(context.Background(), testAddr, []string{hashSizedKey}, nil) - if err != nil { - t.Fatal(err) - } - if result.StorageProof[0].Key != hashSizedKey { - t.Fatalf("wrong storage key encoding in proof: %q", result.StorageProof[0].Key) - } -} - -func testGetProofNonExistent(t *testing.T, client *rpc.Client) { - addr := common.HexToAddress("0x0001") - ec := New(client) - result, err := ec.GetProof(context.Background(), addr, nil, nil) - if err != nil { - t.Fatal(err) - } - if result.Address != addr { - t.Fatalf("unexpected address, have: %v want: %v", result.Address, addr) - } - // test nonce - if result.Nonce != 0 { - t.Fatalf("invalid nonce, want: %v got: %v", 0, result.Nonce) - } - // test balance - if result.Balance.Cmp(big.NewInt(0)) != 0 { - t.Fatalf("invalid balance, want: %v got: %v", 0, result.Balance) - } - // test storage - if have := len(result.StorageProof); have != 0 { - t.Fatalf("invalid storage proof, want 0 proof, got %v proof(s)", have) - } - // test codeHash - if have, want := result.CodeHash, (common.Hash{}); have != want { - t.Fatalf("codehash wrong, have %v want %v ", have, want) - } - // test codeHash - if have, want := result.StorageHash, (common.Hash{}); have != want { - t.Fatalf("storagehash wrong, have %v want %v ", have, want) - } -} - -func testGCStats(t *testing.T, client *rpc.Client) { - ec := New(client) - _, err := ec.GCStats(context.Background()) - if err != nil { - t.Fatal(err) - } -} - -func testMemStats(t *testing.T, client *rpc.Client) { - ec := New(client) - stats, err := ec.MemStats(context.Background()) - if err != nil { - t.Fatal(err) - } - if stats.Alloc == 0 { - t.Fatal("Invalid mem stats retrieved") - } -} - -func testGetNodeInfo(t *testing.T, client *rpc.Client) { - ec := New(client) - info, err := ec.GetNodeInfo(context.Background()) - if err != nil { - t.Fatal(err) - } - - if info.Name == "" { - t.Fatal("Invalid node info retrieved") - } -} - -func testSetHead(t *testing.T, client *rpc.Client) { - ec := New(client) - err := ec.SetHead(context.Background(), big.NewInt(0)) - if err != nil { - t.Fatal(err) - } -} - -func testSubscribePendingTransactions(t *testing.T, client *rpc.Client) { - ec := New(client) - ethcl := ethclient.NewClient(client) - // Subscribe to Transactions - ch := make(chan common.Hash) - ec.SubscribePendingTransactions(context.Background(), ch) - // Send a transaction - chainID, err := ethcl.ChainID(context.Background()) - if err != nil { - t.Fatal(err) - } - // Create transaction - tx := types.NewTransaction(0, common.Address{1}, big.NewInt(1), 22000, big.NewInt(1), nil) - signer := types.LatestSignerForChainID(chainID) - signature, err := crypto.Sign(signer.Hash(tx).Bytes(), testKey) - if err != nil { - t.Fatal(err) - } - signedTx, err := tx.WithSignature(signer, signature) - if err != nil { - t.Fatal(err) - } - // Send transaction - err = ethcl.SendTransaction(context.Background(), signedTx) - if err != nil { - t.Fatal(err) - } - // Check that the transaction was sent over the channel - hash := <-ch - if hash != signedTx.Hash() { - t.Fatalf("Invalid tx hash received, got %v, want %v", hash, signedTx.Hash()) - } -} - -func testSubscribeFullPendingTransactions(t *testing.T, client *rpc.Client) { - ec := New(client) - ethcl := ethclient.NewClient(client) - // Subscribe to Transactions - ch := make(chan *types.Transaction) - ec.SubscribeFullPendingTransactions(context.Background(), ch) - // Send a transaction - 
chainID, err := ethcl.ChainID(context.Background()) - if err != nil { - t.Fatal(err) - } - // Create transaction - tx := types.NewTransaction(1, common.Address{1}, big.NewInt(1), 22000, big.NewInt(1), nil) - signer := types.LatestSignerForChainID(chainID) - signature, err := crypto.Sign(signer.Hash(tx).Bytes(), testKey) - if err != nil { - t.Fatal(err) - } - signedTx, err := tx.WithSignature(signer, signature) - if err != nil { - t.Fatal(err) - } - // Send transaction - err = ethcl.SendTransaction(context.Background(), signedTx) - if err != nil { - t.Fatal(err) - } - // Check that the transaction was sent over the channel - tx = <-ch - if tx.Hash() != signedTx.Hash() { - t.Fatalf("Invalid tx hash received, got %v, want %v", tx.Hash(), signedTx.Hash()) - } -} - -func testCallContract(t *testing.T, client *rpc.Client) { - ec := New(client) - msg := ethereum.CallMsg{ - From: testAddr, - To: &common.Address{}, - Gas: 21000, - GasPrice: big.NewInt(1000000000), - Value: big.NewInt(1), - } - // CallContract without override - if _, err := ec.CallContract(context.Background(), msg, big.NewInt(0), nil); err != nil { - t.Fatalf("unexpected error: %v", err) - } - // CallContract with override - override := OverrideAccount{ - Nonce: 1, - } - mapAcc := make(map[common.Address]OverrideAccount) - mapAcc[testAddr] = override - if _, err := ec.CallContract(context.Background(), msg, big.NewInt(0), &mapAcc); err != nil { - t.Fatalf("unexpected error: %v", err) - } -} - -func TestOverrideAccountMarshal(t *testing.T) { - om := map[common.Address]OverrideAccount{ - {0x11}: { - // Zero-valued nonce is not overridden, but simply dropped by the encoder. - Nonce: 0, - }, - {0xaa}: { - Nonce: 5, - }, - {0xbb}: { - Code: []byte{1}, - }, - {0xcc}: { - // 'code', 'balance', 'state' should be set when input is - // a non-nil but empty value. - Code: []byte{}, - Balance: big.NewInt(0), - State: map[common.Hash]common.Hash{}, - // For 'stateDiff' the behavior is different, empty map - // is ignored because it makes no difference. - StateDiff: map[common.Hash]common.Hash{}, - }, - } - - marshalled, err := json.MarshalIndent(&om, "", " ") - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - expected := `{ - "0x1100000000000000000000000000000000000000": {}, - "0xaa00000000000000000000000000000000000000": { - "nonce": "0x5" - }, - "0xbb00000000000000000000000000000000000000": { - "code": "0x01" - }, - "0xcc00000000000000000000000000000000000000": { - "code": "0x", - "balance": "0x0", - "state": {} - } -}` - - if string(marshalled) != expected { - t.Error("wrong output:", string(marshalled)) - t.Error("want:", expected) - } -} - -func TestBlockOverridesMarshal(t *testing.T) { - for i, tt := range []struct { - bo BlockOverrides - want string - }{ - { - bo: BlockOverrides{}, - want: `{}`, - }, - { - bo: BlockOverrides{ - Coinbase: common.HexToAddress("0x1111111111111111111111111111111111111111"), - }, - want: `{"coinbase":"0x1111111111111111111111111111111111111111"}`, - }, - { - bo: BlockOverrides{ - Number: big.NewInt(1), - Difficulty: big.NewInt(2), - Time: 3, - GasLimit: 4, - BaseFee: big.NewInt(5), - }, - want: `{"number":"0x1","difficulty":"0x2","time":"0x3","gasLimit":"0x4","baseFee":"0x5"}`, - }, - } { - marshalled, err := json.Marshal(&tt.bo) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - if string(marshalled) != tt.want { - t.Errorf("Testcase #%d failed. 
expected\n%s\ngot\n%s", i, tt.want, string(marshalled)) - } - } -} - -func testCallContractWithBlockOverrides(t *testing.T, client *rpc.Client) { - ec := New(client) - msg := ethereum.CallMsg{ - From: testAddr, - To: &common.Address{}, - Gas: 50000, - GasPrice: big.NewInt(1000000000), - Value: big.NewInt(1), - } - override := OverrideAccount{ - // Returns coinbase address. - Code: common.FromHex("0x41806000526014600cf3"), - } - mapAcc := make(map[common.Address]OverrideAccount) - mapAcc[common.Address{}] = override - res, err := ec.CallContract(context.Background(), msg, big.NewInt(0), &mapAcc) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - if !bytes.Equal(res, common.FromHex("0x0000000000000000000000000000000000000000")) { - t.Fatalf("unexpected result: %x", res) - } - - // Now test with block overrides - bo := BlockOverrides{ - Coinbase: common.HexToAddress("0x1111111111111111111111111111111111111111"), - } - res, err = ec.CallContractWithBlockOverrides(context.Background(), msg, big.NewInt(0), &mapAcc, bo) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - if !bytes.Equal(res, common.FromHex("0x1111111111111111111111111111111111111111")) { - t.Fatalf("unexpected result: %x", res) - } -} diff --git a/ethclient/simulated/backend_test.go b/ethclient/simulated/backend_test.go deleted file mode 100644 index a8fd7913c3..0000000000 --- a/ethclient/simulated/backend_test.go +++ /dev/null @@ -1,308 +0,0 @@ -// Copyright 2019 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . 
- -package simulated - -import ( - "context" - "crypto/ecdsa" - "math/big" - "math/rand" - "testing" - "time" - - "github.com/ethereum/go-ethereum/accounts/abi/bind" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/params" -) - -var _ bind.ContractBackend = (Client)(nil) - -var ( - testKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") - testAddr = crypto.PubkeyToAddress(testKey.PublicKey) -) - -func simTestBackend(testAddr common.Address) *Backend { - return NewBackend( - types.GenesisAlloc{ - testAddr: {Balance: big.NewInt(10000000000000000)}, - }, - ) -} - -func newTx(sim *Backend, key *ecdsa.PrivateKey) (*types.Transaction, error) { - client := sim.Client() - - // create a signed transaction to send - head, _ := client.HeaderByNumber(context.Background(), nil) // Should be child's, good enough - gasPrice := new(big.Int).Add(head.BaseFee, big.NewInt(params.GWei)) - addr := crypto.PubkeyToAddress(key.PublicKey) - chainid, _ := client.ChainID(context.Background()) - nonce, err := client.PendingNonceAt(context.Background(), addr) - if err != nil { - return nil, err - } - tx := types.NewTx(&types.DynamicFeeTx{ - ChainID: chainid, - Nonce: nonce, - GasTipCap: big.NewInt(params.GWei), - GasFeeCap: gasPrice, - Gas: 21000, - To: &addr, - }) - return types.SignTx(tx, types.LatestSignerForChainID(chainid), key) -} - -func TestNewBackend(t *testing.T) { - sim := NewBackend(types.GenesisAlloc{}) - defer sim.Close() - - client := sim.Client() - num, err := client.BlockNumber(context.Background()) - if err != nil { - t.Fatal(err) - } - if num != 0 { - t.Fatalf("expected 0 got %v", num) - } - // Create a block - sim.Commit() - num, err = client.BlockNumber(context.Background()) - if err != nil { - t.Fatal(err) - } - if num != 1 { - t.Fatalf("expected 1 got %v", num) - } -} - -func TestAdjustTime(t *testing.T) { - sim := NewBackend(types.GenesisAlloc{}) - defer sim.Close() - - client := sim.Client() - block1, _ := client.BlockByNumber(context.Background(), nil) - - // Create a block - if err := sim.AdjustTime(time.Minute); err != nil { - t.Fatal(err) - } - block2, _ := client.BlockByNumber(context.Background(), nil) - prevTime := block1.Time() - newTime := block2.Time() - if newTime-prevTime != uint64(time.Minute) { - t.Errorf("adjusted time not equal to 60 seconds. prev: %v, new: %v", prevTime, newTime) - } -} - -func TestSendTransaction(t *testing.T) { - sim := simTestBackend(testAddr) - defer sim.Close() - - client := sim.Client() - ctx := context.Background() - - signedTx, err := newTx(sim, testKey) - if err != nil { - t.Errorf("could not create transaction: %v", err) - } - // send tx to simulated backend - err = client.SendTransaction(ctx, signedTx) - if err != nil { - t.Errorf("could not add tx to pending block: %v", err) - } - sim.Commit() - block, err := client.BlockByNumber(ctx, big.NewInt(1)) - if err != nil { - t.Errorf("could not get block at height 1: %v", err) - } - - if signedTx.Hash() != block.Transactions()[0].Hash() { - t.Errorf("did not commit sent transaction. expected hash %v got hash %v", block.Transactions()[0].Hash(), signedTx.Hash()) - } -} - -// TestFork check that the chain length after a reorg is correct. -// Steps: -// 1. Save the current block which will serve as parent for the fork. -// 2. Mine n blocks with n ∈ [0, 20]. -// 3. Assert that the chain length is n. -// 4. Fork by using the parent block as ancestor. 
-// 5. Mine n+1 blocks which should trigger a reorg. -// 6. Assert that the chain length is n+1. -// Since Commit() was called 2n+1 times in total, -// having a chain length of just n+1 means that a reorg occurred. -func TestFork(t *testing.T) { - t.Parallel() - testAddr := crypto.PubkeyToAddress(testKey.PublicKey) - sim := simTestBackend(testAddr) - defer sim.Close() - - client := sim.Client() - ctx := context.Background() - - // 1. - parent, _ := client.HeaderByNumber(ctx, nil) - - // 2. - n := int(rand.Int31n(21)) - for i := 0; i < n; i++ { - sim.Commit() - } - - // 3. - b, _ := client.BlockNumber(ctx) - if b != uint64(n) { - t.Error("wrong chain length") - } - - // 4. - sim.Fork(parent.Hash()) - - // 5. - for i := 0; i < n+1; i++ { - sim.Commit() - } - - // 6. - b, _ = client.BlockNumber(ctx) - if b != uint64(n+1) { - t.Error("wrong chain length") - } -} - -// TestForkResendTx checks that re-sending a TX after a fork -// is possible and does not cause a "nonce mismatch" panic. -// Steps: -// 1. Save the current block which will serve as parent for the fork. -// 2. Send a transaction. -// 3. Check that the TX is included in block 1. -// 4. Fork by using the parent block as ancestor. -// 5. Mine a block, Re-send the transaction and mine another one. -// 6. Check that the TX is now included in block 2. -func TestForkResendTx(t *testing.T) { - t.Parallel() - testAddr := crypto.PubkeyToAddress(testKey.PublicKey) - sim := simTestBackend(testAddr) - defer sim.Close() - - client := sim.Client() - ctx := context.Background() - - // 1. - parent, _ := client.HeaderByNumber(ctx, nil) - - // 2. - tx, err := newTx(sim, testKey) - if err != nil { - t.Fatalf("could not create transaction: %v", err) - } - client.SendTransaction(ctx, tx) - sim.Commit() - - // 3. - receipt, _ := client.TransactionReceipt(ctx, tx.Hash()) - if h := receipt.BlockNumber.Uint64(); h != 1 { - t.Errorf("TX included in wrong block: %d", h) - } - - // 4. - if err := sim.Fork(parent.Hash()); err != nil { - t.Errorf("forking: %v", err) - } - - // 5. 
- sim.Commit() - if err := client.SendTransaction(ctx, tx); err != nil { - t.Fatalf("sending transaction: %v", err) - } - sim.Commit() - receipt, _ = client.TransactionReceipt(ctx, tx.Hash()) - if h := receipt.BlockNumber.Uint64(); h != 2 { - t.Errorf("TX included in wrong block: %d", h) - } -} - -func TestCommitReturnValue(t *testing.T) { - t.Parallel() - testAddr := crypto.PubkeyToAddress(testKey.PublicKey) - sim := simTestBackend(testAddr) - defer sim.Close() - - client := sim.Client() - ctx := context.Background() - - // Test if Commit returns the correct block hash - h1 := sim.Commit() - cur, _ := client.HeaderByNumber(ctx, nil) - if h1 != cur.Hash() { - t.Error("Commit did not return the hash of the last block.") - } - - // Create a block in the original chain (containing a transaction to force different block hashes) - head, _ := client.HeaderByNumber(ctx, nil) // Should be child's, good enough - gasPrice := new(big.Int).Add(head.BaseFee, big.NewInt(1)) - _tx := types.NewTransaction(0, testAddr, big.NewInt(1000), params.TxGas, gasPrice, nil) - tx, _ := types.SignTx(_tx, types.HomesteadSigner{}, testKey) - client.SendTransaction(ctx, tx) - - h2 := sim.Commit() - - // Create another block in the original chain - sim.Commit() - - // Fork at the first bock - if err := sim.Fork(h1); err != nil { - t.Errorf("forking: %v", err) - } - - // Test if Commit returns the correct block hash after the reorg - h2fork := sim.Commit() - if h2 == h2fork { - t.Error("The block in the fork and the original block are the same block!") - } - if header, err := client.HeaderByHash(ctx, h2fork); err != nil || header == nil { - t.Error("Could not retrieve the just created block (side-chain)") - } -} - -// TestAdjustTimeAfterFork ensures that after a fork, AdjustTime uses the pending fork -// block's parent rather than the canonical head's parent. -func TestAdjustTimeAfterFork(t *testing.T) { - t.Parallel() - testAddr := crypto.PubkeyToAddress(testKey.PublicKey) - sim := simTestBackend(testAddr) - defer sim.Close() - - client := sim.Client() - ctx := context.Background() - - sim.Commit() // h1 - h1, _ := client.HeaderByNumber(ctx, nil) - - sim.Commit() // h2 - sim.Fork(h1.Hash()) - sim.AdjustTime(1 * time.Second) - sim.Commit() - - head, _ := client.HeaderByNumber(ctx, nil) - if head.Number.Uint64() == 2 && head.ParentHash != h1.Hash() { - t.Errorf("failed to build block on fork") - } -} diff --git a/ethclient/simulated/options_test.go b/ethclient/simulated/options_test.go deleted file mode 100644 index 9ff2be5ff9..0000000000 --- a/ethclient/simulated/options_test.go +++ /dev/null @@ -1,74 +0,0 @@ -// Copyright 2024 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . 
- -package simulated - -import ( - "context" - "math/big" - "strings" - "testing" - - "github.com/ethereum/go-ethereum" - "github.com/ethereum/go-ethereum/core" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/params" -) - -// Tests that the simulator starts with the initial gas limit in the genesis block, -// and that it keeps the same target value. -func TestWithBlockGasLimitOption(t *testing.T) { - // Construct a simulator, targeting a different gas limit - sim := NewBackend(types.GenesisAlloc{}, WithBlockGasLimit(12_345_678)) - defer sim.Close() - - client := sim.Client() - genesis, err := client.BlockByNumber(context.Background(), big.NewInt(0)) - if err != nil { - t.Fatalf("failed to retrieve genesis block: %v", err) - } - if genesis.GasLimit() != 12_345_678 { - t.Errorf("genesis gas limit mismatch: have %v, want %v", genesis.GasLimit(), 12_345_678) - } - // Produce a number of blocks and verify the locked in gas target - sim.Commit() - head, err := client.BlockByNumber(context.Background(), big.NewInt(1)) - if err != nil { - t.Fatalf("failed to retrieve head block: %v", err) - } - if head.GasLimit() != 12_345_678 { - t.Errorf("head gas limit mismatch: have %v, want %v", head.GasLimit(), 12_345_678) - } -} - -// Tests that the simulator honors the RPC call caps set by the options. -func TestWithCallGasLimitOption(t *testing.T) { - // Construct a simulator, targeting a different gas limit - sim := NewBackend(types.GenesisAlloc{ - testAddr: {Balance: big.NewInt(10000000000000000)}, - }, WithCallGasLimit(params.TxGas-1)) - defer sim.Close() - - client := sim.Client() - _, err := client.CallContract(context.Background(), ethereum.CallMsg{ - From: testAddr, - To: &testAddr, - Gas: 21000, - }, nil) - if !strings.Contains(err.Error(), core.ErrIntrinsicGas.Error()) { - t.Fatalf("error mismatch: have %v, want %v", err, core.ErrIntrinsicGas) - } -} diff --git a/go.mod b/go.mod index b59bbab5a6..018e7d0375 100644 --- a/go.mod +++ b/go.mod @@ -13,6 +13,7 @@ require ( github.com/aws/aws-sdk-go-v2/credentials v1.13.43 github.com/aws/aws-sdk-go-v2/service/route53 v1.30.2 github.com/bnb-chain/fastssz v0.1.2 + github.com/bnb-chain/greenfield-bundle-sdk v1.1.0 github.com/bnb-chain/ics23 v0.1.0 github.com/btcsuite/btcd/btcec/v2 v2.3.2 github.com/cespare/cp v1.1.1 @@ -38,7 +39,6 @@ require ( github.com/golang/protobuf v1.5.4 github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb github.com/google/gofuzz v1.2.0 - github.com/google/pprof v0.0.0-20240207164012-fb44976bdcd5 github.com/google/uuid v1.4.0 github.com/gorilla/websocket v1.5.1 github.com/graph-gophers/graphql-go v1.3.0 @@ -154,6 +154,7 @@ require ( github.com/google/go-cmp v0.6.0 // indirect github.com/google/go-querystring v1.1.0 // indirect github.com/google/gopacket v1.1.19 // indirect + github.com/google/pprof v0.0.0-20240207164012-fb44976bdcd5 // indirect github.com/gorilla/mux v1.8.0 // indirect github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 // indirect github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect diff --git a/go.sum b/go.sum index 0d40c46281..e3581fa194 100644 --- a/go.sum +++ b/go.sum @@ -1,3 +1,4 @@ +4d63.com/gochecknoglobals v0.1.0/go.mod h1:wfdC5ZjKSPr7CybKEcgJhUOgeAQW1+7WcyK8OvUilfo= cloud.google.com/go v0.16.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.31.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= @@ -25,26 +26,148 @@ cloud.google.com/go 
v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPT cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= +cloud.google.com/go v0.110.0/go.mod h1:SJnCLqQ0FCFGSZMUNUf84MV3Aia54kn7pi8st7tMzaY= +cloud.google.com/go/accessapproval v1.6.0/go.mod h1:R0EiYnwV5fsRFiKZkPHr6mwyk2wxUJ30nL4j2pcFY2E= +cloud.google.com/go/accesscontextmanager v1.7.0/go.mod h1:CEGLewx8dwa33aDAZQujl7Dx+uYhS0eay198wB/VumQ= +cloud.google.com/go/aiplatform v1.37.0/go.mod h1:IU2Cv29Lv9oCn/9LkFiiuKfwrRTq+QQMbW+hPCxJGZw= +cloud.google.com/go/analytics v0.19.0/go.mod h1:k8liqf5/HCnOUkbawNtrWWc+UAzyDlW89doe8TtoDsE= +cloud.google.com/go/apigateway v1.5.0/go.mod h1:GpnZR3Q4rR7LVu5951qfXPJCHquZt02jf7xQx7kpqN8= +cloud.google.com/go/apigeeconnect v1.5.0/go.mod h1:KFaCqvBRU6idyhSNyn3vlHXc8VMDJdRmwDF6JyFRqZ8= +cloud.google.com/go/apigeeregistry v0.6.0/go.mod h1:BFNzW7yQVLZ3yj0TKcwzb8n25CFBri51GVGOEUcgQsc= +cloud.google.com/go/apikeys v0.6.0/go.mod h1:kbpXu5upyiAlGkKrJgQl8A0rKNNJ7dQ377pdroRSSi8= +cloud.google.com/go/appengine v1.7.1/go.mod h1:IHLToyb/3fKutRysUlFO0BPt5j7RiQ45nrzEJmKTo6E= +cloud.google.com/go/area120 v0.7.1/go.mod h1:j84i4E1RboTWjKtZVWXPqvK5VHQFJRF2c1Nm69pWm9k= +cloud.google.com/go/artifactregistry v1.13.0/go.mod h1:uy/LNfoOIivepGhooAUpL1i30Hgee3Cu0l4VTWHUC08= +cloud.google.com/go/asset v1.13.0/go.mod h1:WQAMyYek/b7NBpYq/K4KJWcRqzoalEsxz/t/dTk4THw= +cloud.google.com/go/assuredworkloads v1.10.0/go.mod h1:kwdUQuXcedVdsIaKgKTp9t0UJkE5+PAVNhdQm4ZVq2E= +cloud.google.com/go/automl v1.12.0/go.mod h1:tWDcHDp86aMIuHmyvjuKeeHEGq76lD7ZqfGLN6B0NuU= +cloud.google.com/go/baremetalsolution v0.5.0/go.mod h1:dXGxEkmR9BMwxhzBhV0AioD0ULBmuLZI8CdwalUxuss= +cloud.google.com/go/batch v0.7.0/go.mod h1:vLZN95s6teRUqRQ4s3RLDsH8PvboqBK+rn1oevL159g= +cloud.google.com/go/beyondcorp v0.5.0/go.mod h1:uFqj9X+dSfrheVp7ssLTaRHd2EHqSL4QZmH4e8WXGGU= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/bigquery v1.50.0/go.mod h1:YrleYEh2pSEbgTBZYMJ5SuSr0ML3ypjRB1zgf7pvQLU= cloud.google.com/go/bigtable v1.2.0/go.mod h1:JcVAOl45lrTmQfLj7T6TxyMzIN/3FGGcFm+2xVAli2o= +cloud.google.com/go/billing v1.13.0/go.mod h1:7kB2W9Xf98hP9Sr12KfECgfGclsH3CQR0R08tnRlRbc= +cloud.google.com/go/binaryauthorization v1.5.0/go.mod h1:OSe4OU1nN/VswXKRBmciKpo9LulY41gch5c68htf3/Q= +cloud.google.com/go/certificatemanager v1.6.0/go.mod h1:3Hh64rCKjRAX8dXgRAyOcY5vQ/fE1sh8o+Mdd6KPgY8= +cloud.google.com/go/channel v1.12.0/go.mod h1:VkxCGKASi4Cq7TbXxlaBezonAYpp1GCnKMY6tnMQnLU= +cloud.google.com/go/cloudbuild v1.9.0/go.mod h1:qK1d7s4QlO0VwfYn5YuClDGg2hfmLZEb4wQGAbIgL1s= +cloud.google.com/go/clouddms v1.5.0/go.mod h1:QSxQnhikCLUw13iAbffF2CZxAER3xDGNHjsTAkQJcQA= +cloud.google.com/go/cloudtasks v1.10.0/go.mod h1:NDSoTLkZ3+vExFEWu2UJV1arUyzVDAiZtdWcsUyNwBs= +cloud.google.com/go/compute v1.20.1/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= +cloud.google.com/go/compute/metadata 
v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= +cloud.google.com/go/contactcenterinsights v1.6.0/go.mod h1:IIDlT6CLcDoyv79kDv8iWxMSTZhLxSCofVV5W6YFM/w= +cloud.google.com/go/container v1.15.0/go.mod h1:ft+9S0WGjAyjDggg5S06DXj+fHJICWg8L7isCQe9pQA= +cloud.google.com/go/containeranalysis v0.9.0/go.mod h1:orbOANbwk5Ejoom+s+DUCTTJ7IBdBQJDcSylAx/on9s= +cloud.google.com/go/datacatalog v1.13.0/go.mod h1:E4Rj9a5ZtAxcQJlEBTLgMTphfP11/lNaAshpoBgemX8= +cloud.google.com/go/dataflow v0.8.0/go.mod h1:Rcf5YgTKPtQyYz8bLYhFoIV/vP39eL7fWNcSOyFfLJE= +cloud.google.com/go/dataform v0.7.0/go.mod h1:7NulqnVozfHvWUBpMDfKMUESr+85aJsC/2O0o3jWPDE= +cloud.google.com/go/datafusion v1.6.0/go.mod h1:WBsMF8F1RhSXvVM8rCV3AeyWVxcC2xY6vith3iw3S+8= +cloud.google.com/go/datalabeling v0.7.0/go.mod h1:WPQb1y08RJbmpM3ww0CSUAGweL0SxByuW2E+FU+wXcM= +cloud.google.com/go/dataplex v1.6.0/go.mod h1:bMsomC/aEJOSpHXdFKFGQ1b0TDPIeL28nJObeO1ppRs= +cloud.google.com/go/dataproc v1.12.0/go.mod h1:zrF3aX0uV3ikkMz6z4uBbIKyhRITnxvr4i3IjKsKrw4= +cloud.google.com/go/dataqna v0.7.0/go.mod h1:Lx9OcIIeqCrw1a6KdO3/5KMP1wAmTc0slZWwP12Qq3c= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/datastore v1.11.0/go.mod h1:TvGxBIHCS50u8jzG+AW/ppf87v1of8nwzFNgEZU1D3c= +cloud.google.com/go/datastream v1.7.0/go.mod h1:uxVRMm2elUSPuh65IbZpzJNMbuzkcvu5CjMqVIUHrww= +cloud.google.com/go/deploy v1.8.0/go.mod h1:z3myEJnA/2wnB4sgjqdMfgxCA0EqC3RBTNcVPs93mtQ= +cloud.google.com/go/dialogflow v1.32.0/go.mod h1:jG9TRJl8CKrDhMEcvfcfFkkpp8ZhgPz3sBGmAUYJ2qE= +cloud.google.com/go/dlp v1.9.0/go.mod h1:qdgmqgTyReTz5/YNSSuueR8pl7hO0o9bQ39ZhtgkWp4= +cloud.google.com/go/documentai v1.18.0/go.mod h1:F6CK6iUH8J81FehpskRmhLq/3VlwQvb7TvwOceQ2tbs= +cloud.google.com/go/domains v0.8.0/go.mod h1:M9i3MMDzGFXsydri9/vW+EWz9sWb4I6WyHqdlAk0idE= +cloud.google.com/go/edgecontainer v1.0.0/go.mod h1:cttArqZpBB2q58W/upSG++ooo6EsblxDIolxa3jSjbY= +cloud.google.com/go/errorreporting v0.3.0/go.mod h1:xsP2yaAp+OAW4OIm60An2bbLpqIhKXdWR/tawvl7QzU= +cloud.google.com/go/essentialcontacts v1.5.0/go.mod h1:ay29Z4zODTuwliK7SnX8E86aUF2CTzdNtvv42niCX0M= +cloud.google.com/go/eventarc v1.11.0/go.mod h1:PyUjsUKPWoRBCHeOxZd/lbOOjahV41icXyUY5kSTvVY= +cloud.google.com/go/filestore v1.6.0/go.mod h1:di5unNuss/qfZTw2U9nhFqo8/ZDSc466dre85Kydllg= cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= +cloud.google.com/go/firestore v1.9.0/go.mod h1:HMkjKHNTtRyZNiMzu7YAsLr9K3X2udY2AMwDaMEQiiE= +cloud.google.com/go/functions v1.13.0/go.mod h1:EU4O007sQm6Ef/PwRsI8N2umygGqPBS/IZQKBQBcJ3c= +cloud.google.com/go/gaming v1.9.0/go.mod h1:Fc7kEmCObylSWLO334NcO+O9QMDyz+TKC4v1D7X+Bc0= +cloud.google.com/go/gkebackup v0.4.0/go.mod h1:byAyBGUwYGEEww7xsbnUTBHIYcOPy/PgUWUtOeRm9Vg= +cloud.google.com/go/gkeconnect v0.7.0/go.mod h1:SNfmVqPkaEi3bF/B3CNZOAYPYdg7sU+obZ+QTky2Myw= +cloud.google.com/go/gkehub v0.12.0/go.mod h1:djiIwwzTTBrF5NaXCGv3mf7klpEMcST17VBTVVDcuaw= +cloud.google.com/go/gkemulticloud v0.5.0/go.mod h1:W0JDkiyi3Tqh0TJr//y19wyb1yf8llHVto2Htf2Ja3Y= +cloud.google.com/go/gsuiteaddons v1.5.0/go.mod h1:TFCClYLd64Eaa12sFVmUyG62tk4mdIsI7pAnSXRkcFo= +cloud.google.com/go/iam v0.13.0/go.mod h1:ljOg+rcNfzZ5d6f1nAUJ8ZIxOaZUVoS14bKCtaLZ/D0= +cloud.google.com/go/iap v1.7.1/go.mod h1:WapEwPc7ZxGt2jFGB/C/bm+hP0Y6NXzOYGjpPnmMS74= +cloud.google.com/go/ids v1.3.0/go.mod h1:JBdTYwANikFKaDP6LtW5JAi4gubs57SVNQjemdt6xV4= +cloud.google.com/go/iot 
v1.6.0/go.mod h1:IqdAsmE2cTYYNO1Fvjfzo9po179rAtJeVGUvkLN3rLE= +cloud.google.com/go/kms v1.10.1/go.mod h1:rIWk/TryCkR59GMC3YtHtXeLzd634lBbKenvyySAyYI= +cloud.google.com/go/language v1.9.0/go.mod h1:Ns15WooPM5Ad/5no/0n81yUetis74g3zrbeJBE+ptUY= +cloud.google.com/go/lifesciences v0.8.0/go.mod h1:lFxiEOMqII6XggGbOnKiyZ7IBwoIqA84ClvoezaA/bo= +cloud.google.com/go/logging v1.7.0/go.mod h1:3xjP2CjkM3ZkO73aj4ASA5wRPGGCRrPIAeNqVNkzY8M= +cloud.google.com/go/longrunning v0.4.1/go.mod h1:4iWDqhBZ70CvZ6BfETbvam3T8FMvLK+eFj0E6AaRQTo= +cloud.google.com/go/managedidentities v1.5.0/go.mod h1:+dWcZ0JlUmpuxpIDfyP5pP5y0bLdRwOS4Lp7gMni/LA= +cloud.google.com/go/maps v0.7.0/go.mod h1:3GnvVl3cqeSvgMcpRlQidXsPYuDGQ8naBis7MVzpXsY= +cloud.google.com/go/mediatranslation v0.7.0/go.mod h1:LCnB/gZr90ONOIQLgSXagp8XUW1ODs2UmUMvcgMfI2I= +cloud.google.com/go/memcache v1.9.0/go.mod h1:8oEyzXCu+zo9RzlEaEjHl4KkgjlNDaXbCQeQWlzNFJM= +cloud.google.com/go/metastore v1.10.0/go.mod h1:fPEnH3g4JJAk+gMRnrAnoqyv2lpUCqJPWOodSaf45Eo= +cloud.google.com/go/monitoring v1.13.0/go.mod h1:k2yMBAB1H9JT/QETjNkgdCGD9bPF712XiLTVr+cBrpw= +cloud.google.com/go/networkconnectivity v1.11.0/go.mod h1:iWmDD4QF16VCDLXUqvyspJjIEtBR/4zq5hwnY2X3scM= +cloud.google.com/go/networkmanagement v1.6.0/go.mod h1:5pKPqyXjB/sgtvB5xqOemumoQNB7y95Q7S+4rjSOPYY= +cloud.google.com/go/networksecurity v0.8.0/go.mod h1:B78DkqsxFG5zRSVuwYFRZ9Xz8IcQ5iECsNrPn74hKHU= +cloud.google.com/go/notebooks v1.8.0/go.mod h1:Lq6dYKOYOWUCTvw5t2q1gp1lAp0zxAxRycayS0iJcqQ= +cloud.google.com/go/optimization v1.3.1/go.mod h1:IvUSefKiwd1a5p0RgHDbWCIbDFgKuEdB+fPPuP0IDLI= +cloud.google.com/go/orchestration v1.6.0/go.mod h1:M62Bevp7pkxStDfFfTuCOaXgaaqRAga1yKyoMtEoWPQ= +cloud.google.com/go/orgpolicy v1.10.0/go.mod h1:w1fo8b7rRqlXlIJbVhOMPrwVljyuW5mqssvBtU18ONc= +cloud.google.com/go/osconfig v1.11.0/go.mod h1:aDICxrur2ogRd9zY5ytBLV89KEgT2MKB2L/n6x1ooPw= +cloud.google.com/go/oslogin v1.9.0/go.mod h1:HNavntnH8nzrn8JCTT5fj18FuJLFJc4NaZJtBnQtKFs= +cloud.google.com/go/phishingprotection v0.7.0/go.mod h1:8qJI4QKHoda/sb/7/YmMQ2omRLSLYSu9bU0EKCNI+Lk= +cloud.google.com/go/policytroubleshooter v1.6.0/go.mod h1:zYqaPTsmfvpjm5ULxAyD/lINQxJ0DDsnWOP/GZ7xzBc= +cloud.google.com/go/privatecatalog v0.8.0/go.mod h1:nQ6pfaegeDAq/Q5lrfCQzQLhubPiZhSaNhIgfJlnIXs= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/pubsub v1.30.0/go.mod h1:qWi1OPS0B+b5L+Sg6Gmc9zD1Y+HaM0MdUr7LsupY1P4= +cloud.google.com/go/pubsublite v1.7.0/go.mod h1:8hVMwRXfDfvGm3fahVbtDbiLePT3gpoiJYJY+vxWxVM= +cloud.google.com/go/recaptchaenterprise/v2 v2.7.0/go.mod h1:19wVj/fs5RtYtynAPJdDTb69oW0vNHYDBTbB4NvMD9c= +cloud.google.com/go/recommendationengine v0.7.0/go.mod h1:1reUcE3GIu6MeBz/h5xZJqNLuuVjNg1lmWMPyjatzac= +cloud.google.com/go/recommender v1.9.0/go.mod h1:PnSsnZY7q+VL1uax2JWkt/UegHssxjUVVCrX52CuEmQ= +cloud.google.com/go/redis v1.11.0/go.mod h1:/X6eicana+BWcUda5PpwZC48o37SiFVTFSs0fWAJ7uQ= +cloud.google.com/go/resourcemanager v1.7.0/go.mod h1:HlD3m6+bwhzj9XCouqmeiGuni95NTrExfhoSrkC/3EI= +cloud.google.com/go/resourcesettings v1.5.0/go.mod h1:+xJF7QSG6undsQDfsCJyqWXyBwUoJLhetkRMDRnIoXA= +cloud.google.com/go/retail v1.12.0/go.mod h1:UMkelN/0Z8XvKymXFbD4EhFJlYKRx1FGhQkVPU5kF14= +cloud.google.com/go/run v0.9.0/go.mod 
h1:Wwu+/vvg8Y+JUApMwEDfVfhetv30hCG4ZwDR/IXl2Qg= +cloud.google.com/go/scheduler v1.9.0/go.mod h1:yexg5t+KSmqu+njTIh3b7oYPheFtBWGcbVUYF1GGMIc= +cloud.google.com/go/secretmanager v1.10.0/go.mod h1:MfnrdvKMPNra9aZtQFvBcvRU54hbPD8/HayQdlUgJpU= +cloud.google.com/go/security v1.13.0/go.mod h1:Q1Nvxl1PAgmeW0y3HTt54JYIvUdtcpYKVfIB8AOMZ+0= +cloud.google.com/go/securitycenter v1.19.0/go.mod h1:LVLmSg8ZkkyaNy4u7HCIshAngSQ8EcIRREP3xBnyfag= +cloud.google.com/go/servicecontrol v1.11.1/go.mod h1:aSnNNlwEFBY+PWGQ2DoM0JJ/QUXqV5/ZD9DOLB7SnUk= +cloud.google.com/go/servicedirectory v1.9.0/go.mod h1:29je5JjiygNYlmsGz8k6o+OZ8vd4f//bQLtvzkPPT/s= +cloud.google.com/go/servicemanagement v1.8.0/go.mod h1:MSS2TDlIEQD/fzsSGfCdJItQveu9NXnUniTrq/L8LK4= +cloud.google.com/go/serviceusage v1.6.0/go.mod h1:R5wwQcbOWsyuOfbP9tGdAnCAc6B9DRwPG1xtWMDeuPA= +cloud.google.com/go/shell v1.6.0/go.mod h1:oHO8QACS90luWgxP3N9iZVuEiSF84zNyLytb+qE2f9A= +cloud.google.com/go/spanner v1.45.0/go.mod h1:FIws5LowYz8YAE1J8fOS7DJup8ff7xJeetWEo5REA2M= +cloud.google.com/go/speech v1.15.0/go.mod h1:y6oH7GhqCaZANH7+Oe0BhgIogsNInLlz542tg3VqeYI= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= +cloud.google.com/go/storagetransfer v1.8.0/go.mod h1:JpegsHHU1eXg7lMHkvf+KE5XDJ7EQu0GwNJbbVGanEw= +cloud.google.com/go/talent v1.5.0/go.mod h1:G+ODMj9bsasAEJkQSzO2uHQWXHHXUomArjWQQYkqK6c= +cloud.google.com/go/texttospeech v1.6.0/go.mod h1:YmwmFT8pj1aBblQOI3TfKmwibnsfvhIBzPXcW4EBovc= +cloud.google.com/go/tpu v1.5.0/go.mod h1:8zVo1rYDFuW2l4yZVY0R0fb/v44xLh3llq7RuV61fPM= +cloud.google.com/go/trace v1.9.0/go.mod h1:lOQqpE5IaWY0Ixg7/r2SjixMuc6lfTFeO4QGM4dQWOk= +cloud.google.com/go/translate v1.7.0/go.mod h1:lMGRudH1pu7I3n3PETiOB2507gf3HnfLV8qlkHZEyos= +cloud.google.com/go/video v1.15.0/go.mod h1:SkgaXwT+lIIAKqWAJfktHT/RbgjSuY6DobxEp0C5yTQ= +cloud.google.com/go/videointelligence v1.10.0/go.mod h1:LHZngX1liVtUhZvi2uNS0VQuOzNi2TkY1OakiuoUOjU= +cloud.google.com/go/vision/v2 v2.7.0/go.mod h1:H89VysHy21avemp6xcf9b9JvZHVehWbET0uT/bcuY/0= +cloud.google.com/go/vmmigration v1.6.0/go.mod h1:bopQ/g4z+8qXzichC7GW1w2MjbErL54rk3/C843CjfY= +cloud.google.com/go/vmwareengine v0.3.0/go.mod h1:wvoyMvNWdIzxMYSpH/R7y2h5h3WFkx6d+1TIsP39WGY= +cloud.google.com/go/vpcaccess v1.6.0/go.mod h1:wX2ILaNhe7TlVa4vC5xce1bCnqE3AeH27RV31lnmZes= +cloud.google.com/go/webrisk v1.8.0/go.mod h1:oJPDuamzHXgUc+b8SiHRcVInZQuybnvEW72PqTc7sSg= +cloud.google.com/go/websecurityscanner v1.5.0/go.mod h1:Y6xdCPy81yi0SQnDY1xdNTNpfY1oAgXUlcfN3B3eSng= +cloud.google.com/go/workflows v1.10.0/go.mod h1:fZ8LmRmZQWacon9UCX1r/g/DfAXx5VcPALq2CxzdePw= collectd.org v0.3.0/go.mod h1:A/8DzQBkF6abtvrT2j/AU/4tiBgJWYyh0y/oB/4MlWE= contrib.go.opencensus.io/exporter/jaeger v0.2.1 h1:yGBYzYMewVL0yO9qqJv3Z5+IRhPdU7e9o/2oKpX4YvI= contrib.go.opencensus.io/exporter/jaeger v0.2.1/go.mod h1:Y8IsLgdxqh1QxYxPC5IgXVmBaeLUeQFfBeBi9PbeZd0= @@ -54,8 +177,11 @@ dmitri.shuralyov.com/html/belt v0.0.0-20180602232347-f7d459c86be0/go.mod h1:JLBr dmitri.shuralyov.com/service/change v0.0.0-20181023043359-a85b471d5412/go.mod h1:a1inKt/atXimZ4Mv927x+r7UpyzRUf4emIoiiSC2TN4= 
dmitri.shuralyov.com/state v0.0.0-20180228185332-28bcc343414c/go.mod h1:0PRwlb0D6DFvNNtx+9ybjezNCa8XF0xaYcETyp6rHWU= git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg= +github.com/Abirdcfly/dupword v0.0.7/go.mod h1:K/4M1kj+Zh39d2aotRwypvasonOyAMH1c/IZJzE0dmk= github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= +github.com/Antonboom/errname v0.1.7/go.mod h1:g0ONh16msHIPgJSGsecu1G/dcF2hlYR/0SddnIAGavU= +github.com/Antonboom/nilnil v0.1.1/go.mod h1:L1jBqoWM7AOeTD+tSquifKSesRHs4ZdaxvZR+xdJEaI= github.com/Azure/azure-pipeline-go v0.2.1/go.mod h1:UGSo8XybXnIGZ3epmeBw7Jdz+HiUVpqIlpz/HKHylF4= github.com/Azure/azure-pipeline-go v0.2.2/go.mod h1:4rQ/NZncSvGqNkkOsNpOU1tgoNuIlp9AfUH5G1tvCHc= github.com/Azure/azure-sdk-for-go/sdk/azcore v1.7.0 h1:8q4SaHjFsClSvuVne0ID/5Ka8u3fcIHyqkLjcFpNRHQ= @@ -69,6 +195,7 @@ github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.2.0/ github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.2.0 h1:gggzg0SUMs6SQbEw+3LoSsYf9YMjkupeAnHMX8O9mmY= github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.2.0/go.mod h1:+6KLcKIVgxoBDMqMO/Nvy7bZ9a0nbU3I1DtFQK3YvB4= github.com/Azure/azure-storage-blob-go v0.7.0/go.mod h1:f9YQKtsG1nMisotuTPpO0tjNuEjKRYAcJU8/ydDI++4= +github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= github.com/Azure/go-autorest/autorest v0.11.1/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw= @@ -96,19 +223,33 @@ github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbi github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/ChainSafe/go-schnorrkel v0.0.0-20200405005733-88cbf1b4c40d h1:nalkkPQcITbvhmL4+C4cKA87NW0tfm3Kl9VXRoPywFg= github.com/ChainSafe/go-schnorrkel v0.0.0-20200405005733-88cbf1b4c40d/go.mod h1:URdX5+vg25ts3aCh8H5IFZybJYKWhJHYMTnf+ULtoC4= +github.com/CloudyKit/fastprinter v0.0.0-20200109182630-33d98a066a53/go.mod h1:+3IMCy2vIlbG1XG/0ggNQv0SvxCAIpPM5b1nCz56Xno= +github.com/CloudyKit/jet/v6 v6.2.0/go.mod h1:d3ypHeIRNo2+XyqnGA8s+aphtcVpjP5hPwP/Lzo7Ro4= github.com/DATA-DOG/go-sqlmock v1.3.3/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= github.com/DataDog/zstd v1.5.5 h1:oWf5W7GtOLgp6bciQYDmhHHjdhYkALu6S/5Ni9ZgSvQ= github.com/DataDog/zstd v1.5.5/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= +github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24/go.mod h1:4UJr5HIiMZrwgkSPdsjy2uOQExX/WEILpIrO9UPGuXs= +github.com/GaijinEntertainment/go-exhaustruct/v2 v2.3.0/go.mod h1:b3g59n2Y+T5xmcxJL+UEG2f8cQploZm1mR/v6BW0mU0= +github.com/HdrHistogram/hdrhistogram-go v1.1.2/go.mod h1:yDgFjdqOqDEKOvasDdhWNXYg9BVp4O+o5f6V/ehm6Oo= +github.com/Joker/jade v1.1.3/go.mod h1:T+2WLyt7VH6Lp0TRxQrUYEs64nRc83wkMQrfeIQKduM= github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= +github.com/MariusVanDerWijden/FuzzyVM v0.0.0-20240209103030-ec53fa766bf8/go.mod h1:L1QpLBqXlboJMOC2hndG95d1eiElzKsBhjzcuy8pxeM= +github.com/MariusVanDerWijden/tx-fuzz 
v1.3.3-0.20240227085032-f70dd7c85c97/go.mod h1:xcjGtET6+7KeDHcwLQp3sIfyFALtoTjzZgY8Y+RUozM= +github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= +github.com/Masterminds/semver/v3 v3.2.0/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ= github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= +github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk= github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/OpenPeeDeeP/depguard v1.1.1/go.mod h1:JtAMzWkmFEzDPyAd+W0NHl1lvpQKTvT9jnRVsohBKpc= +github.com/ProtonMail/go-crypto v0.0.0-20221026131551-cf6655e29de4/go.mod h1:UBYPn8k0D56RtnR8RFQMjmh4KrZzWJ5o7Z9SYjossQ8= github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/Shopify/goreferrer v0.0.0-20220729165902-8cddb4f5de06/go.mod h1:7erjKLwalezA0k99cWs5L11HWOAPNjdUZ6RxH1BXbbM= github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= github.com/Shopify/sarama v1.26.1/go.mod h1:NbSGBSSndYaIhRcBtY9V0U7AyH+x71bG668AuWys/yU= github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= @@ -119,18 +260,26 @@ github.com/VictoriaMetrics/fastcache v1.12.2 h1:N0y9ASrJ0F6h0QaC3o6uJb3NIZ9VKLjC github.com/VictoriaMetrics/fastcache v1.12.2/go.mod h1:AmC+Nzz1+3G2eCPapF6UcsnkThDcMsQicp4xDukwJYI= github.com/VividCortex/gohistogram v1.0.0 h1:6+hBz+qvs0JOrrNhhmR7lFxo5sINxBCGXrdtl/UvroE= github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= +github.com/aclements/go-moremath v0.0.0-20210112150236-f10218a38794/go.mod h1:7e+I0LQFUI9AXWxOfsQROs9xPhoJtbsyWcjJqDd4KPY= +github.com/acomagu/bufpipe v1.0.3/go.mod h1:mxdxdup/WdsKVreO5GpW4+M/1CE2sMG4jeGJ2sYmHc4= +github.com/adlio/schema v1.3.3/go.mod h1:1EsRssiv9/Ce2CMzq5DoL7RiMshhuigQxrR4DMV9fHg= github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII= github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c= github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw= +github.com/alecthomas/kingpin/v2 v2.4.0/go.mod h1:0gyi0zQnjuFk8xrkNKamJoyUo382HRL7ATRpFZCw6tE= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units 
v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= +github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= +github.com/alexkohler/prealloc v1.0.0/go.mod h1:VetnK3dIgFBBKmg0YnD9F9x6Icjd+9cvfHR56wJVlKE= +github.com/alingse/asasalint v0.0.11/go.mod h1:nCaoMhw7a9kSJObvQyVzNTPBDbNpdocqrSP7t/cW5+I= github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM= github.com/allegro/bigcache v1.2.1 h1:hg1sY1raCwic3Vnsvje6TT7/pnZba83LeFck5NrFKSc= github.com/allegro/bigcache v1.2.1/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM= github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= +github.com/andybalholm/brotli v1.0.5/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig= github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/apache/arrow/go/arrow v0.0.0-20191024131854-af6fa24be0db/go.mod h1:VTxUBvSJ3s3eHAg65PNgrsn5BtqCRPdmyXh6rAfdxN0= @@ -145,11 +294,15 @@ github.com/aristanetworks/splunk-hec-go v0.3.3/go.mod h1:1VHO9r17b0K7WmOlLb9nTk/ github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= +github.com/armon/go-metrics v0.3.9/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= +github.com/ashanbrown/forbidigo v1.3.0/go.mod h1:vVW7PEdqEFqapJe95xHkTfB1+XvZXBFg8t0sG2FIxmI= +github.com/ashanbrown/makezero v1.1.1/go.mod h1:i1bJLCRSCHOcOa9Y6MyF2FTfMZMFdHvxKHxgO5Z1axI= github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU= github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go v1.40.45/go.mod h1:585smgzpB/KqRA+K3y/NL/oYRqQvpNJYvLm+LY1U59Q= github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= github.com/aws/aws-sdk-go-v2 v1.2.0/go.mod h1:zEQs02YRBw1DjK0PoJv3ygDYOFTre1ejlJWl8FwAuQo= github.com/aws/aws-sdk-go-v2 v1.21.2 h1:+LXZ0sgo8quN9UOKXXzAWRT3FWd4NxeXWOZom9pE7GA= @@ -169,6 +322,7 @@ github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.37 h1:JRVhO25+r3ar2mKGP7 github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.37/go.mod h1:Qe+2KtKml+FEsQF/DHmDV+xjtche/hwoF75EG4UlHW8= github.com/aws/aws-sdk-go-v2/internal/ini v1.3.45 h1:hze8YsjSh8Wl1rYa1CJpRmXP21BvOBuc76YhW0HsuQ4= github.com/aws/aws-sdk-go-v2/internal/ini v1.3.45/go.mod h1:lD5M20o09/LCuQ2mE62Mb/iSdSlCNuj6H5ci7tW7OsE= +github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.8.1/go.mod h1:CM+19rL1+4dFWnOQKwDc7H1KwXTz+h61oUSHyhV0b3o= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.0.2/go.mod h1:45MfaXZ0cNbeuT0KQ1XJylq8A6+OpVV2E5kvY/Kq+u8= 
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.37 h1:WWZA/I2K4ptBS1kg0kV1JbBtG/umed0vwHRrmcr9z7k= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.37/go.mod h1:vBmDnwWXWxNPFRMmG2m/3MKOe+xEcMDo1tanpaWCcck= @@ -186,6 +340,7 @@ github.com/aws/aws-sdk-go-v2/service/sts v1.23.2/go.mod h1:Eows6e1uQEsc4ZaHANmsP github.com/aws/smithy-go v1.1.0/go.mod h1:EzMw8dbp/YJL4A5/sbhGddag+NPT7q084agLbB9LgIw= github.com/aws/smithy-go v1.15.0 h1:PS/durmlzvAFpQHDs4wi4sNNP9ExsqZh6IlfdHXgKK8= github.com/aws/smithy-go v1.15.0/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA= +github.com/aymerick/douceur v0.2.0/go.mod h1:wlT5vV2O3h55X9m7iVYN0TBM0NH/MmbLnd30/FjWUq4= github.com/bazelbuild/rules_go v0.23.2 h1:Wxu7JjqnF78cKZbsBsARLSXx/jlGaSLCnUV3mTlyHvM= github.com/bazelbuild/rules_go v0.23.2/go.mod h1:MC23Dc/wkXEyk3Wpq6lCqz0ZAYOZDw2DR5y3N1q2i7M= github.com/benbjohnson/clock v1.0.2/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= @@ -203,9 +358,13 @@ github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kB github.com/bits-and-blooms/bitset v1.11.0 h1:RMyy2mBBShArUAhfVRZJ2xyBO58KCBCtZFShw3umo6k= github.com/bits-and-blooms/bitset v1.11.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8= github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= +github.com/bkielbasa/cyclop v1.2.0/go.mod h1:qOI0yy6A7dYC4Zgsa72Ppm9kONl0RoIlPbzot9mhmeI= +github.com/blizzy78/varnamelen v0.8.0/go.mod h1:V9TzQZ4fLJ1DSrjVDfl89H7aMnTvKkApdHeyESmyR7k= github.com/bmizerany/pat v0.0.0-20170815010413-6226ea591a40/go.mod h1:8rLXio+WjiTceGBHIoTvn60HIbs7Hm7bcHjyrSqYB9c= github.com/bnb-chain/fastssz v0.1.2 h1:vTcXw5SwCtRYnl/BEclujiml7GXiVOZ74tub4GHpvlM= github.com/bnb-chain/fastssz v0.1.2/go.mod h1:KcabV+OEw2QwgyY8Fc88ZG79CKYkFdu0kKWyfA3dI6o= +github.com/bnb-chain/greenfield-bundle-sdk v1.1.0 h1:0BWQsV+c32wHxEEpJY9igBSBg5N1Fm3KoSLC+Yef2n0= +github.com/bnb-chain/greenfield-bundle-sdk v1.1.0/go.mod h1:NCjQp0sniAbBR5yR5pYiXpYwYd1okSIBLj+31sTpmXA= github.com/bnb-chain/greenfield-tendermint v0.0.0-20230417032003-4cda1f296fb2 h1:jubavYCs/mCFj/g6Utl+l4SfpykdBdWJFPsvb9FcEXU= github.com/bnb-chain/greenfield-tendermint v0.0.0-20230417032003-4cda1f296fb2/go.mod h1:9q11eHNRY9FDwFH+4pompzPNGv//Z3VcfvkELaHJPMs= github.com/bnb-chain/ics23 v0.1.0 h1:DvjGOts2FBfbxB48384CYD1LbcrfjThFz8kowY/7KxU= @@ -213,8 +372,11 @@ github.com/bnb-chain/ics23 v0.1.0/go.mod h1:cU6lTGolbbLFsGCgceNB2AzplH1xecLp6+KX github.com/bnb-chain/tendermint v0.31.16 h1:rOO6WG61JDAuRCCL8TKnGhorJftQDVygq0mqR7A0ck4= github.com/bnb-chain/tendermint v0.31.16/go.mod h1:cmt8HHmQUSVaWQ/hoTefRxsh5X3ERaM1zCUIR0DPbFU= github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps= +github.com/bombsimon/wsl/v3 v3.3.0/go.mod h1:st10JtZYLE4D5sC7b8xV4zTKZwAQjCH/Hy2Pm1FNZIc= github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g= github.com/bradfitz/gomemcache v0.0.0-20170208213004-1952afaa557d/go.mod h1:PmM6Mmwb0LSuEubjR8N7PtNe1KxZLtOUHtbeikc5h60= +github.com/breml/bidichk v0.2.3/go.mod h1:8u2C6DnAy0g2cEq+k/A2+tr9O1s+vHGxWn0LTc70T2A= +github.com/breml/errchkjson v0.3.0/go.mod h1:9Cogkyv9gcT8HREpzi3TiqBxCqDzo8awa92zSDFcofU= github.com/btcsuite/btcd v0.23.0 h1:V2/ZgjfDFIygAX3ZapeigkVBoVUtOJKSwrhZdlpSvaA= github.com/btcsuite/btcd v0.23.0/go.mod h1:0QJIIN1wwIXF/3G/m87gIwGniDMDQqjVn4SZgnFpsYY= github.com/btcsuite/btcd/btcec/v2 v2.1.0/go.mod h1:2VzYrv4Gm4apmbVVsSq5bqf1Ec8v56E48Vt0Y/umPgA= @@ -228,15 +390,22 
@@ github.com/btcsuite/btcd/chaincfg/chainhash v1.0.0/go.mod h1:7SFka0XMvUgj3hfZtyd github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1 h1:q0rUy8C/TYNBQS1+CGKw68tLOFYSNEs0TFnxxnS9+4U= github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc= github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA= +github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg= github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd/go.mod h1:HHNXQzUsZCxOoE+CPiyCTO6x34Zs86zZUiwtpXoGdtg= github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY= github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs= github.com/bufbuild/buf v0.37.0/go.mod h1:lQ1m2HkIaGOFba6w/aC3KYBHhKEOESP3gaAEpS3dAFM= +github.com/bufbuild/buf v1.7.0/go.mod h1:Go40fMAF46PnPLC7jJgTQhAI95pmC0+VtxFKVC0qLq0= +github.com/bufbuild/connect-go v1.0.0/go.mod h1:9iNvh/NOsfhNBUH5CtvXeVUskQO1xsrEviH7ZArwZ3I= github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= +github.com/butuzov/ireturn v0.1.1/go.mod h1:Wh6Zl3IMtTpaIKbmwzqi6olnM9ptYQxxVacMsOEFPoc= github.com/c-bata/go-prompt v0.2.2/go.mod h1:VzqtzE2ksDBcdln8G7mk2RX9QyGjH+OVqOCSiVIqS34= github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= +github.com/casbin/casbin/v2 v2.37.0/go.mod h1:vByNa/Fchek0KZUgG5wEsl7iFsiviAYKRtgrQfcJqHg= github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= +github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= github.com/cespare/cp v0.1.0/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW+s= github.com/cespare/cp v1.1.1 h1:nCb6ZLdB7NRaqsm91JtQTAme2SKJzXVsdPIPkyJr1MU= github.com/cespare/cp v1.1.1/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW+s= @@ -245,7 +414,12 @@ github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghf github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/charithe/durationcheck v0.0.9/go.mod h1:SSbRIBVfMjCi/kEB6K65XEA83D6prSM8ap1UCpNKtgg= +github.com/chavacava/garif v0.0.0-20220630083739-93517212f375/go.mod h1:4m1Rv7xfuwWPNKXlThldNuJvutYM6J95wNuuVmn55To= github.com/cheekybits/genny v1.0.0/go.mod h1:+tQajlRqAUrPI7DOSpB0XAqZYtQakVtB7wXkRAgjxjQ= +github.com/chromedp/cdproto v0.0.0-20230802225258-3cf4e6d46a89/go.mod h1:GKljq0VrfU4D5yc+2qA6OVr8pmO/MBbPEWqWQ/oqGEs= +github.com/chromedp/chromedp v0.9.2/go.mod h1:LkSXJKONWTCHAfQasKFUZI+mxqS4tZqhmtGzzhLsnLs= +github.com/chromedp/sysutil v1.0.0/go.mod h1:kgWmDdq8fTzXYcKIBqIYvRRTnYb9aNS9moAV0xufSww= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/logex v1.2.0/go.mod h1:9+9sk7u7pGNWYMkh0hdiL++6OeibzJccyQU4p4MedaY= github.com/chzyer/logex v1.2.1 h1:XHDu3E6q+gdHgsdTPH6ImJMIp436vR6MPtH8gP05QzM= @@ -259,15 +433,20 @@ github.com/chzyer/test 
v0.0.0-20210722231415-061457976a23/go.mod h1:Q3SI9o4m/ZMn github.com/chzyer/test v1.0.0 h1:p3BQDXSxOhOG0P9z6/hGnII4LGiEPOYBhs8asl/fC04= github.com/chzyer/test v1.0.0/go.mod h1:2JlltgoNkt4TW/z9V/IzDdFaMTM2JPIi26O1pF38GC8= github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs= +github.com/cilium/ebpf v0.9.1/go.mod h1:+OhNOIXx/Fnu1IE8bJz2dzOA+VSfyTfdNUVdlQnxUFY= +github.com/clbanning/mxj v1.8.4/go.mod h1:BVjHeAH+rl9rs6f+QIpeRl0tfu10SXn1pUSa5PVGJng= github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cloudflare/circl v1.1.0/go.mod h1:prBCrKB9DV4poKZY1l9zBXg2QJY7mvgRvtMxxK7fi4I= github.com/cloudflare/cloudflare-go v0.14.0/go.mod h1:EnwdgGMaFOruiPZRFSgn+TsQ3hQ7C/YWzIGLeu5c304= github.com/cloudflare/cloudflare-go v0.79.0 h1:ErwCYDjFCYppDJlDJ/5WhsSmzegAUe2+K9qgFyQDg3M= github.com/cloudflare/cloudflare-go v0.79.0/go.mod h1:gkHQf9xEubaQPEuerBuoinR9P8bf8a05Lq0X6WKy1Oc= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= github.com/cockroachdb/datadriven v1.0.3-0.20230413201302-be42291fc80f h1:otljaYPt5hWxV3MUfO5dFPFiOXg9CyG5/kCfayTqsJ4= github.com/cockroachdb/datadriven v1.0.3-0.20230413201302-be42291fc80f/go.mod h1:a9RdTaap04u637JoCzcUoIcDmvwSUtcUFtT/C3kJlTU= @@ -282,6 +461,7 @@ github.com/cockroachdb/redact v1.1.5/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZ github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 h1:zuQyyAKVxetITBuuhv3BI9cMrmStnpT18zmgmTxunpo= github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06/go.mod h1:7nc4anLGjupUW/PeY5qiNYsdNXj7zopG+eqsS7To5IQ= github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= +github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0/go.mod h1:4Zcjuz89kmFXt9morQgcfYZAYZ5n8WHjt81YYWIwtTM= github.com/cometbft/cometbft-db v0.7.0 h1:uBjbrBx4QzU0zOEnU8KxoDl18dMNgDh+zZRUE0ucsbo= github.com/cometbft/cometbft-db v0.7.0/go.mod h1:yiKJIm2WKrt6x8Cyxtq9YTEcIMPcEe4XPxhgX59Fzf0= github.com/consensys/bavard v0.1.8-0.20210406032232-f3452dc9b572/go.mod h1:Bpd0/3mZuaj6Sj+PqrmIquiOKy397AKGThQPaGzNXAQ= @@ -293,6 +473,9 @@ github.com/consensys/gnark-crypto v0.12.1/go.mod h1:v2Gy7L/4ZRosZ7Ivs+9SfUDr0f5U github.com/containerd/cgroups v0.0.0-20201119153540-4cbc285b3327/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE= github.com/containerd/cgroups v1.1.0 h1:v8rEWFl6EoqHB+swVNjVoCJE8o3jX7e8nqBGPLaDFBM= github.com/containerd/cgroups v1.1.0/go.mod h1:6ppBcbh/NOOUU+dMKrykgaBnK9lCIBxHqJDGwsa1mIw= +github.com/containerd/containerd v1.6.8/go.mod h1:By6p5KqPK0/7/CgO/A6t/Gz+CUYUu2zf1hUaaymVXB0= +github.com/containerd/continuity v0.3.0/go.mod 
h1:wJEAIwKOm/pBZuBd0JmeTvnLquTB1Ag8espWhkykbPM= +github.com/containerd/typeurl v1.0.2/go.mod h1:9trJWW2sRlGub4wZJRTW83VtbOLS6hwcDZXTn6oPz9s= github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= @@ -325,9 +508,11 @@ github.com/creachadair/taskgroup v0.3.2 h1:zlfutDS+5XG40AOxcHDSThxKzns8Tnr9jnr6V github.com/creachadair/taskgroup v0.3.2/go.mod h1:wieWwecHVzsidg2CsUnFinW1faVN4+kq+TDlRJQ0Wbk= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/curioswitch/go-reassign v0.2.0/go.mod h1:x6OpXuWvgfQaMGks2BZybTngWjT84hqJfKoO8Tt/Roc= github.com/cyberdelia/templates v0.0.0-20141128023046-ca7fffd4298c/go.mod h1:GyV+0YP4qX0UQ7r2MoYZ+AvYDp12OF5yg4q8rGnyNh4= github.com/d4l3k/messagediff v1.2.1 h1:ZcAIMYsUg0EAp9X+tt8/enBE/Q8Yd5kzPynLyKptt9U= github.com/d4l3k/messagediff v1.2.1/go.mod h1:Oozbb1TVXFac9FtSIxHBMnBCq2qeH/2KkEQxENCrlLo= +github.com/daixiang0/gci v0.8.1/go.mod h1:EpVfrztufwVgQRXjnX4zuNinEpLj5OmMjtu/+MB0V0c= github.com/dave/jennifer v1.2.0/go.mod h1:fIb+770HOpJ2fmN9EPPKOqm1vMGhB+TwXKMZhrIygKg= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= @@ -349,6 +534,7 @@ github.com/decred/dcrd/lru v1.0.0/go.mod h1:mxKOwFd7lFjN2GZYsiz/ecgqR6kkYAl+0pz0 github.com/deepmap/oapi-codegen v1.6.0/go.mod h1:ryDa9AgbELGeB+YEXE1dR53yAjHwFvE9iAUlWl9Al3M= github.com/deepmap/oapi-codegen v1.8.2 h1:SegyeYGcdi0jLLrpbCMoJxnUUn8GBXHsvr4rbzjuhfU= github.com/deepmap/oapi-codegen v1.8.2/go.mod h1:YLgSKSDv/bZQB7N4ws6luhozi3cEdRktEqrX88CvjIw= +github.com/denis-tingaikin/go-header v0.4.3/go.mod h1:0wOCWuN71D5qIgE2nz9KrKmuYBAC2Mra5RassOIQ2/c= github.com/dgraph-io/badger v1.6.1/go.mod h1:FRmFw3uxvcpa8zG3Rxs0th+hCLIuaQg8HlNV5bjgnuU= github.com/dgraph-io/badger v1.6.2 h1:mNw0qs90GVgGGWylh0umH5iag1j6n/PeJtNvL6KY/x8= github.com/dgraph-io/badger v1.6.2/go.mod h1:JW2yswe3V058sS0kZ2h/AXeDSqFjxnZcRrVH//y2UQE= @@ -367,7 +553,10 @@ github.com/dlclark/regexp2 v1.7.0 h1:7lJfhqlPssTb1WQx4yvTHN0uElPEv52sbaECrAQxjAo github.com/dlclark/regexp2 v1.7.0/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8= github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ= +github.com/docker/distribution v2.8.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/docker v1.4.2-0.20180625184442-8e610b2b55bf/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v20.10.19+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= @@ -388,12 +577,14 @@ github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1 github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= github.com/eclipse/paho.mqtt.golang v1.2.0/go.mod 
h1:H9keYFcgq3Qr5OUJm/JZI/i6U7joQ8SYLhZwfeOo6Ts= github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= +github.com/eknkc/amber v0.0.0-20171010120322-cdade1c07385/go.mod h1:0vRUJqYpeSZifjYj7uP3BG/gKcuzL9xWVV/Y+cK33KM= github.com/elastic/gosigar v0.12.0/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs= github.com/elastic/gosigar v0.14.2 h1:Dg80n8cr90OZ7x+bAax/QjoW/XqTI11RmA79ZwIm9/4= github.com/elastic/gosigar v0.14.2/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs= github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/emicklei/dot v0.11.0/go.mod h1:DeV7GvQtIw4h2u73RKBkkFdvVAz0D9fzeJrgPW6gy/s= github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ= github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= @@ -402,28 +593,38 @@ github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5y github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= +github.com/envoyproxy/go-control-plane v0.11.1-0.20230524094728-9239064ad72f/go.mod h1:sfYdkwUW4BA3PbKjySwjJy+O4Pu0h62rlqCMHNk+K+Q= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/envoyproxy/protoc-gen-validate v0.10.1/go.mod h1:DRjgyB0I43LtJapqN6NiRwroiAU2PaFuvk/vjgh61ss= +github.com/esimonov/ifshort v1.0.4/go.mod h1:Pe8zjlRrJ80+q2CxHLfEOfTwxCZ4O+MuhcHcfgNWTk0= github.com/etcd-io/bbolt v1.3.3 h1:gSJmxrs37LgTqR/oyJBWok6k6SvXEUerFTbltIhXkBM= github.com/etcd-io/bbolt v1.3.3/go.mod h1:ZF2nL25h33cCyBtcyWeZ2/I3HQOfTP+0PIEvHjkjCrw= github.com/ethereum/c-kzg-4844 v0.4.0 h1:3MS1s4JtA868KpJxroZoepdV0ZKBp3u/O5HcZ7R3nlY= github.com/ethereum/c-kzg-4844 v0.4.0/go.mod h1:VewdlzQmpT5QSrVhbBuGoCdFJkpaJlO1aQputP83wc0= github.com/ethereum/go-ethereum v1.10.13/go.mod h1:W3yfrFyL9C1pHcwY5hmRHVDaorTiQxhYBkKyu5mEDHw= +github.com/ettle/strcase v0.1.1/go.mod h1:hzDLsPC7/lwKyBOywSHEP89nt2pDgdy+No1NBA9o9VY= github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/facebookgo/ensure v0.0.0-20200202191622-63f1cf65ac4c/go.mod h1:Yg+htXGokKKdzcwhuNDwVvN+uBxDGXJ7G/VN1d8fa64= +github.com/facebookgo/stack v0.0.0-20160209184415-751773369052/go.mod h1:UbMTZqLaRiH3MsBH8va0n7s1pQYcu3uTb8G4tygF4Zg= +github.com/facebookgo/subset v0.0.0-20200203212716-c811ad88dec4/go.mod h1:5tD+neXqOorC30/tWg0LCSkrqj/AR6gu8yY8/fpw1q0= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM= 
github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE= github.com/fatih/structs v1.1.0 h1:Q7juDM0QtcnhCpeyLGQKyg4TOIghuNXrkL32pHAUMxo= github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= +github.com/fatih/structtag v1.2.0/go.mod h1:mBJUNpUnHmRKrKlQQlmCrh5PuhftFbNv8Ys4/aAZl94= github.com/ferranbt/fastssz v0.0.0-20210120143747-11b9eff30ea9/go.mod h1:DyEu2iuLBnb/T51BlsiO3yLYdJC6UbGMrIkqK1KmQxM= github.com/ferranbt/fastssz v0.0.0-20210905181407-59cf6761a7d5 h1:6dVcS0LktRSyEEgldFY4N9J17WjUoiJStttH+RZj0Wo= github.com/ferranbt/fastssz v0.0.0-20210905181407-59cf6761a7d5/go.mod h1:S8yiDeAXy8f88W4Ul+0dBMPx49S05byYbmZD6Uv94K4= +github.com/firefart/nonamedreturns v1.0.4/go.mod h1:TDhe/tjI1BXo48CmYbUduTV7BdIga8MAO/xbKdcVsGI= github.com/fjl/gencodec v0.0.0-20230517082657-f9840df7b83e h1:bBLctRc7kr01YGvaDfgLbTwjFNW5jdp5y5rj8XXBHfY= github.com/fjl/gencodec v0.0.0-20230517082657-f9840df7b83e/go.mod h1:AzA8Lj6YtixmJWL+wkKoBGsLWy9gFrAzi4g+5bCKwpY= github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0= github.com/fjl/memsize v0.0.2 h1:27txuSD9or+NZlnOWdKUxeBzTAUkWCVh+4Gf2dWFOzA= github.com/fjl/memsize v0.0.2/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0= +github.com/flosch/pongo2/v4 v4.0.2/go.mod h1:B5ObFANs/36VwxxlgKpdchIJHMvHB562PW+BWPhwZD8= github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= github.com/flynn/noise v1.0.0/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag= github.com/flynn/noise v1.1.0 h1:KjPQoQCEFdZDiP03phOvGi11+SVVhBG2wOWAorLsstg= @@ -442,6 +643,7 @@ github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMo github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= +github.com/fzipp/gocyclo v0.6.0/go.mod h1:rXPyn8fnlpa0R2csP/31uerbiVBugk5whMdlyaLkLoA= github.com/garslo/gogen v0.0.0-20170306192744-1d203ffc1f61 h1:IZqZOB2fydHte3kUgxrzK5E1fW7RQGeDwE8F/ZZnUYc= github.com/garslo/gogen v0.0.0-20170306192744-1d203ffc1f61/go.mod h1:Q0X6pkwTILDlzrGEckF6HKjXe48EgsY/l7K7vhY4MW8= github.com/garyburd/redigo v1.1.1-0.20170914051019-70e1b1943d4f/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY= @@ -455,15 +657,23 @@ github.com/getkin/kin-openapi v0.53.0/go.mod h1:7Yn5whZr5kJi6t+kShccXS8ae1APpYTW github.com/getkin/kin-openapi v0.61.0/go.mod h1:7Yn5whZr5kJi6t+kShccXS8ae1APpYTW6yheSwk8Yi4= github.com/getsentry/sentry-go v0.25.0 h1:q6Eo+hS+yoJlTO3uu/azhQadsD8V+jQn2D8VvX1eOyI= github.com/getsentry/sentry-go v0.25.0/go.mod h1:lc76E2QywIyW8WuBnwl8Lc4bkmQH4+w1gwTf25trprY= +github.com/ghemawat/stream v0.0.0-20171120220530-696b145b53b9/go.mod h1:106OIgooyS7OzLDOpUGgm9fA3bQENb/cFSyyBmMoJDs= github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= +github.com/gin-gonic/gin v1.8.1/go.mod h1:ji8BvRH1azfM+SYow9zQ6SZMvR8qOMZHmsCuWR9tTTk= github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= github.com/glycerine/go-unsnap-stream v0.0.0-20180323001048-9f0cb55181dd/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE= 
github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24= github.com/go-chi/chi/v5 v5.0.0/go.mod h1:BBug9lr0cqtdAhsu6R4AAdvufI0/XBzAQSsUqJpoZOs= +github.com/go-chi/chi/v5 v5.0.7/go.mod h1:DslCQbL2OYiznFReuXYUmQ2hGd1aDpCnlMNITLSKoi8= +github.com/go-critic/go-critic v0.6.5/go.mod h1:ezfP/Lh7MA6dBNn4c6ab5ALv3sKnZVLx37tr00uuaOY= github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= +github.com/go-git/gcfg v1.5.0/go.mod h1:5m20vg6GwYabIxaOonVkTdrILxQMpEShl1xiMF4ua+E= +github.com/go-git/go-billy/v5 v5.4.0/go.mod h1:vjbugF6Fz7JIflbVpl1hJsGjSHNltrSw45YK/ukIvQg= +github.com/go-git/go-git/v5 v5.5.2/go.mod h1:BE5hUJ5yaV2YMxhmaP4l6RBQ08kMxKSPD4BlxtH7OjI= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= @@ -486,6 +696,8 @@ github.com/go-logr/logr v0.2.1/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTg github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-martini/martini v0.0.0-20170121215854-22fa46961aab/go.mod h1:/P9AEU963A2AYjv4d1V5eVL1CQbEJq6aCNHDDjibzu8= github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8= github.com/go-ole/go-ole v1.2.5/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= @@ -517,11 +729,25 @@ github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-stack/stack v1.6.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= +github.com/go-toolsmith/astcast v1.0.0/go.mod h1:mt2OdQTeAQcY4DQgPSArJjHCcOwlX+Wl/kwN+LbLGQ4= +github.com/go-toolsmith/astcopy v1.0.2/go.mod h1:4TcEdbElGc9twQEYpVo/aieIXfHhiuLh4aLAck6dO7Y= +github.com/go-toolsmith/astequal v1.0.3/go.mod h1:9Ai4UglvtR+4up+bAD4+hCj7iTo4m/OXVTSLnCyTAx4= +github.com/go-toolsmith/astfmt v1.0.0/go.mod h1:cnWmsOAuq4jJY6Ct5YWlVLmcmLMn1JUPuQIHCY7CJDw= +github.com/go-toolsmith/astp v1.0.0/go.mod h1:RSyrtpVlfTFGDYRbrjyWP1pYu//tSFcvdYrA8meBmLI= +github.com/go-toolsmith/strparse v1.0.0/go.mod h1:YI2nUKP9YGZnL/L1/DLFBfixrcjslWct4wyljWhSRy8= +github.com/go-toolsmith/typep v1.0.2/go.mod 
h1:JSQCQMUPdRlMZFswiq3TGpNp1GMktqkR2Ns5AIQkATU= +github.com/go-xmlfmt/xmlfmt v0.0.0-20191208150333-d5b6f63a941b/go.mod h1:aUCEOzzezBEjDBbFBoSiya/gduyIiWYRP6CnSFIV8AM= github.com/go-yaml/yaml v2.1.0+incompatible h1:RYi2hDdss1u4YE7GwixGzWwVo47T8UQwnTLB6vQiq+o= github.com/go-yaml/yaml v2.1.0+incompatible/go.mod h1:w2MrLa16VYP0jy6N7M5kHaCkaLENm+P+Tv+MfurjSw0= +github.com/go-zookeeper/zk v1.0.2/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw= +github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= +github.com/gobwas/httphead v0.1.0/go.mod h1:O/RXo79gxV8G+RqlR/otEwx4Q36zl9rqC5u12GKvMCM= +github.com/gobwas/pool v0.2.1/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw= +github.com/gobwas/ws v1.2.1/go.mod h1:hRKAFb8wOxFROYNsT1bqfWnhX+b5MFeJM9r2ZSwg/KY= github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU= github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= @@ -533,13 +759,16 @@ github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw= github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= github.com/gofrs/uuid v3.3.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= +github.com/gofrs/uuid v4.3.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= +github.com/gogo/googleapis v1.4.1/go.mod h1:2lpHqI5OcWCtVElxXnPt+s8oJvMpySlOyM6xDCrzib4= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/gogo/status v1.1.0/go.mod h1:BFv9nrluPLmrS0EmGVvLaPNmRosr9KapBYd5/hpY1WM= github.com/golang-jwt/jwt v3.2.2+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I= github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= @@ -594,7 +823,17 @@ github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEW github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb h1:PBC98N2aIaM3XXiurYmW7fx4GZkL8feAMVq7nEjURHk= github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2/go.mod h1:k9Qvh+8juN+UKMCS/3jFtGICgW8O96FVaZsaxdzDkR4= +github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a/go.mod h1:ryS0uhF+x9jgbj/N71xsEqODy9BN81/GonCZiOzirOk= +github.com/golangci/go-misc v0.0.0-20220329215616-d24fe342adfe/go.mod h1:gjqyPShc/m8pEMpk0a3SeagVb0kaqvhscv+i9jI5ZhQ= +github.com/golangci/gofmt v0.0.0-20220901101216-f2edd75033f2/go.mod h1:9wOXstvyDRshQ9LggQuzBCGysxs3b6Uo/1MvYCR2NMs= +github.com/golangci/golangci-lint v1.50.1/go.mod 
h1:AQjHBopYS//oB8xs0y0M/dtxdKHkdhl0RvmjUct0/4w= github.com/golangci/lint-1 v0.0.0-20181222135242-d2cdd8c08219/go.mod h1:/X8TswGSh1pIozq4ZwCfxS0WA5JGXguxk94ar/4c87Y= +github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0/go.mod h1:66R6K6P6VWk9I95jvqGxkqJxVWGFy9XlDwLwVz1RCFg= +github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca/go.mod h1:tvlJhZqDe4LMs4ZHD0oMUlt9G2LWuDGoisJTBzLMV9o= +github.com/golangci/misspell v0.3.5/go.mod h1:dEbvlSfYbMQDtrpRMQU675gSDLDNa8sCPPChZ7PhiVA= +github.com/golangci/revgrep v0.0.0-20220804021717-745bb2f7c2e6/go.mod h1:0AKcRCkMoKvUvlf89F6O7H2LYdhr1zBh736mBItOdRs= +github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4/go.mod h1:Izgrg8RkN3rCIMLGE9CyYmU9pY2Jer6DgANEnZ/L/cQ= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU= @@ -668,7 +907,9 @@ github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8 github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gordonklaus/ineffassign v0.0.0-20200309095847-7953dde2c7bf/go.mod h1:cuNKsD1zp2v6XfE/orVX2QE1LC+i254ceGcVeDT3pTU= +github.com/gordonklaus/ineffassign v0.0.0-20210914165742-4cc7213b9bc8/go.mod h1:Qcp2HIAYhR7mNUVSIxZww3Guk4it82ghYcEXIAk+QT0= github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= +github.com/gorilla/css v1.0.0/go.mod h1:Dn721qIggHpt4+EFCcTLTU/vk5ySda2ReITrtgBl60c= github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= @@ -677,6 +918,11 @@ github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gorilla/websocket v1.5.1 h1:gmztn0JnHVt9JZquRuzLw3g4wouNVzKL15iLr/zn/QY= github.com/gorilla/websocket v1.5.1/go.mod h1:x3kM2JMyaluk02fnUJpQuwD2dCS5NDG2ZHL0uE0tcaY= +github.com/gostaticanalysis/analysisutil v0.7.1/go.mod h1:v21E3hY37WKMGSnbsw2S/ojApNWb6C1//mXO48CXbVc= +github.com/gostaticanalysis/comment v1.4.2/go.mod h1:KLUTGDv6HOCotCH8h2erHKmpci2ZoR8VPu34YA2uzdM= +github.com/gostaticanalysis/forcetypeassert v0.1.0/go.mod h1:qZEedyP/sY1lTGV1uJ3VhWZ2mqag3IkWsDHVbplHXak= +github.com/gostaticanalysis/nilerr v0.1.1/go.mod h1:wZYb6YI5YAxxq0i1+VJbY0s2YONW0HU0GPE3+5PWN4A= +github.com/gotestyourself/gotestyourself v2.2.0+incompatible/go.mod h1:zZKM6oeNM8k+FRljX1mnzVYeS8wiGgQyvST1/GafPbY= github.com/graph-gophers/graphql-go v0.0.0-20201113091052-beb923fada29/go.mod h1:9CQHMSxwO4MprSdzoIEobiHpoLtHm77vfxsvsIN5Vuc= github.com/graph-gophers/graphql-go v1.3.0 h1:Eb9x/q6MFpCLz7jBCiP/WTxjSDrYLR1QY41SORZyNJ0= github.com/graph-gophers/graphql-go v1.3.0/go.mod h1:9CQHMSxwO4MprSdzoIEobiHpoLtHm77vfxsvsIN5Vuc= @@ -697,13 +943,16 @@ github.com/gtank/merlin v0.1.1 h1:eQ90iG7K9pOhtereWsmyRJ6RAwcP4tHTDBHXNg+u5is= github.com/gtank/merlin v0.1.1/go.mod h1:T86dnYJhcGOh5BjZFCJWTDeTK7XW8uE+E21Cy/bIQ+s= github.com/gtank/ristretto255 v0.1.2 h1:JEqUCPA1NvLq5DwYtuzigd7ss8fwbYay9fi4/5uMzcc= github.com/gtank/ristretto255 v0.1.2/go.mod 
h1:Ph5OpO6c7xKUGROZfWVLiJf9icMDwUeIvY4OmlYW69o= +github.com/guptarohit/asciigraph v0.5.5/go.mod h1:dYl5wwK4gNsnFf9Zp+l06rFiDZ5YtXM6x7SRWZ3KGag= github.com/gxed/hashland/keccakpg v0.0.1/go.mod h1:kRzw3HkwxFU1mpmPP8v1WyQzwdGfmKFJ6tItnhQ67kU= github.com/gxed/hashland/murmur3 v0.0.1/go.mod h1:KjXop02n4/ckmZSnY2+HKcLud/tcmvhST0bie/0lS48= github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE= +github.com/hashicorp/consul/api v1.10.1/go.mod h1:XjsvQN+RJGWI2TWy1/kqaE16HrR2J/FWgkYjdZQsX9M= github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-bexpr v0.1.10 h1:9kuI5PFotCboP3dkDYFr/wi0gg0QVbSNz5oFRpxn4uE= github.com/hashicorp/go-bexpr v0.1.10/go.mod h1:oxlubA2vC/gFVfX1A6JGp7ls7uCDlfJn732ehYYg+g0= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= @@ -712,23 +961,28 @@ github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/S github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k= github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= github.com/hashicorp/go-retryablehttp v0.7.7 h1:C8hUCYzor8PIfXHa4UrZkU4VvK8o9ISHxT2Q8+VepXU= github.com/hashicorp/go-retryablehttp v0.7.7/go.mod h1:pkQpWZeYWskR+D1tR2O5OcBFOxfA7DoAO6xtkuQnHTk= github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= +github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d 
h1:dg1dEPuWpEqDnvIw251EVy4zlP8gWbsGj4BsUKCRpYs= github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/hashicorp/golang-lru/arc/v2 v2.0.5/go.mod h1:ny6zBSQZi2JxIeYcv7kt2sH2PXJtirBN7RDhRpxPkxU= github.com/hashicorp/golang-lru/v2 v2.0.5 h1:wW7h1TG88eUIJ2i69gaE3uNVtEPIagzhGvHgwfx2Vm4= github.com/hashicorp/golang-lru/v2 v2.0.5/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= github.com/hashicorp/hcl v0.0.0-20170914154624-68e816d1c783/go.mod h1:oZtUIOe8dh44I2q6ScRibXws4Ajl+d+nod3AaR9vL5w= @@ -737,30 +991,38 @@ github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= +github.com/hashicorp/serf v0.9.5/go.mod h1:UWDWwZeL5cuWDJdl0C6wrvrUwEqtQ4ZKBKKENpqIUyk= github.com/herumi/bls-eth-go-binary v0.0.0-20210130185500-57372fb27371/go.mod h1:luAnRm3OsMQeokhGzpYmc0ZKwawY7o87PUEP11Z7r7U= github.com/herumi/bls-eth-go-binary v0.0.0-20210917013441-d37c07cfda4e h1:wCMygKUQhmcQAjlk2Gquzq6dLmyMv2kF+llRspoRgrk= github.com/herumi/bls-eth-go-binary v0.0.0-20210917013441-d37c07cfda4e/go.mod h1:luAnRm3OsMQeokhGzpYmc0ZKwawY7o87PUEP11Z7r7U= +github.com/hexops/gotextdiff v1.0.3/go.mod h1:pSWU5MAI3yDq+fZBTazCSJysOMbxWL1BSow5/V2vxeg= github.com/holiman/billy v0.0.0-20240216141850-2abb0c79d3c4 h1:X4egAf/gcS1zATw6wn4Ej8vjuVGxeHdan+bRb2ebyv4= github.com/holiman/billy v0.0.0-20240216141850-2abb0c79d3c4/go.mod h1:5GuXa7vkL8u9FkFuWdVvfR5ix8hRB7DbOAaYULamFpc= github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao= github.com/holiman/bloomfilter/v2 v2.0.3/go.mod h1:zpoh+gs7qcpqrHr3dB55AMiJwo0iURXE7ZOP9L9hSkA= +github.com/holiman/goevmlab v0.0.0-20231201084119-c73b3c97929c/go.mod h1:K6KFgcQq1U9ksldcRyLYcwtj4nUTPn4rEaZtX4Gjofc= github.com/holiman/uint256 v1.2.0/go.mod h1:y4ga/t+u+Xwd7CpDgZESaRcWy0I7XMlTMA25ApIH5Jw= github.com/holiman/uint256 v1.2.4 h1:jUc4Nk8fm9jZabQuqr2JzednajVmBpC+oiTiXZJEApU= github.com/holiman/uint256 v1.2.4/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXeiRV4ng7E= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= +github.com/hudl/fargo v1.4.0/go.mod h1:9Ai6uvFy5fQNq6VPKtg+Ceq1+eTY4nKUlR2JElEOcDo= github.com/huin/goupnp v1.0.0/go.mod h1:n9v9KO1tAxYH82qOn+UTIFQDmx5n1Zxd/ClZDMX7Bnc= github.com/huin/goupnp v1.0.2/go.mod h1:0dxJBVBHqTMjIUMkESDTNgOOx/Mw5wYIfyFmdzSamkM= github.com/huin/goupnp v1.3.0 h1:UvLUlWDNpoUdYzb2TCn+MuTWtcjXKSza2n6CBdQ0xXc= github.com/huin/goupnp v1.3.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8= github.com/huin/goutil v0.0.0-20170803182201-1ca381bf3150/go.mod h1:PpLOETDnJ0o3iZrZfqZzyLl6l7F3c6L1oWn7OICBi6o= +github.com/hydrogen18/memlistener v1.0.0/go.mod h1:qEIFzExnS6016fRpRfxrExeVn2gbClQA99gQhnIcdhE= github.com/ianlancetaylor/cgosymbolizer v0.0.0-20200424224625-be1b05b0b279/go.mod h1:a5aratAVTWyz+nJMmDsN8O4XTfaLfdAsB1ysCmZX5Bw= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20220319035150-800ac71e25c2/go.mod 
h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w= +github.com/ianlancetaylor/demangle v0.0.0-20230524184225-eabc099b10ab/go.mod h1:gx7rwoVhcfuVKG5uya9Hs3Sxj7EIvldVofAWIUtGouw= github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg= github.com/inconshreveable/log15 v0.0.0-20170622235902-74a0988b5f80/go.mod h1:cOaXtrgN4ScfRrD9Bre7U1thNq5RtJ8ZoP4iXVGRj6o= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/inconshreveable/mousetrap v1.0.1/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/influxdata/flux v0.65.1/go.mod h1:J754/zds0vvpfwuq7Gc2wRdVwEodfpCFM7mYlOw2LqY= github.com/influxdata/influxdb v1.8.3/go.mod h1:JugdFhsvvI8gadxOI6noqNeeBHvWNTbfYGtiAn+2jhI= github.com/influxdata/influxdb-client-go/v2 v2.4.0 h1:HGBfZYStlx3Kqvsv1h2pJixbCl/jhnFtxpKFAv9Tu5k= @@ -777,6 +1039,7 @@ github.com/influxdata/promql/v2 v2.12.0/go.mod h1:fxOPu+DY0bqCTCECchSRtWfc+0X19y github.com/influxdata/roaring v0.4.13-0.20180809181101-fc520f41fab6/go.mod h1:bSgUQ7q5ZLSO+bKBGqJiCBGAl+9DxyW63zLTujjUlOE= github.com/influxdata/tdigest v0.0.0-20181121200506-bf2b5ad3c0a9/go.mod h1:Js0mqiSBE6Ffsg94weZZ2c+v/ciT8QRHFOap7EKDrR0= github.com/influxdata/usage-client v0.0.0-20160829180054-6d3895376368/go.mod h1:Wbbw6tYNvwa5dlB6304Sd+82Z3f7PmVZHVKU637d4po= +github.com/informalsystems/tm-load-test v1.3.0/go.mod h1:OQ5AQ9TbT5hKWBNIwsMjn6Bf4O0U4b1kRc+0qZlQJKw= github.com/ipfs/go-cid v0.0.1/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= github.com/ipfs/go-cid v0.0.2/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= github.com/ipfs/go-cid v0.0.3/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= @@ -788,6 +1051,7 @@ github.com/ipfs/go-cid v0.4.1/go.mod h1:uQHwDeX4c6CtyrFwdqyhpNcxVewur1M7l7fNU7LK github.com/ipfs/go-datastore v0.4.1/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA= github.com/ipfs/go-datastore v0.4.4/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA= github.com/ipfs/go-datastore v0.5.0/go.mod h1:9zhEApYMTl17C8YDp7JmU7sQZi2/wqiYh73hakZ90Bk= +github.com/ipfs/go-datastore v0.6.0/go.mod h1:rt5M3nNbSO/8q1t4LNkLyUwRs8HupMeN/8O4Vn9YAT8= github.com/ipfs/go-detect-race v0.0.1/go.mod h1:8BNT7shDZPo99Q74BpGMK+4D8Mn4j46UU0LZ723meps= github.com/ipfs/go-ds-badger v0.2.3/go.mod h1:pEYw0rgg3FIrywKKnL+Snr+w/LjJZVMTBRn4FS6UHUk= github.com/ipfs/go-ds-badger v0.3.0/go.mod h1:1ke6mXNqeV8K3y5Ak2bAA0osoTfmxUdupVCGm4QUIek= @@ -809,10 +1073,12 @@ github.com/ipfs/go-log/v2 v2.3.0/go.mod h1:QqGoj30OTpnKaG/LKTGTxoP2mmQtjVMEnK72g github.com/ipfs/go-log/v2 v2.4.0/go.mod h1:nPZnh7Cj7lwS3LpRU5Mwr2ol1c2gXIEXuF6aywqrtmo= github.com/ipfs/go-log/v2 v2.5.1 h1:1XdUzF7048prq4aBjDQQ4SL5RxftpRGdXhNRwKSAlcY= github.com/ipfs/go-log/v2 v2.5.1/go.mod h1:prSpmC1Gpllc9UYWxDiZDreBYw7zp4Iqp1kOLU9U5UI= +github.com/iris-contrib/schema v0.0.6/go.mod h1:iYszG0IOsuIsfzjymw1kMzTL8YQcCWlm65f3wX8J5iA= github.com/jackpal/go-nat-pmp v1.0.2-0.20160603034137-1fa385a6f458/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc= github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus= github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc= github.com/jbenet/go-cienv v0.1.0/go.mod h1:TqNnHUmJgXau0nCzC7kXWeotg3J9W34CUv5Djy1+FlA= +github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo= github.com/jbenet/go-temp-err-catcher 
v0.0.0-20150120210811-aac704a3f4f2/go.mod h1:8GXXJV31xl8whumTzdZsTt3RnUIiPqzkyf7mxToRCMs= github.com/jbenet/go-temp-err-catcher v0.1.0 h1:zpb3ZH6wIE8Shj2sKS+khgRvf7T7RABoLk/+KKHggpk= github.com/jbenet/go-temp-err-catcher v0.1.0/go.mod h1:0kJRvmDZXNMIiJirNPEYfhpPwbGVtZVWC34vc5WLsDk= @@ -820,12 +1086,19 @@ github.com/jbenet/goprocess v0.0.0-20160826012719-b497e2f366b8/go.mod h1:Ly/wlsj github.com/jbenet/goprocess v0.1.3/go.mod h1:5yspPrukOVuOLORacaBi858NqyClJPQxYZlqdZVfqY4= github.com/jbenet/goprocess v0.1.4/go.mod h1:5yspPrukOVuOLORacaBi858NqyClJPQxYZlqdZVfqY4= github.com/jcmturner/gofork v1.0.0/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o= +github.com/jdxcode/netrc v0.0.0-20210204082910-926c7f70242a/go.mod h1:Zi/ZFkEqFHTm7qkjyNJjaWH4LQA9LQhGJyF0lTYGpxw= +github.com/jedib0t/go-pretty/v6 v6.5.4/go.mod h1:5LQIxa52oJ/DlDSLv0HEkWOFMDGoWkJb9ss5KqPpJBg= github.com/jedisct1/go-minisign v0.0.0-20190909160543-45766022959e/go.mod h1:G1CVv03EnqU1wYL2dFwXxW2An0az9JTl/ZsqXQeBlkU= github.com/jedisct1/go-minisign v0.0.0-20230811132847-661be99b8267 h1:TMtDYDHKYY15rFihtRfck/bfFqNfvcabqvXAFQfAUpY= github.com/jedisct1/go-minisign v0.0.0-20230811132847-661be99b8267/go.mod h1:h1nSAbGFqGVzn6Jyl1R/iCcBUHN4g+gW1u9CoBTrb9E= github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/jgautheron/goconst v1.5.1/go.mod h1:aAosetZ5zaeC/2EfMeRswtxUFBpe2Hr7HzkgX4fanO4= +github.com/jhump/protocompile v0.0.0-20220216033700-d705409f108f/go.mod h1:qr2b5kx4HbFS7/g4uYO5qv9ei8303JMsC7ESbYiqr2Q= github.com/jhump/protoreflect v1.8.1/go.mod h1:7GcYQDdMU/O/BBrl/cX6PNHpXh6cenjd8pneu5yW7Tg= +github.com/jhump/protoreflect v1.12.1-0.20220721211354-060cc04fc18b/go.mod h1:JytZfP5d0r8pVNLZvai7U/MCuTWITgrI4tTg7puQFKI= +github.com/jingyugao/rowserrcheck v1.1.1/go.mod h1:4yvlZSDb3IyDTUZJUmpZfm2Hwok+Dtp+nu2qOq+er9c= +github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af/go.mod h1:HEWGJkRDzjJY2sqdDwxccsGicWEf9BQOZsq2tV+xzM0= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= @@ -835,6 +1108,7 @@ github.com/jmhodges/levigo v1.0.0 h1:q5EC36kV79HWeTBWsod3mG11EgStG3qArTKcvlksN1U github.com/jmhodges/levigo v1.0.0/go.mod h1:Q6Qx+uH3RAqyK4rFQroq9RL7mdkABMcfhEI+nNuzMJQ= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/joonix/log v0.0.0-20200409080653-9c1d2ceb5f1d/go.mod h1:fS54ONkjDV71zS9CDx3V9K21gJg7byKSvI4ajuWFNJw= +github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= @@ -854,6 +1128,7 @@ github.com/juju/ansiterm v0.0.0-20180109212912-720a0952cc2a/go.mod h1:UJSiEoRfvx github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/julienschmidt/httprouter v1.3.0 h1:U0609e9tgbseu3rBINet9P48AI/D3oJs4dN7jwJOQ1U= github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= 
+github.com/julz/importas v0.1.0/go.mod h1:oSFU2R4XK/P7kNBrnL/FEQlDGN1/6WoxXEjSSXO0DV0= github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= github.com/jwilder/encoding v0.0.0-20170811194829-b4e1701a28ef/go.mod h1:Ct9fl0F6iIOGgxJ5npU/IUOhOhqlVrGjyIZc8/MagT0= github.com/k0kubun/go-ansi v0.0.0-20180517002512-3bf9e2903213 h1:qGQQKEcAR99REcMpsXCp3lJ03zYT1PkRd3kQGPn9GVg= @@ -862,13 +1137,22 @@ github.com/kami-zh/go-capturer v0.0.0-20171211120116-e492ea43421d/go.mod h1:P2vi github.com/karalabe/usb v0.0.0-20211005121534-4c5740d64559/go.mod h1:Od972xHfMJowv7NGVDiWVxk2zxnWgjLlJzE+F4F7AGU= github.com/karalabe/usb v0.0.3-0.20230711191512-61db3e06439c h1:AqsttAyEyIEsNz5WLRwuRwjiT5CMDUfLk6cFJDVPebs= github.com/karalabe/usb v0.0.3-0.20230711191512-61db3e06439c/go.mod h1:Od972xHfMJowv7NGVDiWVxk2zxnWgjLlJzE+F4F7AGU= +github.com/kataras/blocks v0.0.7/go.mod h1:UJIU97CluDo0f+zEjbnbkeMRlvYORtmc1304EeyXf4I= +github.com/kataras/golog v0.1.8/go.mod h1:rGPAin4hYROfk1qT9wZP6VY2rsb4zzc37QpdPjdkqVw= +github.com/kataras/iris/v12 v12.2.0/go.mod h1:BLzBpEunc41GbE68OUaQlqX4jzi791mx5HU04uPb90Y= +github.com/kataras/pio v0.0.11/go.mod h1:38hH6SWH6m4DKSYmRhlrCJ5WItwWgCVrTNU62XZyUvI= +github.com/kataras/sitemap v0.0.6/go.mod h1:dW4dOCNs896OR1HmG+dMLdT7JjDk7mYBzoIRwuj5jA4= +github.com/kataras/tunnel v0.0.4/go.mod h1:9FkU4LaeifdMWqZu7o20ojmW4B7hdhv2CMLwfnHGpYw= +github.com/kevinburke/ssh_config v1.2.0/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM= github.com/kevinms/leakybucket-go v0.0.0-20200115003610-082473db97ca/go.mod h1:ph+C5vpnCcQvKBwJwKLTK3JLNGnBXYlG7m7JjoC/zYA= github.com/kilic/bls12-381 v0.1.0 h1:encrdjqKMEvabVQ7qYOKu1OvhqpK4s47wDYtNiPtlp4= github.com/kilic/bls12-381 v0.1.0/go.mod h1:vDTTHJONJ6G+P2R74EhnyotQDTliQDnFEwhdmfzw1ig= github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/errcheck v1.6.2/go.mod h1:nXw/i/MfnvRHqXa7XXmQMUB0oNFGuBrNI8d8NLy0LPw= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kkHAIKE/contextcheck v1.1.3/go.mod h1:PG/cwd6c0705/LM0KTr1acO2gORUxkSVWyLJOFW5qoo= github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4= github.com/klauspost/compress v1.4.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= github.com/klauspost/compress v1.9.8/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= @@ -907,14 +1191,22 @@ github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kulti/thelper v0.6.3/go.mod h1:DsqKShOvP40epevkFrvIwkCMNYxMeTNjdWL4dqWHZ6I= +github.com/kunwardeep/paralleltest v1.0.6/go.mod h1:Y0Y0XISdZM5IKm3TREQMZ6iteqn1YuwCsJO/0kL9Zes= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= +github.com/kyoh86/exportloopref v0.1.8/go.mod h1:1tUcJeiioIs7VWe5gcOObrux3lb66+sBqGZrRkMwPgg= github.com/labstack/echo/v4 v4.2.1/go.mod h1:AA49e0DZ8kk5jTOOCKNuPR6oTnBS0dYiM4FW1e6jwpg= 
+github.com/labstack/echo/v4 v4.10.0/go.mod h1:S/T/5fy/GigaXnHTkh0ZGe4LpkkQysvRjFMSUTkDRNQ= github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL7NoOu+k= +github.com/labstack/gommon v0.4.0/go.mod h1:uW6kP17uPlLJsD3ijUYn3/M5bAxtlZhMI6m3MFxTMTM= +github.com/ldez/gomoddirectives v0.2.3/go.mod h1:cpgBogWITnCfRq2qGoDkKMEVSaarhdBr6g8G04uz6d0= +github.com/ldez/tagliatelle v0.3.1/go.mod h1:8s6WJQwEYHbKZDsp/LjArytKOG8qaMrKQQ3mFukHs88= github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c= github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8= github.com/leodido/go-urn v1.2.3 h1:6BE2vPT0lqoz3fmOesHZiaiFh7889ssCo2GMvLCfiuA= github.com/leodido/go-urn v1.2.3/go.mod h1:7ZrI8mTSeBSHl/UaRyKQW1qZeMgak41ANeCNaVckg+4= +github.com/leonklingele/grouper v1.1.0/go.mod h1:uk3I3uDfi9B6PeUjsCKi6ndcf63Uy7snXgR4yDYQVDY= github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.10.7 h1:p7ZhMD+KsSRozJr34udlUrhboJwWAgCg34+/ZZNvZZw= github.com/lib/pq v1.10.7/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= @@ -1049,12 +1341,14 @@ github.com/libp2p/go-yamux/v2 v2.3.0/go.mod h1:iTU+lOIn/2h0AgKcL49clNTwfEw+WSfDY github.com/libp2p/go-yamux/v4 v4.0.1 h1:FfDR4S1wj6Bw2Pqbc8Uz7pCxeRBPbwsBbEdfwiCypkQ= github.com/libp2p/go-yamux/v4 v4.0.1/go.mod h1:NWjl8ZTLOGlozrXSOZ/HlfG++39iKNnM5wwmtQP1YB4= github.com/libp2p/zeroconf/v2 v2.1.1/go.mod h1:fuJqLnUwZTshS3U/bMRJ3+ow/v9oid1n0DmyYyNO1Xs= +github.com/libp2p/zeroconf/v2 v2.2.0/go.mod h1:fuJqLnUwZTshS3U/bMRJ3+ow/v9oid1n0DmyYyNO1Xs= github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= github.com/logrusorgru/aurora v2.0.3+incompatible h1:tOpm7WcpBTn4fjmVfgpQq0EfczGlG91VSDkswnjF5A8= github.com/logrusorgru/aurora v2.0.3+incompatible/go.mod h1:7rIyQOR62GCctdiQpZ/zOJlFyk6y+94wXzv6RNZgaR4= github.com/lucas-clemente/quic-go v0.23.0/go.mod h1:paZuzjXCE5mj6sikVLMvqXk8lJV2AsqtJ6bDhjEfxx0= github.com/lucas-clemente/quic-go v0.24.0/go.mod h1:paZuzjXCE5mj6sikVLMvqXk8lJV2AsqtJ6bDhjEfxx0= +github.com/lufeee/execinquery v1.2.1/go.mod h1:EC7DrEKView09ocscGHC+apXMIaorh4xqSxS/dy8SbM= github.com/lunixbochs/vtclean v0.0.0-20180621232353-2d01aacdc34a/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI= github.com/lunixbochs/vtclean v1.0.0 h1:xu2sLAri4lGiovBDQKxl5mrXyESr3gUr5m5SM5+LVb8= github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI= @@ -1062,19 +1356,25 @@ github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0Q github.com/magiconair/properties v1.7.4-0.20170902060319-8d7837e64d3c/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/magiconair/properties v1.8.6/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= +github.com/mailgun/raymond/v2 v2.0.48/go.mod h1:lsgvL50kgt1ylcFJYZiULi5fjPBkkhNfj4KA0W54Z18= github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod 
h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/manifoldco/promptui v0.7.0 h1:3l11YT8tm9MnwGFQ4kETwkzpAwY2Jt9lCrumCUW4+z4= github.com/manifoldco/promptui v0.7.0/go.mod h1:n4zTdgP0vr0S3w7/O/g98U+e0gwLScEXGwov2nIKuGQ= +github.com/maratori/testableexamples v1.0.0/go.mod h1:4rhjL1n20TUTT4vdh3RDqSizKLyXp7K2u6HgraZCGzE= +github.com/maratori/testpackage v1.1.0/go.mod h1:PeAhzU8qkCwdGEMTEupsHJNlQu2gZopMC6RjbhmHeDc= github.com/marten-seemann/qpack v0.2.1/go.mod h1:F7Gl5L1jIgN1D11ucXefiuJS9UMVP2opoCp2jDKb7wc= github.com/marten-seemann/qtls-go1-15 v0.1.4/go.mod h1:GyFwywLKkRt+6mfU99csTEY1joMZz5vmB1WNZH3P81I= github.com/marten-seemann/qtls-go1-16 v0.1.4/go.mod h1:gNpI2Ol+lRS3WwSOtIUUtRwZEQMXjYK+dQSBFbethAk= github.com/marten-seemann/qtls-go1-17 v0.1.0/go.mod h1:fz4HIxByo+LlWcreM4CZOYNuz3taBQ8rN2X6FqvaWo8= github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd h1:br0buuQ854V8u83wA0rVZ8ttrq5CpaPZdvrK0LP2lOk= github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd/go.mod h1:QuCEs1Nt24+FYQEqAAncTDPJIuGs+LxK1MCiFL25pMU= +github.com/matoous/godox v0.0.0-20210227103229-6504466cf951/go.mod h1:1BELzlh859Sh1c6+90blK8lbYy0kwQf1bYlBhBysy1s= github.com/matryer/moq v0.0.0-20190312154309-6cfb0558e1bd/go.mod h1:9ELz6aaclSIGnZBoaSLZ3NAl1VTufbOrXBPvtcy6WiQ= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.0.10-0.20170816031813-ad5389df28cd/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= @@ -1110,8 +1410,12 @@ github.com/mattn/go-tty v0.0.0-20180907095812-13ff1204f104/go.mod h1:XPvLUNfbS4f github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0/go.mod h1:QUyp042oQthUoa9bqDv0ER0wrtXnBruoNd7aNjkbP+k= +github.com/mbilski/exhaustivestruct v1.2.0/go.mod h1:OeTBVxQWoEmB2J2JCHmXWPJ0aksxSUOUy+nvtVEfzXc= +github.com/mgechev/revive v1.2.4/go.mod h1:iAWlQishqCuj4yhV24FTnKSXGpbAA+0SckXB8GQMX/Q= github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4= +github.com/microcosm-cc/bluemonday v1.0.23/go.mod h1:mN70sk7UkkF8TUr2IGBpNN0jAgStuPzlK76QuruE/z4= github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= github.com/miekg/dns v1.1.43/go.mod h1:+evo5L0630/F6ca/Z9+GAqzhjGyn8/c+TBaOyfEl0V4= @@ -1158,6 +1462,8 @@ github.com/mitchellh/pointerstructure v1.2.0/go.mod h1:BRAsLI5zgXmw97Lf6s25bs8oh github.com/mmcloughlin/addchain v0.4.0 h1:SobOdjm2xLj1KkXN5/n0xTIWyZA2+s99UCY1iPfkHRY= github.com/mmcloughlin/addchain v0.4.0/go.mod h1:A86O+tHqZLMNO4w6ZZ4FlVQEadcoqkyU72HC5wJ4RlU= github.com/mmcloughlin/profile v0.1.1/go.mod h1:IhHD7q1ooxgwTgjxQYkACGA77oFTDdFVejUS1/tS/qU= +github.com/moby/buildkit v0.10.4/go.mod 
h1:Yajz9vt1Zw5q9Pp4pdb3TCSUXJBIroIQGQ3TTs/sLug= +github.com/moby/term v0.0.0-20220808134915-39b0c02b01ae/go.mod h1:E2VnQOmVuvZB6UYnnDB0qG5Nq/1tD9acaOpo6xmt0Kw= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -1167,6 +1473,8 @@ github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9G github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 h1:RWengNIwukTxcDr9M+97sNutRR1RKhG96O6jWumTTnw= github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8= +github.com/moricho/tparallel v0.2.1/go.mod h1:fXEIZxG2vdfl0ZF8b42f5a78EhjjD5mX8qUplsoSU4k= +github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= github.com/mr-tron/base58 v1.1.0/go.mod h1:xcD2VGqlgYjBdcBLw+TuYLr8afG+Hj8g2eTVqeSzSU8= github.com/mr-tron/base58 v1.1.1/go.mod h1:xcD2VGqlgYjBdcBLw+TuYLr8afG+Hj8g2eTVqeSzSU8= github.com/mr-tron/base58 v1.1.2/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= @@ -1232,20 +1540,28 @@ github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8m github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= +github.com/nakabonne/nestif v0.3.1/go.mod h1:9EtoZochLn5iUprVDmDjqGKPofoUEBL8U4Ngq6aY7OE= github.com/naoina/go-stringutil v0.1.0 h1:rCUeRUHjBjGTSHl0VC00jUPLz8/F9dDzYI70Hzifhks= github.com/naoina/go-stringutil v0.1.0/go.mod h1:XJ2SJL9jCtBh+P9q5btrd/Ylo8XwT/h1USek5+NqSA0= github.com/naoina/toml v0.1.2-0.20170918210437-9fafd6967416 h1:shk/vn9oCoOTmwcouEdwIeOtOGA/ELRUw/GwvxwfT+0= github.com/naoina/toml v0.1.2-0.20170918210437-9fafd6967416/go.mod h1:NBIhNtsFMo3G2szEBne+bO4gS192HuIYRqfvOWb4i1E= github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg= github.com/nats-io/jwt v0.3.2/go.mod h1:/euKqTS1ZD+zzjYrY7pseZrTtWQSjujC7xjPc8wL6eU= +github.com/nats-io/jwt/v2 v2.0.3/go.mod h1:VRP+deawSXyhNjXmxPCHskrR6Mq50BqpEI5SEcNiGlY= github.com/nats-io/nats-server/v2 v2.1.2/go.mod h1:Afk+wRZqkMQs/p45uXdrVLuab3gwv3Z8C4HTBu8GD/k= +github.com/nats-io/nats-server/v2 v2.5.0/go.mod h1:Kj86UtrXAL6LwYRA6H4RqzkHhK0Vcv2ZnKD5WbQ1t3g= github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w= +github.com/nats-io/nats.go v1.12.1/go.mod h1:BPko4oXsySz4aSWeFgOHLZs3G4Jq4ZAyE6/zMCxRT6w= github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= +github.com/nats-io/nkeys v0.3.0/go.mod h1:gvUNGjVcM2IPr5rCsRsC6Wb3Hr2CQAm08dsxtV6A5y4= github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= +github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354/go.mod h1:KSVJerMDfblTH7p5MZaTt+8zaT2iEk3AkVb9PQdZuE8= github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod 
h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo= github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM= +github.com/nishanths/exhaustive v0.8.3/go.mod h1:qj+zJJUgJ76tR92+25+03oYUhzF4R7/2Wk7fGTfCHmg= github.com/nishanths/predeclared v0.0.0-20200524104333-86fad755b4d3/go.mod h1:nt3d53pc1VYcphSCIaYAJtnPYnr3Zyn8fMq2wvPGPso= +github.com/nishanths/predeclared v0.2.2/go.mod h1:RROzoN6TnGQupbC+lqggsOlcgysk3LMK/HI84Mp280c= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= @@ -1281,6 +1597,9 @@ github.com/onsi/gomega v1.30.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8P github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= github.com/openconfig/gnmi v0.0.0-20190823184014-89b2bf29312c/go.mod h1:t+O9It+LKzfOAhKTT5O0ehDix+MTqbtT0T9t+7zzOvc= github.com/openconfig/reference v0.0.0-20190727015836-8dfd928c9696/go.mod h1:ym2A+zigScwkSEb/cVQB0/ZMpU3rqiH6X7WRRsxgOGw= +github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= +github.com/opencontainers/image-spec v1.1.0-rc2/go.mod h1:3OVijpioIKYWTqjiG0zfF6wvoJ4fAXGbjdZuI2NgsRQ= +github.com/opencontainers/runc v1.1.3/go.mod h1:1J5XiS+vdZ3wCyZybsuxXZWGrgSr8fFJHLXuG2PsnNg= github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.2.0 h1:z97+pHb3uELt/yiAWD691HNHQIF07bE7dzrbT927iTk= github.com/opencontainers/runtime-spec v1.2.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= @@ -1296,6 +1615,8 @@ github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTm github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= +github.com/openzipkin/zipkin-go v0.2.5/go.mod h1:KpXfKdgRDnnhsxw4pNIH9Md5lyFqKUa4YDFlwRYAMyE= +github.com/ory/dockertest v3.3.5+incompatible/go.mod h1:1vX4m9wsvi00u5bseYwXaSnhNrne+V0E6LAcBILJdPs= github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM= github.com/panjf2000/ants/v2 v2.4.5 h1:kcGvjXB7ea0MrzzszpnlVFthhYKoFxLi75nRbsq01HY= github.com/panjf2000/ants/v2 v2.4.5/go.mod h1:f6F0NZVFsGCp5A7QW/Zj/m92atWwOkY0OIhFxRNFr4A= @@ -1309,7 +1630,10 @@ github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtP github.com/pborman/uuid v1.2.1/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= github.com/pelletier/go-toml v1.0.1-0.20170904195809-1d6b12b7cb29/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= +github.com/pelletier/go-toml/v2 v2.0.5/go.mod h1:OMHamSCAODeSsVrwwvcJOaoN0LIUIaFVNZzmWyNfXas= github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac= +github.com/performancecopilot/speed/v4 v4.0.0/go.mod h1:qxrSyuDGrTOWfV+uKRFhfxw6h/4HXRGUiZiufxo49BM= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= 
github.com/peterh/liner v1.0.1-0.20180619022028-8c1271fcf47f/go.mod h1:xIteQHvHuaLYG9IFj6mSxM0fCKrs34IrEQUhOYuGPHc= github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7/go.mod h1:CRroGNssyjTd/qIG2FyxByd2S8JEAZXBl4qUrZf8GS0= @@ -1317,12 +1641,30 @@ github.com/peterh/liner v1.2.0 h1:w/UPXyl5GfahFxcTOz2j9wCIHNI+pUPr2laqpojKNCg= github.com/peterh/liner v1.2.0/go.mod h1:CRroGNssyjTd/qIG2FyxByd2S8JEAZXBl4qUrZf8GS0= github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5 h1:q2e307iGHPdTGp0hoxKjt1H5pDo6utceo3dQVK3I5XQ= github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5/go.mod h1:jvVRKCrJTQWu0XVbaOlby/2lO20uSCHEMzzplHXte1o= +github.com/phayes/checkstyle v0.0.0-20170904204023-bfd46e6a821d/go.mod h1:3OzsM7FXDQlpCiw2j81fOmAwQLnZnLGXVKUzeKQXIAw= github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU= github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pierrec/lz4 v2.4.1+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4= github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= +github.com/pion/datachannel v1.5.5/go.mod h1:iMz+lECmfdCMqFRhXhcA/219B0SQlbpoR2V118yimL0= +github.com/pion/dtls/v2 v2.2.8/go.mod h1:8WiMkebSHFD0T+dIU+UeBaoV7kDhOW5oDCzZ7WZ/F9s= +github.com/pion/ice/v2 v2.3.11/go.mod h1:hPcLC3kxMa+JGRzMHqQzjoSj3xtE9F+eoncmXLlCL4E= +github.com/pion/interceptor v0.1.25/go.mod h1:wkbPYAak5zKsfpVDYMtEfWEy8D4zL+rpxCxPImLOg3Y= +github.com/pion/logging v0.2.2/go.mod h1:k0/tDVsRCX2Mb2ZEmTqNa7CWsQPc+YYCB7Q+5pahoms= +github.com/pion/mdns v0.0.9/go.mod h1:2JA5exfxwzXiCihmxpTKgFUpiQws2MnipoPK09vecIc= +github.com/pion/randutil v0.1.0/go.mod h1:XcJrSMMbbMRhASFVOlj/5hQial/Y8oH/HVo7TBZq+j8= +github.com/pion/rtcp v1.2.13/go.mod h1:sn6qjxvnwyAkkPzPULIbVqSKI5Dv54Rv7VG0kNxh9L4= +github.com/pion/rtp v1.8.3/go.mod h1:pBGHaFt/yW7bf1jjWAoUjpSNoDnw98KTMg+jWWvziqU= +github.com/pion/sctp v1.8.9/go.mod h1:cMLT45jqw3+jiJCrtHVwfQLnfR0MGZ4rgOJwUOIqLkI= +github.com/pion/sdp/v3 v3.0.6/go.mod h1:iiFWFpQO8Fy3S5ldclBkpXqmWy02ns78NOKoLLL0YQw= +github.com/pion/srtp/v2 v2.0.18/go.mod h1:0KJQjA99A6/a0DOVTu1PhDSw0CXF2jTkqOoMg3ODqdA= +github.com/pion/stun v0.6.1/go.mod h1:/hO7APkX4hZKu/D0f2lHzNyvdkTGtIy3NDmLR7kSz/8= +github.com/pion/transport/v2 v2.2.4/go.mod h1:q2U/tf9FEfnSBGSW6w5Qp5PFWRLRj3NjLhCCgpRK4p0= +github.com/pion/turn/v2 v2.1.4/go.mod h1:huEpByKKHix2/b9kmTAM3YoX6MKP+/D//0ClgUYR2fY= +github.com/pion/webrtc/v3 v3.2.23/go.mod h1:1CaT2fcZzZ6VZA+O1i9yK2DU4EOcXVvSbWG9pr5jefs= +github.com/pjbgf/sha1cd v0.2.3/go.mod h1:HOK9QrgzdHpbc2Kzip0Q1yi3M2MFGPADtR6HjG65m5M= github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 h1:KoWmjvw+nsYOo29YJK9vDA65RGE3NrOnUtO7a+RF9HU= github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI= github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= @@ -1332,10 +1674,15 @@ github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA= github.com/pkg/profile v1.5.0/go.mod h1:qBsxPvzyUincmltOk6iyRVxHYg4adc0OFOv72ZdLa18= +github.com/pkg/profile v1.6.0/go.mod 
h1:qBsxPvzyUincmltOk6iyRVxHYg4adc0OFOv72ZdLa18= github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= github.com/pkg/term v0.0.0-20180730021639-bffc007b7fd5/go.mod h1:eCbImbZ95eXtAUIbLAuAVnBnwf83mjf6QIVH8SHYwqQ= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pointlander/compress v1.1.1-0.20190518213731-ff44bd196cc3/go.mod h1:q5NXNGzqj5uPnVuhGkZfmgHqNUhf15VLi6L9kW0VEc0= +github.com/pointlander/jetset v1.0.1-0.20190518214125-eee7eff80bd4/go.mod h1:RdR1j20Aj5pB6+fw6Y9Ur7lMHpegTEjY1vc19hEZL40= +github.com/pointlander/peg v1.0.1/go.mod h1:5hsGDQR2oZI4QoWz0/Kdg3VSVEC31iJw/b7WjqCBGRI= +github.com/polyfloyd/go-errorlint v1.0.5/go.mod h1:APVvOesVSAnne5SClsPxPdfvZTVDojXh1/G3qb5wjGI= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g= github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U= @@ -1415,8 +1762,13 @@ github.com/prysmaticlabs/prysm v0.0.0-20220124113610-e26cde5e091b h1:XULhE6PdzCY github.com/prysmaticlabs/prysm v0.0.0-20220124113610-e26cde5e091b/go.mod h1:bFzDfaj4xtisRey9RPkMJOhOJVwmtH3FChV7NPKV1Nk= github.com/prysmaticlabs/prysm/v5 v5.0.3 h1:hUi0gu6v7aXmMQkl2GbrLoWcMhDNIbkVxRwrZchKbxU= github.com/prysmaticlabs/prysm/v5 v5.0.3/go.mod h1:v5Oz4A4cWljfxUmW7SDk/VBzoYnei+lzwJogvSqUZVs= +github.com/quasilyte/go-ruleguard v0.3.18/go.mod h1:lOIzcYlgxrQ2sGJ735EHXmf/e9MJ516j16K/Ifcttvs= +github.com/quasilyte/gogrep v0.0.0-20220828223005-86e4605de09f/go.mod h1:Cm9lpz9NZjEoL1tgZ2OgeUKPIxL1meE7eo60Z6Sk+Ng= +github.com/quasilyte/regex/syntax v0.0.0-20200407221936-30656e2c4a95/go.mod h1:rlzQ04UMyJXu/aOvhd8qT+hvDrFpiwqp8MRXDY9szc0= +github.com/quasilyte/stdinfo v0.0.0-20220114132959-f7386bf02567/go.mod h1:DWNGW8A4Y+GyBgPuaQJuWiy0XYftx4Xm/y5Jqk9I6VQ= github.com/quic-go/qpack v0.4.0 h1:Cr9BXA1sQS2SmDUWjSofMPNKmvF6IiIfDRmgU0w1ZCo= github.com/quic-go/qpack v0.4.0/go.mod h1:UZVnYIfi5GRk+zI9UMaCPsmZ2xKJP7XBUvVyT1Knj9A= +github.com/quic-go/qtls-go1-20 v0.3.4/go.mod h1:X9Nh97ZL80Z+bX/gUXMbipO6OxdiDi58b/fMC9mAL+k= github.com/quic-go/quic-go v0.42.0 h1:uSfdap0eveIl8KXnipv9K7nlwZ5IqLlYOpJ58u5utpM= github.com/quic-go/quic-go v0.42.0/go.mod h1:132kz4kL3F9vxhW3CtQJLDVwcFe5wdWeJXXijhsO57M= github.com/quic-go/webtransport-go v0.6.0 h1:CvNsKqc4W2HljHJnoT+rMmbRJybShZ0YPFDD3NxaZLY= @@ -1443,21 +1795,31 @@ github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUz github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= github.com/rs/cors v1.8.2 h1:KCooALfAYGs415Cwu5ABvv9n9509fSiG5SQJn/AQo4U= github.com/rs/cors v1.8.2/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= +github.com/rs/zerolog v1.27.0/go.mod h1:7frBqO0oezxmnO7GF86FY++uy8I0Tk/If5ni1G9Qc0U= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/ryancurrah/gomodguard v1.2.4/go.mod h1:+Kem4VjWwvFpUJRJSwa16s1tBJe+vbv02+naTow2f6M= +github.com/ryanrolds/sqlclosecheck v0.3.0/go.mod h1:1gREqxyTGR3lVtpngyFo3hZAgk0KCtEdgEkHwDbigdA= github.com/ryanuber/columnize 
v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= +github.com/sanposhiho/wastedassign/v2 v2.0.6/go.mod h1:KyZ0MWTwxxBmfwn33zh3k1dmsbF2ud9pAAGfoLfjhtI= github.com/sasha-s/go-deadlock v0.3.1 h1:sqv7fDNShgjcaxkO0JNcOAlr8B9+cV5Ey/OB71efZx0= github.com/sasha-s/go-deadlock v0.3.1/go.mod h1:F73l+cr82YSh10GxyRI6qZiCgK64VaZjwesgfQ1/iLM= +github.com/sashamelentyev/interfacebloat v1.1.0/go.mod h1:+Y9yU5YdTkrNvoX0xHc84dxiN1iBi9+G8zZIhPVoNjQ= +github.com/sashamelentyev/usestdlibvars v1.20.0/go.mod h1:0GaP+ecfZMXShS0A94CJn6aEuPRILv8h/VuWI9n1ygg= github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= +github.com/schollz/closestmatch v2.1.0+incompatible/go.mod h1:RtP1ddjLong6gTkbtmuhtR2uUrrJOpYzYRvbcPAid+g= github.com/schollz/progressbar/v3 v3.3.4 h1:nMinx+JaEm/zJz4cEyClQeAw5rsYSB5th3xv+5lV6Vg= github.com/schollz/progressbar/v3 v3.3.4/go.mod h1:Rp5lZwpgtYmlvmGo1FyDwXMqagyRBQYSDwzlP9QDu84= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= +github.com/securego/gosec/v2 v2.13.1/go.mod h1:EO1sImBMBWFjOTFzMWfTRrZW6M15gm60ljzrmy/wtHo= github.com/segmentio/kafka-go v0.1.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo= github.com/segmentio/kafka-go v0.2.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= +github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= +github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c/go.mod h1:/PevMnwAxekIXwN8qQyfc5gl2NlkB3CQlkizAbOkeBs= github.com/shirou/gopsutil v3.21.4-0.20210419000835-c7a38de76ee5+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI= github.com/shirou/gopsutil v3.21.11+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= @@ -1490,12 +1852,19 @@ github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrf github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0= github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/sivchari/containedctx v1.0.2/go.mod h1:PwZOeqm4/DLoJOqMSIJs3aKqXRX4YO+uXww087KZ7Bw= +github.com/sivchari/nosnakecase v1.7.0/go.mod h1:CwDzrzPea40/GB6uynrNLiorAlgFRvRbFSgJx2Gs+QY= +github.com/sivchari/tenv v1.7.0/go.mod h1:64yStXKSOxDfX47NlhVwND4dHwfZDdbp2Lyl018Icvg= +github.com/skeema/knownhosts v1.1.0/go.mod h1:sKFq3RD6/TKZkSWn8boUbDC7Qkgcv+8XXijpFO6roag= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/smola/gocompat v0.2.0/go.mod h1:1B0MlxbmoZNo3h8guHp8HztB3BSYR5itql9qtVc0ypY= +github.com/snikch/goodman v0.0.0-20171125024755-10e37e294daa/go.mod h1:oJyF+mSPHbB5mVY2iO9KV3pTt/QbIkGaO8gQ2WrDbP4= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= +github.com/sonatard/noctx v0.0.1/go.mod h1:9D2D/EoULe8Yy2joDHJj7bv3sZoq9AaSb8B4lqBjiZI= github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= github.com/sourcegraph/annotate 
v0.0.0-20160123013949-f4cad6c6324d/go.mod h1:UdhH50NIW0fCiwBSr0co2m7BnFLdv4fQTgdqdJTHFeE= +github.com/sourcegraph/go-diff v0.6.1/go.mod h1:iBszgVvyxdc8SFZ7gm69go2KDdt3ag071iBaWPF6cjs= github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e/go.mod h1:HuIsMU8RRBOtsCgI77wP899iHVBQpCmg4ErYMZB+2IA= github.com/spacemonkeygo/openssl v0.0.0-20181017203307-c2dcc5cca94a/go.mod h1:7AyxJNCJ7SBZ1MfVQCWD6Uqo2oubI2Eq2y2eqf+A5r0= github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572/go.mod h1:w0SWMsp6j9O/dk4/ZpIhL+3CkG8ofA2vuv7k+ltqUMc= @@ -1509,11 +1878,14 @@ github.com/spf13/afero v1.10.0 h1:EaGW2JJh15aKOejeuJ+wpFSHnbd7GE6Wvp3TsNhb6LY= github.com/spf13/afero v1.10.0/go.mod h1:UBogFpq8E9Hx+xc5CNTTEpTnuHVmXDwZcZcE1eb/UhQ= github.com/spf13/cast v1.1.0/go.mod h1:r2rcYCSwa1IExKTDiTfzaxqT2FNHs8hODu4LnUfgKEg= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= github.com/spf13/cobra v1.0.1-0.20201006035406-b97b5ead31f7/go.mod h1:yk5b0mALVusDL5fMM6Rd1wgnoO5jUPhwsQ6LQAJTidQ= +github.com/spf13/cobra v1.6.1/go.mod h1:IOw/AERYS7UzyrGinqmz6HLUo219MORXGxhbaJUqzrY= github.com/spf13/jwalterweatherman v0.0.0-20170901151539-12bd96e66386/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.1-0.20170901120850-7aff26db30c1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= @@ -1523,14 +1895,19 @@ github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An github.com/spf13/viper v1.0.0/go.mod h1:A8kyI5cUJhb8N+3pkfONlcEcZbueH6nhAm0Fq7SrnBM= github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= +github.com/spf13/viper v1.13.0/go.mod h1:Icm2xNL3/8uyh/wFuB1jI7TiTNKp8632Nwegu+zgdYw= github.com/src-d/envconfig v1.0.0/go.mod h1:Q9YQZ7BKITldTBnoxsE5gOeB5y66RyPXeue/R4aaNBc= +github.com/ssgreg/nlreturn/v2 v2.2.1/go.mod h1:E/iiPB78hV7Szg2YfRgyIrk1AD6JVMTRkkxBiELzh2I= github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4/go.mod h1:RZLeN1LMWmRsyYjvAu+I6Dm9QmlDaIIt+Y+4Kd7Tp+Q= github.com/status-im/keycard-go v0.0.0-20200402102358-957c09536969/go.mod h1:RZLeN1LMWmRsyYjvAu+I6Dm9QmlDaIIt+Y+4Kd7Tp+Q= github.com/status-im/keycard-go v0.2.0 h1:QDLFswOQu1r5jsycloeQh3bVU8n/NatHHaZobtDnDzA= github.com/status-im/keycard-go v0.2.0/go.mod h1:wlp8ZLbsmrF6g6WjugPAx+IzoLrkdf9+mHxBEeo3Hbg= +github.com/stbenjam/no-sprintf-host-port v0.1.1/go.mod h1:TLhvtIvONRzdmkFiio4O8LHsN9N74I+PhRquPsxpL0I= github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= +github.com/streadway/amqp v1.0.0/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod 
h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI= +github.com/streadway/handy v0.0.0-20200128134331-0f66f006fb2e/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= @@ -1551,6 +1928,7 @@ github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= +github.com/subosito/gotenv v1.4.1/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0= github.com/supranational/blst v0.3.5/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw= github.com/supranational/blst v0.3.11 h1:LyU6FolezeWAhvQk0k6O/d49jqgO52MSDDfYgbeoEm4= github.com/supranational/blst v0.3.11/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw= @@ -1558,6 +1936,9 @@ github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpP github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY= github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc= github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA= +github.com/tdakkota/asciicheck v0.1.1/go.mod h1:yHp0ai0Z9gUljN3o0xMhYJnH/IcvkdTBOX2fmJ93JEM= +github.com/tdewolff/minify/v2 v2.12.4/go.mod h1:h+SRvSIX3kwgwTFOpSckvSxgax3uy8kZTSF1Ojrr3bk= +github.com/tdewolff/parse/v2 v2.6.4/go.mod h1:woz0cgbLwFdtbjJu8PIKxhW05KplTFQkOdX78o+Jgrs= github.com/tecbot/gorocksdb v0.0.0-20191217155057-f0fad39f321c h1:g+WoO5jjkqGAzHWCjJB1zZfXPIAaDpzXIEJ0eS6B5Ok= github.com/tecbot/gorocksdb v0.0.0-20191217155057-f0fad39f321c/go.mod h1:ahpPrc7HpcfEWDQRZEmnXMzHY03mLDYMCxeDzy46i+8= github.com/templexxx/cpufeat v0.0.0-20180724012125-cef66df7f161/go.mod h1:wM7WEvslTq+iOEAMDLSzhVuOt5BRZ05WirO+b09GHQU= @@ -1566,6 +1947,7 @@ github.com/tendermint/go-amino v0.14.1 h1:o2WudxNfdLNBwMyl2dqOJxiro5rfrEaU0Ugs6o github.com/tendermint/go-amino v0.14.1/go.mod h1:i/UKE5Uocn+argJJBb12qTZsCDBcAYMbR92AaJVmKso= github.com/tendermint/iavl v0.12.0 h1:xcaFAr+ycqCj7WN1RzL2EfcBioRDOHcU1oWcg83K028= github.com/tendermint/iavl v0.12.0/go.mod h1:EoKMMv++tDOL5qKKVnoIqtVPshRrEPeJ0WsgDOLAauM= +github.com/tetafro/godot v1.4.11/go.mod h1:LR3CJpxDVGlYOWn3ZZg1PgNZdTUvzsZWu8xaEohUpn8= github.com/thomaso-mirodin/intmath v0.0.0-20160323211736-5dc6d854e46e h1:cR8/SYRgyQCt5cNCMniB/ZScMkhI9nk8U5C7SbISXjo= github.com/thomaso-mirodin/intmath v0.0.0-20160323211736-5dc6d854e46e/go.mod h1:Tu4lItkATkonrYuvtVjG0/rhy15qrNGNTjPdaphtZ/8= github.com/tidwall/gjson v1.10.2 h1:APbLGOM0rrEkd8WBw9C24nllro4ajFuJu0Sc9hRz8Bo= @@ -1578,6 +1960,8 @@ github.com/tidwall/tinylru v1.1.0 h1:XY6IUfzVTU9rpwdhKUF6nQdChgCdGjkMfLzbWyiau6I github.com/tidwall/tinylru v1.1.0/go.mod h1:3+bX+TJ2baOLMWTnlyNWHh4QMnFyARg2TLTQ6OFbzw8= github.com/tidwall/wal v1.1.7 h1:emc1TRjIVsdKKSnpwGBAcsAGg0767SvUk8+ygx7Bb+4= github.com/tidwall/wal v1.1.7/go.mod h1:r6lR1j27W9EPalgHiB7zLJDYu3mzW5BQP5KrzBpYY/E= +github.com/timakin/bodyclose v0.0.0-20210704033933-f49887972144/go.mod h1:Qimiffbc6q9tBWlVV6x0P9sat/ao1xEkREYPPj9hphk= +github.com/timonwong/loggercheck v0.9.3/go.mod 
h1:wUqnk9yAOIKtGA39l1KLE9Iz0QiTocu/YZoOf+OzFdw= github.com/tinylib/msgp v1.0.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= github.com/tjfoc/gmsm v1.3.0/go.mod h1:HaUcFuY0auTiaHB9MHFGCPx5IaLhTUd2atbCFBQXn9w= github.com/tklauser/go-sysconf v0.3.5/go.mod h1:MkWzOF4RMCshBAMXuhXJs64Rte09mITnppBXY/rYEFI= @@ -1588,6 +1972,8 @@ github.com/tklauser/numcpus v0.7.0 h1:yjuerZP127QG9m5Zh/mSO4wqurYil27tHrqwRoRjpr github.com/tklauser/numcpus v0.7.0/go.mod h1:bb6dMVcj8A42tSE7i32fsIUCbQNllK5iDguyOZRUzAY= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tomarrell/wrapcheck/v2 v2.7.0/go.mod h1:ao7l5p0aOlUNJKI0qVwB4Yjlqutd0IvAB9Rdwyilxvg= +github.com/tommy-muehle/go-mnd/v2 v2.5.1/go.mod h1:WsUAkMJMYww6l/ufffCD3m+P7LEvr8TnZn9lwVDlgzw= github.com/trailofbits/go-mutexasserts v0.0.0-20200708152505-19999e7d3cef/go.mod h1:+SV/613m53DNAmlXPTWGZhIyt4E/qDvn9g/lOPRiy0A= github.com/trailofbits/go-mutexasserts v0.0.0-20230328101604-8cdbc5f3d279 h1:+LynomhWB+14Plp/bOONEAZCtvCZk4leRbTvNzNVkL0= github.com/trailofbits/go-mutexasserts v0.0.0-20230328101604-8cdbc5f3d279/go.mod h1:GA3+Mq3kt3tYAfM0WZCu7ofy+GW9PuGysHfhr+6JX7s= @@ -1598,17 +1984,27 @@ github.com/tyler-smith/go-bip39 v1.1.0/go.mod h1:gUYDtqQw1JS3ZJ8UWVcGTGqqr6YIN3C github.com/uber/jaeger-client-go v2.25.0+incompatible h1:IxcNZ7WRY1Y3G4poYlx24szfsn/3LvK9QHCq9oQw8+U= github.com/uber/jaeger-client-go v2.25.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= +github.com/ugorji/go/codec v1.2.7/go.mod h1:WGN1fab3R1fzQlVQTkfxVtIBhWDRqOviHU95kRgeqEY= +github.com/ultraware/funlen v0.0.3/go.mod h1:Dp4UiAus7Wdb9KUZsYWZEWiRzGuM2kXM1lPbfaF6xhA= +github.com/ultraware/whitespace v0.0.5/go.mod h1:aVMh/gQve5Maj9hQ/hg+F75lr/X5A89uZnzAmWSineA= github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/urfave/cli/v2 v2.3.0/go.mod h1:LJmUH05zAU44vOAcrfzZQKsZbVcdbOG8rtL3/XcUArI= github.com/urfave/cli/v2 v2.26.0 h1:3f3AMg3HpThFNT4I++TKOejZO8yU55t3JnnSr4S4QEI= github.com/urfave/cli/v2 v2.26.0/go.mod h1:8qnjx1vcq5s2/wpsqoZFndg2CE5tNFyrTvS6SinrnYQ= +github.com/urfave/negroni v1.0.0/go.mod h1:Meg73S6kFm/4PpbYdq35yYWoCZ9mS/YSx+lKnmiohz4= +github.com/uudashr/gocognit v1.0.6/go.mod h1:nAIUuVBnYU7pcninia3BHOvQkpQCeO76Uscky5BOwcY= github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= +github.com/valyala/fasthttp v1.40.0/go.mod h1:t/G+3rLek+CyY9bnIE+YlMRddxVAAGjhxndDB4i4C0I= github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8= github.com/valyala/fasttemplate v1.2.1/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ= +github.com/valyala/fasttemplate v1.2.2/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ= +github.com/vektra/mockery/v2 v2.14.0/go.mod h1:bnD1T8tExSgPD1ripLkDbr60JA9VtQeu12P3wgLZd7M= github.com/viant/assertly v0.4.8/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49uaYMPRU= github.com/viant/toolbox v0.24.0/go.mod h1:OxMCG57V0PXuIP2HNQrtJf2CjqdmbrOx5EkMILuUhzM= +github.com/vmihailenco/msgpack/v5 v5.3.5/go.mod 
h1:7xyJ9e+0+9SaZT0Wt1RGleJXzli6Q/V5KbhBonMG9jc= +github.com/vmihailenco/tagparser/v2 v2.0.0/go.mod h1:Wri+At7QHww0WTrCBeu4J6bNtoV6mEfg5OIWRZA9qds= github.com/wealdtech/go-bytesutil v1.1.1 h1:ocEg3Ke2GkZ4vQw5lp46rmO+pfqCCTgq35gqOy8JKVc= github.com/wealdtech/go-bytesutil v1.1.1/go.mod h1:jENeMqeTEU8FNZyDFRVc7KqBdRKSnJ9CCh26TcuNb9s= github.com/wealdtech/go-eth2-types/v2 v2.5.2 h1:tiA6T88M6XQIbrV5Zz53l1G5HtRERcxQfmET225V4Ls= @@ -1628,8 +2024,10 @@ github.com/whyrusleeping/timecache v0.0.0-20160911033111-cfcb2f1abfee/go.mod h1: github.com/willf/bitset v1.1.3 h1:ekJIKh6+YbUIVt9DfNbkR5d6aFcFTLDRyJNAACURBg8= github.com/willf/bitset v1.1.3/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= github.com/x-cray/logrus-prefixed-formatter v0.5.2/go.mod h1:2duySbKsL6M18s5GU7VPsoEPHyzalCE06qoARUCeBBE= +github.com/xanzy/ssh-agent v0.3.3/go.mod h1:6dzNDKs0J9rVPHPhaGCukekBHKqfl+L3KghI1Bc68Uw= github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I= github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= +github.com/xhit/go-str2duration/v2 v2.1.0/go.mod h1:ohY8p+0f07DiV6Em5LKB0s2YpLtXVyJfNt1+BlmyAsU= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xlab/treeprint v0.0.0-20180616005107-d6fb6747feb6/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= @@ -1637,6 +2035,9 @@ github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 h1:bAn7/zixMGCfxrRT github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673/go.mod h1:N3UwUGtsrSj3ccvlPHLoLsHnpR27oXr4ZE984MbSER8= github.com/xtaci/kcp-go v5.4.20+incompatible/go.mod h1:bN6vIwHQbfHaHtFpEssmWsN45a+AZwO7eyRCmEIbtvE= github.com/xtaci/lossyconn v0.0.0-20190602105132-8df528c0c9ae/go.mod h1:gXtu8J62kEgmN++bm9BVICuT/e8yiLI2KFobd/TRFsE= +github.com/yagipy/maintidx v1.0.0/go.mod h1:0qNf/I/CCZXSMhsRsrEPDZ+DkekpKLXAJfsTACwgXLk= +github.com/yeya24/promlinter v0.2.0/go.mod h1:u54lkmBOZrpEbQQ6gox2zWKKLKu2SGe+2KOiextY+IA= +github.com/yosssi/ace v0.0.5/go.mod h1:ALfIzm2vT7t5ZE7uoIZqF3TQ7SAOyupFZnkrF5id+K0= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= @@ -1646,12 +2047,18 @@ github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1 github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= github.com/yusufpapurcu/wmi v1.2.3 h1:E1ctvB7uKFMOJw3fdOW32DwGE9I7t++CRUEMKvFoFiw= github.com/yusufpapurcu/wmi v1.2.3/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= +gitlab.com/bosi/decorder v0.2.3/go.mod h1:9K1RB5+VPNQYtXtTDAzd2OEftsZb1oV0IrJrzChSdGE= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= go.etcd.io/bbolt v1.3.7 h1:j+zJOnnEjF/kyHlDDgGnVL/AIqIJPq8UoB2GSNfkUfQ= go.etcd.io/bbolt v1.3.7/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw= go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= +go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= 
+go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= +go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ= +go.etcd.io/etcd/client/v3 v3.5.0/go.mod h1:AIKXXVX/DQXtfTEqBryiLTUXwON+GuvO6Z7lLS/oTh0= +go.etcd.io/gofail v0.1.0/go.mod h1:VZBCXYGZhHAinaBiiqYvuDynvahNsAyLFwB3kEHKz1M= go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA= go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= @@ -1665,6 +2072,9 @@ go.opencensus.io v0.22.6/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.36.3/go.mod h1:Dts42MGkzZne2yCru741+bFiTMWkIj/LLRizad7b9tw= +go.opentelemetry.io/otel v1.11.0/go.mod h1:H2KtuEphyMvlhZ+F7tg9GRhAOe60moNx61Ex+WmiKkk= +go.opentelemetry.io/otel/trace v1.11.0/go.mod h1:nyYjis9jy0gytE9LXGU+/m1sHTKbRY0fX0hulNNDP1U= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= @@ -1760,6 +2170,7 @@ golang.org/x/exp v0.0.0-20200331195152-e8c3332aa8e5/go.mod h1:4M0jN8W1tt0AVLNr8H golang.org/x/exp v0.0.0-20200513190911-00229845015e/go.mod h1:4M0jN8W1tt0AVLNr8HDosyJCDCDuyL9N9+3m7wDWgKw= golang.org/x/exp v0.0.0-20240213143201-ec583247a57a h1:HinSgX1tJRX3KsL//Gxynpw5CTOAIPhgL4W8PNiIpVE= golang.org/x/exp v0.0.0-20240213143201-ec583247a57a/go.mod h1:CxmFvTBINI24O/j8iY7H1xHzx2i4OsyguNBmN/uPtqc= +golang.org/x/exp/typeparams v0.0.0-20231108232855-2478ac86f678/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= @@ -1877,6 +2288,7 @@ golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.16.0 h1:aDkGMBSYxElaoP81NpoUoz2oo2R2wHdZpGToUxfyQrQ= golang.org/x/oauth2 v0.16.0/go.mod h1:hqZ+0LWXsiVoZpeld6jVt06P3adbS2Uu911W1SsJv2o= golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw= +golang.org/x/perf v0.0.0-20230113213139-801c7ef9e5c5/go.mod h1:UBKtEnL8aqnd+0JHqZ+2qoMDwtuy6cYhhKNoHLBiTQc= golang.org/x/sync v0.0.0-20170517211232-f52d1811a629/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -2007,6 +2419,7 @@ golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y= golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/telemetry 
v0.0.0-20240208230135-b75ee8823808/go.mod h1:KG1lNk5ZFNssSZLrpVb4sMXKMpGwGXOxSG3rnu2gZQQ= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -2114,9 +2527,11 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= gonum.org/v1/gonum v0.0.0-20181121035319-3f7ecaa7e8ca/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= gonum.org/v1/gonum v0.6.0/go.mod h1:9mxDZsDKxgMAuccQkewq682L+0eCu4dCN2yonUJTCLU= +gonum.org/v1/gonum v0.8.2/go.mod h1:oe/vMfY3deqTw+1EZJhuvEW2iwGF1bW9wwu7XCu0+v0= gonum.org/v1/netlib v0.0.0-20181029234149-ec6d1f5cefe6/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc= @@ -2284,6 +2699,7 @@ gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/jcmturner/aescts.v1 v1.0.1/go.mod h1:nsR8qBOg+OucoIW+WMhB3GspUQXq9XorLnQb9XtvcOo= gopkg.in/jcmturner/dnsutils.v1 v1.0.1/go.mod h1:m3v+5svpVOhtFAP/wSz+yzh4Mc0Fg7eRhxkJMWSIz9Q= gopkg.in/jcmturner/goidentity.v3 v3.0.0/go.mod h1:oG2kH0IvSYNIu80dVAyu/yoefjq1mNfM5bm88whjWx4= @@ -2317,6 +2733,7 @@ gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= +gotest.tools/v3 v3.2.0/go.mod h1:Mcr9QNxkg0uMvy/YElmo4SpXgJKWgQvYrT7Kw5RzJ1A= grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o= honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= @@ -2327,6 +2744,7 @@ honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.1.3/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las= +honnef.co/go/tools v0.5.0-0.dev.0.20231205170804-aef76f4feee2/go.mod 
h1:J8YyqAvNy0yWpeKUOCONA1m2G4hH2CqUSo/5ZO2/5UA= k8s.io/api v0.18.3/go.mod h1:UOaMwERbqJMfeeeHc8XJKawj4P9TgDRnViIqqBeH2QA= k8s.io/api v0.20.0 h1:WwrYoZNM1W1aQEbyl8HNG+oWGzLpZQBlcerS9BQw9yI= k8s.io/api v0.20.0/go.mod h1:HyLC5l5eoS/ygQYl1BXBgFzWNlkHiAuyNAbevIn+FKg= @@ -2354,6 +2772,10 @@ k8s.io/utils v0.0.0-20201110183641-67b214c5f920 h1:CbnUZsM497iRC5QMVkHwyl8s2tB3g k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= lukechampine.com/blake3 v1.2.1 h1:YuqqRuaqsGV71BV/nm9xlI0MKUv4QC54jQnBChWbGnI= lukechampine.com/blake3 v1.2.1/go.mod h1:0OFRp7fBtAylGVCO40o87sbupkyIGgbpv1+M1k1LM6k= +mvdan.cc/gofumpt v0.4.0/go.mod h1:PljLOHDeZqgS8opHRKLzp2It2VBuSdteAgqUfzMTxlQ= +mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed/go.mod h1:Xkxe497xwlCKkIaQYRfC7CSLworTXY9RMqwhhCm+8Nc= +mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b/go.mod h1:2odslEg/xrtNQqCYg2/jCoyKnw3vv5biOc3JnIcYfL4= +mvdan.cc/unparam v0.0.0-20220706161116-678bad134442/go.mod h1:F/Cxw/6mVrNKqrR2YjFf5CaW0Bw4RL8RfbEf4GRggJk= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= diff --git a/graphql/graphql_test.go b/graphql/graphql_test.go deleted file mode 100644 index f3f9d1778a..0000000000 --- a/graphql/graphql_test.go +++ /dev/null @@ -1,487 +0,0 @@ -// Copyright 2019 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package graphql - -import ( - "context" - "encoding/json" - "fmt" - "io" - "math/big" - "net/http" - "strings" - "testing" - "time" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/consensus" - "github.com/ethereum/go-ethereum/consensus/beacon" - "github.com/ethereum/go-ethereum/consensus/ethash" - "github.com/ethereum/go-ethereum/core" - "github.com/ethereum/go-ethereum/core/rawdb" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/core/vm" - "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/eth" - "github.com/ethereum/go-ethereum/eth/ethconfig" - "github.com/ethereum/go-ethereum/eth/filters" - "github.com/ethereum/go-ethereum/node" - "github.com/ethereum/go-ethereum/params" - - "github.com/stretchr/testify/assert" -) - -func TestBuildSchema(t *testing.T) { - ddir := t.TempDir() - // Copy config - conf := node.DefaultConfig - conf.DataDir = ddir - stack, err := node.New(&conf) - if err != nil { - t.Fatalf("could not create new node: %v", err) - } - defer stack.Close() - // Make sure the schema can be parsed and matched up to the object model. 
- if _, err := newHandler(stack, nil, nil, []string{}, []string{}); err != nil { - t.Errorf("Could not construct GraphQL handler: %v", err) - } -} - -// Tests that a graphQL request is successfully handled when graphql is enabled on the specified endpoint -func TestGraphQLBlockSerialization(t *testing.T) { - stack := createNode(t) - defer stack.Close() - genesis := &core.Genesis{ - Config: params.AllEthashProtocolChanges, - GasLimit: 11500000, - Difficulty: big.NewInt(1048576), - } - newGQLService(t, stack, false, genesis, 10, func(i int, gen *core.BlockGen) {}) - // start node - if err := stack.Start(); err != nil { - t.Fatalf("could not start node: %v", err) - } - - for i, tt := range []struct { - body string - want string - code int - }{ - { // Should return latest block - body: `{"query": "{block{number}}","variables": null}`, - want: `{"data":{"block":{"number":"0xa"}}}`, - code: 200, - }, - { // Should return info about latest block - body: `{"query": "{block{number,gasUsed,gasLimit}}","variables": null}`, - want: `{"data":{"block":{"number":"0xa","gasUsed":"0x0","gasLimit":"0xaf79e0"}}}`, - code: 200, - }, - { - body: `{"query": "{block(number:0){number,gasUsed,gasLimit}}","variables": null}`, - want: `{"data":{"block":{"number":"0x0","gasUsed":"0x0","gasLimit":"0xaf79e0"}}}`, - code: 200, - }, - { - body: `{"query": "{block(number:-1){number,gasUsed,gasLimit}}","variables": null}`, - want: `{"data":{"block":null}}`, - code: 200, - }, - { - body: `{"query": "{block(number:-500){number,gasUsed,gasLimit}}","variables": null}`, - want: `{"data":{"block":null}}`, - code: 200, - }, - { - body: `{"query": "{block(number:\"0\"){number,gasUsed,gasLimit}}","variables": null}`, - want: `{"data":{"block":{"number":"0x0","gasUsed":"0x0","gasLimit":"0xaf79e0"}}}`, - code: 200, - }, - { - body: `{"query": "{block(number:\"-33\"){number,gasUsed,gasLimit}}","variables": null}`, - want: `{"data":{"block":null}}`, - code: 200, - }, - { - body: `{"query": "{block(number:\"1337\"){number,gasUsed,gasLimit}}","variables": null}`, - want: `{"data":{"block":null}}`, - code: 200, - }, - { - body: `{"query": "{block(number:\"0x0\"){number,gasUsed,gasLimit}}","variables": null}`, - want: `{"data":{"block":{"number":"0x0","gasUsed":"0x0","gasLimit":"0xaf79e0"}}}`, - //want: `{"errors":[{"message":"strconv.ParseInt: parsing \"0x0\": invalid syntax"}],"data":{}}`, - code: 200, - }, - { - body: `{"query": "{block(number:\"a\"){number,gasUsed,gasLimit}}","variables": null}`, - want: `{"errors":[{"message":"strconv.ParseInt: parsing \"a\": invalid syntax"}],"data":{}}`, - code: 400, - }, - { - body: `{"query": "{bleh{number}}","variables": null}"`, - want: `{"errors":[{"message":"Cannot query field \"bleh\" on type \"Query\".","locations":[{"line":1,"column":2}]}]}`, - code: 400, - }, - // should return `estimateGas` as decimal - { - body: `{"query": "{block{ estimateGas(data:{}) }}"}`, - want: `{"data":{"block":{"estimateGas":"0xd221"}}}`, - code: 200, - }, - // should return `status` as decimal - { - body: `{"query": "{block {number call (data : {from : \"0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b\", to: \"0x6295ee1b4f6dd65047762f924ecd367c17eabf8f\", data :\"0x12a7b914\"}){data status}}}"}`, - want: `{"data":{"block":{"number":"0xa","call":{"data":"0x","status":"0x1"}}}}`, - code: 200, - }, - { - body: `{"query": "{blocks {number}}"}`, - want: `{"errors":[{"message":"from block number must be specified","path":["blocks"]}],"data":null}`, - code: 400, - }, - } { - resp, err := http.Post(fmt.Sprintf("%s/graphql", 
stack.HTTPEndpoint()), "application/json", strings.NewReader(tt.body)) - if err != nil { - t.Fatalf("could not post: %v", err) - } - bodyBytes, err := io.ReadAll(resp.Body) - resp.Body.Close() - if err != nil { - t.Fatalf("could not read from response body: %v", err) - } - if have := string(bodyBytes); have != tt.want { - t.Errorf("testcase %d %s,\nhave:\n%v\nwant:\n%v", i, tt.body, have, tt.want) - } - if tt.code != resp.StatusCode { - t.Errorf("testcase %d %s,\nwrong statuscode, have: %v, want: %v", i, tt.body, resp.StatusCode, tt.code) - } - if ctype := resp.Header.Get("Content-Type"); ctype != "application/json" { - t.Errorf("testcase %d \nwrong Content-Type, have: %v, want: %v", i, ctype, "application/json") - } - } -} - -func TestGraphQLBlockSerializationEIP2718(t *testing.T) { - // Account for signing txes - var ( - key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") - address = crypto.PubkeyToAddress(key.PublicKey) - funds = big.NewInt(1000000000000000) - dad = common.HexToAddress("0x0000000000000000000000000000000000000dad") - ) - stack := createNode(t) - defer stack.Close() - genesis := &core.Genesis{ - Config: params.AllEthashProtocolChanges, - GasLimit: 11500000, - Difficulty: big.NewInt(1048576), - Alloc: types.GenesisAlloc{ - address: {Balance: funds}, - // The address 0xdad sloads 0x00 and 0x01 - dad: { - Code: []byte{byte(vm.PC), byte(vm.PC), byte(vm.SLOAD), byte(vm.SLOAD)}, - Nonce: 0, - Balance: big.NewInt(0), - }, - }, - BaseFee: big.NewInt(params.InitialBaseFee), - } - signer := types.LatestSigner(genesis.Config) - newGQLService(t, stack, false, genesis, 1, func(i int, gen *core.BlockGen) { - gen.SetCoinbase(common.Address{1}) - tx, _ := types.SignNewTx(key, signer, &types.LegacyTx{ - Nonce: uint64(0), - To: &dad, - Value: big.NewInt(100), - Gas: 50000, - GasPrice: big.NewInt(params.InitialBaseFee), - }) - gen.AddTx(tx) - tx, _ = types.SignNewTx(key, signer, &types.AccessListTx{ - ChainID: genesis.Config.ChainID, - Nonce: uint64(1), - To: &dad, - Gas: 30000, - GasPrice: big.NewInt(params.InitialBaseFee), - Value: big.NewInt(50), - AccessList: types.AccessList{{ - Address: dad, - StorageKeys: []common.Hash{{0}}, - }}, - }) - gen.AddTx(tx) - }) - // start node - if err := stack.Start(); err != nil { - t.Fatalf("could not start node: %v", err) - } - - for i, tt := range []struct { - body string - want string - code int - }{ - { - body: `{"query": "{block {number transactions { from { address } to { address } value hash type accessList { address storageKeys } index}}}"}`, - want: `{"data":{"block":{"number":"0x1","transactions":[{"from":{"address":"0x71562b71999873db5b286df957af199ec94617f7"},"to":{"address":"0x0000000000000000000000000000000000000dad"},"value":"0x64","hash":"0xd864c9d7d37fade6b70164740540c06dd58bb9c3f6b46101908d6339db6a6a7b","type":"0x0","accessList":[],"index":"0x0"},{"from":{"address":"0x71562b71999873db5b286df957af199ec94617f7"},"to":{"address":"0x0000000000000000000000000000000000000dad"},"value":"0x32","hash":"0x19b35f8187b4e15fb59a9af469dca5dfa3cd363c11d372058c12f6482477b474","type":"0x1","accessList":[{"address":"0x0000000000000000000000000000000000000dad","storageKeys":["0x0000000000000000000000000000000000000000000000000000000000000000"]}],"index":"0x1"}]}}}`, - code: 200, - }, - } { - resp, err := http.Post(fmt.Sprintf("%s/graphql", stack.HTTPEndpoint()), "application/json", strings.NewReader(tt.body)) - if err != nil { - t.Fatalf("could not post: %v", err) - } - bodyBytes, err := io.ReadAll(resp.Body) - 
resp.Body.Close() - if err != nil { - t.Fatalf("could not read from response body: %v", err) - } - if have := string(bodyBytes); have != tt.want { - t.Errorf("testcase %d %s,\nhave:\n%v\nwant:\n%v", i, tt.body, have, tt.want) - } - if tt.code != resp.StatusCode { - t.Errorf("testcase %d %s,\nwrong statuscode, have: %v, want: %v", i, tt.body, resp.StatusCode, tt.code) - } - } -} - -// Tests that a graphQL request is not handled successfully when graphql is not enabled on the specified endpoint -func TestGraphQLHTTPOnSamePort_GQLRequest_Unsuccessful(t *testing.T) { - stack := createNode(t) - defer stack.Close() - if err := stack.Start(); err != nil { - t.Fatalf("could not start node: %v", err) - } - body := strings.NewReader(`{"query": "{block{number}}","variables": null}`) - resp, err := http.Post(fmt.Sprintf("%s/graphql", stack.HTTPEndpoint()), "application/json", body) - if err != nil { - t.Fatalf("could not post: %v", err) - } - resp.Body.Close() - // make sure the request is not handled successfully - assert.Equal(t, http.StatusNotFound, resp.StatusCode) -} - -func TestGraphQLConcurrentResolvers(t *testing.T) { - var ( - key, _ = crypto.GenerateKey() - addr = crypto.PubkeyToAddress(key.PublicKey) - dadStr = "0x0000000000000000000000000000000000000dad" - dad = common.HexToAddress(dadStr) - genesis = &core.Genesis{ - Config: params.AllEthashProtocolChanges, - GasLimit: 11500000, - Difficulty: big.NewInt(1048576), - Alloc: types.GenesisAlloc{ - addr: {Balance: big.NewInt(params.Ether)}, - dad: { - // LOG0(0, 0), LOG0(0, 0), RETURN(0, 0) - Code: common.Hex2Bytes("60006000a060006000a060006000f3"), - Nonce: 0, - Balance: big.NewInt(0), - }, - }, - } - signer = types.LatestSigner(genesis.Config) - stack = createNode(t) - ) - defer stack.Close() - - var tx *types.Transaction - handler, chain := newGQLService(t, stack, false, genesis, 1, func(i int, gen *core.BlockGen) { - tx, _ = types.SignNewTx(key, signer, &types.LegacyTx{To: &dad, Gas: 100000, GasPrice: big.NewInt(params.InitialBaseFee)}) - gen.AddTx(tx) - tx, _ = types.SignNewTx(key, signer, &types.LegacyTx{To: &dad, Nonce: 1, Gas: 100000, GasPrice: big.NewInt(params.InitialBaseFee)}) - gen.AddTx(tx) - tx, _ = types.SignNewTx(key, signer, &types.LegacyTx{To: &dad, Nonce: 2, Gas: 100000, GasPrice: big.NewInt(params.InitialBaseFee)}) - gen.AddTx(tx) - }) - // start node - if err := stack.Start(); err != nil { - t.Fatalf("could not start node: %v", err) - } - - for i, tt := range []struct { - body string - want string - }{ - // Multiple txes race to get/set the block hash. - { - body: "{block { transactions { logs { account { address } } } } }", - want: fmt.Sprintf(`{"block":{"transactions":[{"logs":[{"account":{"address":"%s"}},{"account":{"address":"%s"}}]},{"logs":[{"account":{"address":"%s"}},{"account":{"address":"%s"}}]},{"logs":[{"account":{"address":"%s"}},{"account":{"address":"%s"}}]}]}}`, dadStr, dadStr, dadStr, dadStr, dadStr, dadStr), - }, - // Multiple fields of a tx race to resolve it. Happens in this case - // because resolving the tx body belonging to a log is delayed. 
- { - body: `{block { logs(filter: {}) { transaction { nonce value gasPrice }}}}`, - want: `{"block":{"logs":[{"transaction":{"nonce":"0x0","value":"0x0","gasPrice":"0x3b9aca00"}},{"transaction":{"nonce":"0x0","value":"0x0","gasPrice":"0x3b9aca00"}},{"transaction":{"nonce":"0x1","value":"0x0","gasPrice":"0x3b9aca00"}},{"transaction":{"nonce":"0x1","value":"0x0","gasPrice":"0x3b9aca00"}},{"transaction":{"nonce":"0x2","value":"0x0","gasPrice":"0x3b9aca00"}},{"transaction":{"nonce":"0x2","value":"0x0","gasPrice":"0x3b9aca00"}}]}}`, - }, - // Multiple txes of a block race to set/retrieve receipts of a block. - { - body: "{block { transactions { status gasUsed } } }", - want: `{"block":{"transactions":[{"status":"0x1","gasUsed":"0x5508"},{"status":"0x1","gasUsed":"0x5508"},{"status":"0x1","gasUsed":"0x5508"}]}}`, - }, - // Multiple fields of block race to resolve header and body. - { - body: "{ block { number hash gasLimit ommerCount transactionCount totalDifficulty } }", - want: fmt.Sprintf(`{"block":{"number":"0x1","hash":"%s","gasLimit":"0xaf79e0","ommerCount":"0x0","transactionCount":"0x3","totalDifficulty":"0x200000"}}`, chain[len(chain)-1].Hash()), - }, - // Multiple fields of a block race to resolve the header and body. - { - body: fmt.Sprintf(`{ transaction(hash: "%s") { block { number hash gasLimit ommerCount transactionCount } } }`, tx.Hash()), - want: fmt.Sprintf(`{"transaction":{"block":{"number":"0x1","hash":"%s","gasLimit":"0xaf79e0","ommerCount":"0x0","transactionCount":"0x3"}}}`, chain[len(chain)-1].Hash()), - }, - // Account fields race the resolve the state object. - { - body: fmt.Sprintf(`{ block { account(address: "%s") { balance transactionCount code } } }`, dadStr), - want: `{"block":{"account":{"balance":"0x0","transactionCount":"0x0","code":"0x60006000a060006000a060006000f3"}}}`, - }, - // Test values for a non-existent account. 
- { - body: fmt.Sprintf(`{ block { account(address: "%s") { balance transactionCount code } } }`, "0x1111111111111111111111111111111111111111"), - want: `{"block":{"account":{"balance":"0x0","transactionCount":"0x0","code":"0x"}}}`, - }, - } { - res := handler.Schema.Exec(context.Background(), tt.body, "", map[string]interface{}{}) - if res.Errors != nil { - t.Fatalf("failed to execute query for testcase #%d: %v", i, res.Errors) - } - have, err := json.Marshal(res.Data) - if err != nil { - t.Fatalf("failed to encode graphql response for testcase #%d: %s", i, err) - } - if string(have) != tt.want { - t.Errorf("response unmatch for testcase #%d.\nExpected:\n%s\nGot:\n%s\n", i, tt.want, have) - } - } -} - -func TestWithdrawals(t *testing.T) { - var ( - key, _ = crypto.GenerateKey() - addr = crypto.PubkeyToAddress(key.PublicKey) - - genesis = &core.Genesis{ - Config: params.AllEthashProtocolChanges, - GasLimit: 11500000, - Difficulty: common.Big1, - Alloc: types.GenesisAlloc{ - addr: {Balance: big.NewInt(params.Ether)}, - }, - } - signer = types.LatestSigner(genesis.Config) - stack = createNode(t) - ) - defer stack.Close() - - handler, _ := newGQLService(t, stack, true, genesis, 1, func(i int, gen *core.BlockGen) { - tx, _ := types.SignNewTx(key, signer, &types.LegacyTx{To: &common.Address{}, Gas: 100000, GasPrice: big.NewInt(params.InitialBaseFee)}) - gen.AddTx(tx) - gen.AddWithdrawal(&types.Withdrawal{ - Validator: 5, - Address: common.Address{}, - Amount: 10, - }) - }) - // start node - if err := stack.Start(); err != nil { - t.Fatalf("could not start node: %v", err) - } - - for i, tt := range []struct { - body string - want string - }{ - // Genesis block has no withdrawals. - { - body: "{block(number: 0) { withdrawalsRoot withdrawals { index } } }", - want: `{"block":{"withdrawalsRoot":null,"withdrawals":null}}`, - }, - { - body: "{block(number: 1) { withdrawalsRoot withdrawals { validator amount } } }", - want: `{"block":{"withdrawalsRoot":"0x8418fc1a48818928f6692f148e9b10e99a88edc093b095cb8ca97950284b553d","withdrawals":[{"validator":"0x5","amount":"0xa"}]}}`, - }, - } { - res := handler.Schema.Exec(context.Background(), tt.body, "", map[string]interface{}{}) - if res.Errors != nil { - t.Fatalf("failed to execute query for testcase #%d: %v", i, res.Errors) - } - have, err := json.Marshal(res.Data) - if err != nil { - t.Fatalf("failed to encode graphql response for testcase #%d: %s", i, err) - } - if string(have) != tt.want { - t.Errorf("response unmatch for testcase #%d.\nhave:\n%s\nwant:\n%s", i, have, tt.want) - } - } -} - -func createNode(t *testing.T) *node.Node { - stack, err := node.New(&node.Config{ - HTTPHost: "127.0.0.1", - HTTPPort: 0, - WSHost: "127.0.0.1", - WSPort: 0, - HTTPTimeouts: node.DefaultConfig.HTTPTimeouts, - }) - if err != nil { - t.Fatalf("could not create node: %v", err) - } - return stack -} - -func newGQLService(t *testing.T, stack *node.Node, shanghai bool, gspec *core.Genesis, genBlocks int, genfunc func(i int, gen *core.BlockGen)) (*handler, []*types.Block) { - ethConf := ðconfig.Config{ - Genesis: gspec, - NetworkId: 1337, - TrieCleanCache: 5, - TrieDirtyCache: 5, - TrieTimeout: 60 * time.Minute, - SnapshotCache: 5, - StateScheme: rawdb.HashScheme, - } - var engine consensus.Engine = ethash.NewFaker() - if shanghai { - engine = beacon.NewFaker() - chainCfg := gspec.Config - chainCfg.TerminalTotalDifficultyPassed = true - chainCfg.TerminalTotalDifficulty = common.Big0 - // GenerateChain will increment timestamps by 10. - // Shanghai upgrade at block 1. 
- shanghaiTime := uint64(5) - chainCfg.ShanghaiTime = &shanghaiTime - } - ethBackend, err := eth.New(stack, ethConf) - if err != nil { - t.Fatalf("could not create eth backend: %v", err) - } - // Create some blocks and import them - chain, _ := core.GenerateChain(params.AllEthashProtocolChanges, ethBackend.BlockChain().Genesis(), - engine, ethBackend.ChainDb(), genBlocks, genfunc) - _, err = ethBackend.BlockChain().InsertChain(chain) - if err != nil { - t.Fatalf("could not create import blocks: %v", err) - } - // Set up handler - filterSystem := filters.NewFilterSystem(ethBackend.APIBackend, filters.Config{}) - handler, err := newHandler(stack, ethBackend.APIBackend, filterSystem, []string{}, []string{}) - if err != nil { - t.Fatalf("could not create graphql service: %v", err) - } - return handler, chain -} diff --git a/internal/cmdtest/test_cmd.go b/internal/cmdtest/test_cmd.go deleted file mode 100644 index 4890d0b7c6..0000000000 --- a/internal/cmdtest/test_cmd.go +++ /dev/null @@ -1,300 +0,0 @@ -// Copyright 2017 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package cmdtest - -import ( - "bufio" - "bytes" - "fmt" - "io" - "os" - "os/exec" - "regexp" - "strings" - "sync" - "sync/atomic" - "syscall" - "testing" - "text/template" - "time" - - "github.com/ethereum/go-ethereum/internal/reexec" -) - -func NewTestCmd(t *testing.T, data interface{}) *TestCmd { - return &TestCmd{T: t, Data: data} -} - -type TestCmd struct { - // For total convenience, all testing methods are available. - *testing.T - - Func template.FuncMap - Data interface{} - Cleanup func() - - cmd *exec.Cmd - stdout *bufio.Reader - stdin io.WriteCloser - stderr *testlogger - // Err will contain the process exit error or interrupt signal error - Err error -} - -var id atomic.Int32 - -// Run exec's the current binary using name as argv[0] which will trigger the -// reexec init function for that name (e.g. "geth-test" in cmd/geth/run_test.go) -func (tt *TestCmd) Run(name string, args ...string) { - id.Add(1) - tt.stderr = &testlogger{t: tt.T, name: fmt.Sprintf("%d", id.Load())} - tt.cmd = &exec.Cmd{ - Path: reexec.Self(), - Args: append([]string{name}, args...), - Stderr: tt.stderr, - } - stdout, err := tt.cmd.StdoutPipe() - if err != nil { - tt.Fatal(err) - } - tt.stdout = bufio.NewReader(stdout) - if tt.stdin, err = tt.cmd.StdinPipe(); err != nil { - tt.Fatal(err) - } - if err := tt.cmd.Start(); err != nil { - tt.Fatal(err) - } -} - -// InputLine writes the given text to the child's stdin. 
-// This method can also be called from an expect template, e.g.: -// -// geth.expect(`Passphrase: {{.InputLine "password"}}`) -func (tt *TestCmd) InputLine(s string) string { - io.WriteString(tt.stdin, s+"\n") - return "" -} - -func (tt *TestCmd) SetTemplateFunc(name string, fn interface{}) { - if tt.Func == nil { - tt.Func = make(map[string]interface{}) - } - tt.Func[name] = fn -} - -// Expect runs its argument as a template, then expects the -// child process to output the result of the template within 5s. -// -// If the template starts with a newline, the newline is removed -// before matching. -func (tt *TestCmd) Expect(tplsource string) { - // Generate the expected output by running the template. - tpl := template.Must(template.New("").Funcs(tt.Func).Parse(tplsource)) - wantbuf := new(bytes.Buffer) - if err := tpl.Execute(wantbuf, tt.Data); err != nil { - panic(err) - } - // Trim exactly one newline at the beginning. This makes tests look - // much nicer because all expect strings are at column 0. - want := bytes.TrimPrefix(wantbuf.Bytes(), []byte("\n")) - if err := tt.matchExactOutput(want); err != nil { - tt.Fatal(err) - } - tt.Logf("Matched stdout text:\n%s", want) -} - -// Output reads all output from stdout, and returns the data. -func (tt *TestCmd) Output() []byte { - var buf []byte - tt.withKillTimeout(func() { buf, _ = io.ReadAll(tt.stdout) }) - return buf -} - -func (tt *TestCmd) matchExactOutput(want []byte) error { - buf := make([]byte, len(want)) - n := 0 - tt.withKillTimeout(func() { n, _ = io.ReadFull(tt.stdout, buf) }) - buf = buf[:n] - if n < len(want) || !bytes.Equal(buf, want) { - // Grab any additional buffered output in case of mismatch - // because it might help with debugging. - buf = append(buf, make([]byte, tt.stdout.Buffered())...) - tt.stdout.Read(buf[n:]) - // Find the mismatch position. - for i := 0; i < n; i++ { - if want[i] != buf[i] { - return fmt.Errorf("output mismatch at ◊:\n---------------- (stdout text)\n%s◊%s\n---------------- (expected text)\n%s", - buf[:i], buf[i:n], want) - } - } - if n < len(want) { - return fmt.Errorf("not enough output, got until ◊:\n---------------- (stdout text)\n%s\n---------------- (expected text)\n%s◊%s", - buf, want[:n], want[n:]) - } - } - return nil -} - -// ExpectRegexp expects the child process to output text matching the -// given regular expression within 5s. -// -// Note that an arbitrary amount of output may be consumed by the -// regular expression. This usually means that expect cannot be used -// after ExpectRegexp. -func (tt *TestCmd) ExpectRegexp(regex string) (*regexp.Regexp, []string) { - regex = strings.TrimPrefix(regex, "\n") - var ( - re = regexp.MustCompile(regex) - rtee = &runeTee{in: tt.stdout} - matches []int - ) - tt.withKillTimeout(func() { matches = re.FindReaderSubmatchIndex(rtee) }) - output := rtee.buf.Bytes() - if matches == nil { - tt.Fatalf("Output did not match:\n---------------- (stdout text)\n%s\n---------------- (regular expression)\n%s", - output, regex) - return re, nil - } - tt.Logf("Matched stdout text:\n%s", output) - var submatches []string - for i := 0; i < len(matches); i += 2 { - submatch := string(output[matches[i]:matches[i+1]]) - submatches = append(submatches, submatch) - } - return re, submatches -} - -// ExpectExit expects the child process to exit within 5s without -// printing any additional text on stdout. 
-func (tt *TestCmd) ExpectExit() { - var output []byte - tt.withKillTimeout(func() { - output, _ = io.ReadAll(tt.stdout) - }) - tt.WaitExit() - if tt.Cleanup != nil { - tt.Cleanup() - } - if len(output) > 0 { - tt.Errorf("Unmatched stdout text:\n%s", output) - } -} - -func (tt *TestCmd) WaitExit() { - tt.Err = tt.cmd.Wait() -} - -func (tt *TestCmd) Interrupt() { - tt.Err = tt.cmd.Process.Signal(os.Interrupt) -} - -// ExitStatus exposes the process' OS exit code -// It will only return a valid value after the process has finished. -func (tt *TestCmd) ExitStatus() int { - if tt.Err != nil { - exitErr := tt.Err.(*exec.ExitError) - if exitErr != nil { - if status, ok := exitErr.Sys().(syscall.WaitStatus); ok { - return status.ExitStatus() - } - } - } - return 0 -} - -// StderrText returns any stderr output written so far. -// The returned text holds all log lines after ExpectExit has -// returned. -func (tt *TestCmd) StderrText() string { - tt.stderr.mu.Lock() - defer tt.stderr.mu.Unlock() - return tt.stderr.buf.String() -} - -func (tt *TestCmd) CloseStdin() { - tt.stdin.Close() -} - -func (tt *TestCmd) Kill() { - tt.cmd.Process.Kill() - if tt.Cleanup != nil { - tt.Cleanup() - } -} - -func (tt *TestCmd) withKillTimeout(fn func()) { - timeout := time.AfterFunc(30*time.Second, func() { - tt.Log("killing the child process (timeout)") - tt.Kill() - }) - defer timeout.Stop() - fn() -} - -// testlogger logs all written lines via t.Log and also -// collects them for later inspection. -type testlogger struct { - t *testing.T - mu sync.Mutex - buf bytes.Buffer - name string -} - -func (tl *testlogger) Write(b []byte) (n int, err error) { - lines := bytes.Split(b, []byte("\n")) - for _, line := range lines { - if len(line) > 0 { - tl.t.Logf("(stderr:%v) %s", tl.name, line) - } - } - tl.mu.Lock() - tl.buf.Write(b) - tl.mu.Unlock() - return len(b), err -} - -// runeTee collects text read through it into buf. -type runeTee struct { - in interface { - io.Reader - io.ByteReader - io.RuneReader - } - buf bytes.Buffer -} - -func (rtee *runeTee) Read(b []byte) (n int, err error) { - n, err = rtee.in.Read(b) - rtee.buf.Write(b[:n]) - return n, err -} - -func (rtee *runeTee) ReadRune() (r rune, size int, err error) { - r, size, err = rtee.in.ReadRune() - if err == nil { - rtee.buf.WriteRune(r) - } - return r, size, err -} - -func (rtee *runeTee) ReadByte() (b byte, err error) { - b, err = rtee.in.ReadByte() - if err == nil { - rtee.buf.WriteByte(b) - } - return b, err -} diff --git a/internal/ethapi/api_test.go b/internal/ethapi/api_test.go deleted file mode 100644 index 8764f51fb6..0000000000 --- a/internal/ethapi/api_test.go +++ /dev/null @@ -1,2295 +0,0 @@ -// Copyright 2023 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . 
- -package ethapi - -import ( - "bytes" - "context" - "crypto/ecdsa" - "crypto/sha256" - "encoding/json" - "errors" - "fmt" - "math/big" - "os" - "path/filepath" - "reflect" - "testing" - "time" - - "github.com/holiman/uint256" - "github.com/stretchr/testify/require" - "golang.org/x/exp/slices" - - "github.com/ethereum/go-ethereum" - "github.com/ethereum/go-ethereum/accounts" - "github.com/ethereum/go-ethereum/accounts/keystore" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/common/hexutil" - "github.com/ethereum/go-ethereum/consensus" - "github.com/ethereum/go-ethereum/consensus/beacon" - "github.com/ethereum/go-ethereum/consensus/ethash" - "github.com/ethereum/go-ethereum/core" - "github.com/ethereum/go-ethereum/core/bloombits" - "github.com/ethereum/go-ethereum/core/rawdb" - "github.com/ethereum/go-ethereum/core/state" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/core/vm" - "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/crypto/kzg4844" - "github.com/ethereum/go-ethereum/ethdb" - "github.com/ethereum/go-ethereum/event" - "github.com/ethereum/go-ethereum/internal/blocktest" - "github.com/ethereum/go-ethereum/params" - "github.com/ethereum/go-ethereum/rpc" -) - -var emptyBlob = kzg4844.Blob{} -var emptyBlobCommit, _ = kzg4844.BlobToCommitment(emptyBlob) -var emptyBlobProof, _ = kzg4844.ComputeBlobProof(emptyBlob, emptyBlobCommit) -var emptyBlobHash common.Hash = kzg4844.CalcBlobHashV1(sha256.New(), &emptyBlobCommit) - -func testTransactionMarshal(t *testing.T, tests []txData, config *params.ChainConfig) { - t.Parallel() - var ( - signer = types.LatestSigner(config) - key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") - ) - - for i, tt := range tests { - var tx2 types.Transaction - tx, err := types.SignNewTx(key, signer, tt.Tx) - if err != nil { - t.Fatalf("test %d: signing failed: %v", i, err) - } - // Regular transaction - if data, err := json.Marshal(tx); err != nil { - t.Fatalf("test %d: marshalling failed; %v", i, err) - } else if err = tx2.UnmarshalJSON(data); err != nil { - t.Fatalf("test %d: sunmarshal failed: %v", i, err) - } else if want, have := tx.Hash(), tx2.Hash(); want != have { - t.Fatalf("test %d: stx changed, want %x have %x", i, want, have) - } - - // rpcTransaction - rpcTx := newRPCTransaction(tx, common.Hash{}, 0, 0, 0, nil, config) - if data, err := json.Marshal(rpcTx); err != nil { - t.Fatalf("test %d: marshalling failed; %v", i, err) - } else if err = tx2.UnmarshalJSON(data); err != nil { - t.Fatalf("test %d: unmarshal failed: %v", i, err) - } else if want, have := tx.Hash(), tx2.Hash(); want != have { - t.Fatalf("test %d: tx changed, want %x have %x", i, want, have) - } else { - want, have := tt.Want, string(data) - require.JSONEqf(t, want, have, "test %d: rpc json not match, want %s have %s", i, want, have) - } - } -} - -func TestTransaction_RoundTripRpcJSON(t *testing.T) { - var ( - config = params.AllEthashProtocolChanges - tests = allTransactionTypes(common.Address{0xde, 0xad}, config) - ) - testTransactionMarshal(t, tests, config) -} - -func TestTransactionBlobTx(t *testing.T) { - config := *params.TestChainConfig - config.ShanghaiTime = new(uint64) - config.CancunTime = new(uint64) - tests := allBlobTxs(common.Address{0xde, 0xad}, &config) - - testTransactionMarshal(t, tests, &config) -} - -type txData struct { - Tx types.TxData - Want string -} - -func allTransactionTypes(addr common.Address, config *params.ChainConfig) 
[]txData { - return []txData{ - { - Tx: &types.LegacyTx{ - Nonce: 5, - GasPrice: big.NewInt(6), - Gas: 7, - To: &addr, - Value: big.NewInt(8), - Data: []byte{0, 1, 2, 3, 4}, - V: big.NewInt(9), - R: big.NewInt(10), - S: big.NewInt(11), - }, - Want: `{ - "blockHash": null, - "blockNumber": null, - "from": "0x71562b71999873db5b286df957af199ec94617f7", - "gas": "0x7", - "gasPrice": "0x6", - "hash": "0x5f3240454cd09a5d8b1c5d651eefae7a339262875bcd2d0e6676f3d989967008", - "input": "0x0001020304", - "nonce": "0x5", - "to": "0xdead000000000000000000000000000000000000", - "transactionIndex": null, - "value": "0x8", - "type": "0x0", - "chainId": "0x539", - "v": "0xa96", - "r": "0xbc85e96592b95f7160825d837abb407f009df9ebe8f1b9158a4b8dd093377f75", - "s": "0x1b55ea3af5574c536967b039ba6999ef6c89cf22fc04bcb296e0e8b0b9b576f5" - }`, - }, { - Tx: &types.LegacyTx{ - Nonce: 5, - GasPrice: big.NewInt(6), - Gas: 7, - To: nil, - Value: big.NewInt(8), - Data: []byte{0, 1, 2, 3, 4}, - V: big.NewInt(32), - R: big.NewInt(10), - S: big.NewInt(11), - }, - Want: `{ - "blockHash": null, - "blockNumber": null, - "from": "0x71562b71999873db5b286df957af199ec94617f7", - "gas": "0x7", - "gasPrice": "0x6", - "hash": "0x806e97f9d712b6cb7e781122001380a2837531b0fc1e5f5d78174ad4cb699873", - "input": "0x0001020304", - "nonce": "0x5", - "to": null, - "transactionIndex": null, - "value": "0x8", - "type": "0x0", - "chainId": "0x539", - "v": "0xa96", - "r": "0x9dc28b267b6ad4e4af6fe9289668f9305c2eb7a3241567860699e478af06835a", - "s": "0xa0b51a071aa9bed2cd70aedea859779dff039e3630ea38497d95202e9b1fec7" - }`, - }, - { - Tx: &types.AccessListTx{ - ChainID: config.ChainID, - Nonce: 5, - GasPrice: big.NewInt(6), - Gas: 7, - To: &addr, - Value: big.NewInt(8), - Data: []byte{0, 1, 2, 3, 4}, - AccessList: types.AccessList{ - types.AccessTuple{ - Address: common.Address{0x2}, - StorageKeys: []common.Hash{types.EmptyRootHash}, - }, - }, - V: big.NewInt(32), - R: big.NewInt(10), - S: big.NewInt(11), - }, - Want: `{ - "blockHash": null, - "blockNumber": null, - "from": "0x71562b71999873db5b286df957af199ec94617f7", - "gas": "0x7", - "gasPrice": "0x6", - "hash": "0x121347468ee5fe0a29f02b49b4ffd1c8342bc4255146bb686cd07117f79e7129", - "input": "0x0001020304", - "nonce": "0x5", - "to": "0xdead000000000000000000000000000000000000", - "transactionIndex": null, - "value": "0x8", - "type": "0x1", - "accessList": [ - { - "address": "0x0200000000000000000000000000000000000000", - "storageKeys": [ - "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421" - ] - } - ], - "chainId": "0x539", - "v": "0x0", - "r": "0xf372ad499239ae11d91d34c559ffc5dab4daffc0069e03afcabdcdf231a0c16b", - "s": "0x28573161d1f9472fa0fd4752533609e72f06414f7ab5588699a7141f65d2abf", - "yParity": "0x0" - }`, - }, { - Tx: &types.AccessListTx{ - ChainID: config.ChainID, - Nonce: 5, - GasPrice: big.NewInt(6), - Gas: 7, - To: nil, - Value: big.NewInt(8), - Data: []byte{0, 1, 2, 3, 4}, - AccessList: types.AccessList{ - types.AccessTuple{ - Address: common.Address{0x2}, - StorageKeys: []common.Hash{types.EmptyRootHash}, - }, - }, - V: big.NewInt(32), - R: big.NewInt(10), - S: big.NewInt(11), - }, - Want: `{ - "blockHash": null, - "blockNumber": null, - "from": "0x71562b71999873db5b286df957af199ec94617f7", - "gas": "0x7", - "gasPrice": "0x6", - "hash": "0x067c3baebede8027b0f828a9d933be545f7caaec623b00684ac0659726e2055b", - "input": "0x0001020304", - "nonce": "0x5", - "to": null, - "transactionIndex": null, - "value": "0x8", - "type": "0x1", - "accessList": [ - { - "address": 
"0x0200000000000000000000000000000000000000", - "storageKeys": [ - "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421" - ] - } - ], - "chainId": "0x539", - "v": "0x1", - "r": "0x542981b5130d4613897fbab144796cb36d3cb3d7807d47d9c7f89ca7745b085c", - "s": "0x7425b9dd6c5deaa42e4ede35d0c4570c4624f68c28d812c10d806ffdf86ce63", - "yParity": "0x1" - }`, - }, { - Tx: &types.DynamicFeeTx{ - ChainID: config.ChainID, - Nonce: 5, - GasTipCap: big.NewInt(6), - GasFeeCap: big.NewInt(9), - Gas: 7, - To: &addr, - Value: big.NewInt(8), - Data: []byte{0, 1, 2, 3, 4}, - AccessList: types.AccessList{ - types.AccessTuple{ - Address: common.Address{0x2}, - StorageKeys: []common.Hash{types.EmptyRootHash}, - }, - }, - V: big.NewInt(32), - R: big.NewInt(10), - S: big.NewInt(11), - }, - Want: `{ - "blockHash": null, - "blockNumber": null, - "from": "0x71562b71999873db5b286df957af199ec94617f7", - "gas": "0x7", - "gasPrice": "0x9", - "maxFeePerGas": "0x9", - "maxPriorityFeePerGas": "0x6", - "hash": "0xb63e0b146b34c3e9cb7fbabb5b3c081254a7ded6f1b65324b5898cc0545d79ff", - "input": "0x0001020304", - "nonce": "0x5", - "to": "0xdead000000000000000000000000000000000000", - "transactionIndex": null, - "value": "0x8", - "type": "0x2", - "accessList": [ - { - "address": "0x0200000000000000000000000000000000000000", - "storageKeys": [ - "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421" - ] - } - ], - "chainId": "0x539", - "v": "0x1", - "r": "0x3b167e05418a8932cd53d7578711fe1a76b9b96c48642402bb94978b7a107e80", - "s": "0x22f98a332d15ea2cc80386c1ebaa31b0afebfa79ebc7d039a1e0074418301fef", - "yParity": "0x1" - }`, - }, { - Tx: &types.DynamicFeeTx{ - ChainID: config.ChainID, - Nonce: 5, - GasTipCap: big.NewInt(6), - GasFeeCap: big.NewInt(9), - Gas: 7, - To: nil, - Value: big.NewInt(8), - Data: []byte{0, 1, 2, 3, 4}, - AccessList: types.AccessList{}, - V: big.NewInt(32), - R: big.NewInt(10), - S: big.NewInt(11), - }, - Want: `{ - "blockHash": null, - "blockNumber": null, - "from": "0x71562b71999873db5b286df957af199ec94617f7", - "gas": "0x7", - "gasPrice": "0x9", - "maxFeePerGas": "0x9", - "maxPriorityFeePerGas": "0x6", - "hash": "0xcbab17ee031a9d5b5a09dff909f0a28aedb9b295ac0635d8710d11c7b806ec68", - "input": "0x0001020304", - "nonce": "0x5", - "to": null, - "transactionIndex": null, - "value": "0x8", - "type": "0x2", - "accessList": [], - "chainId": "0x539", - "v": "0x0", - "r": "0x6446b8a682db7e619fc6b4f6d1f708f6a17351a41c7fbd63665f469bc78b41b9", - "s": "0x7626abc15834f391a117c63450047309dbf84c5ce3e8e609b607062641e2de43", - "yParity": "0x0" - }`, - }, - } -} - -func allBlobTxs(addr common.Address, config *params.ChainConfig) []txData { - return []txData{ - { - Tx: &types.BlobTx{ - Nonce: 6, - GasTipCap: uint256.NewInt(1), - GasFeeCap: uint256.NewInt(5), - Gas: 6, - To: addr, - BlobFeeCap: uint256.NewInt(1), - BlobHashes: []common.Hash{{1}}, - Value: new(uint256.Int), - V: uint256.NewInt(32), - R: uint256.NewInt(10), - S: uint256.NewInt(11), - }, - Want: `{ - "blockHash": null, - "blockNumber": null, - "from": "0x71562b71999873db5b286df957af199ec94617f7", - "gas": "0x6", - "gasPrice": "0x5", - "maxFeePerGas": "0x5", - "maxPriorityFeePerGas": "0x1", - "maxFeePerBlobGas": "0x1", - "hash": "0x1f2b59a20e61efc615ad0cbe936379d6bbea6f938aafaf35eb1da05d8e7f46a3", - "input": "0x", - "nonce": "0x6", - "to": "0xdead000000000000000000000000000000000000", - "transactionIndex": null, - "value": "0x0", - "type": "0x3", - "accessList": [], - "chainId": "0x1", - "blobVersionedHashes": [ - 
"0x0100000000000000000000000000000000000000000000000000000000000000" - ], - "v": "0x0", - "r": "0x618be8908e0e5320f8f3b48042a079fe5a335ebd4ed1422a7d2207cd45d872bc", - "s": "0x27b2bc6c80e849a8e8b764d4549d8c2efac3441e73cf37054eb0a9b9f8e89b27", - "yParity": "0x0" - }`, - }, - } -} - -func newTestAccountManager(t *testing.T) (*accounts.Manager, accounts.Account) { - var ( - dir = t.TempDir() - am = accounts.NewManager(&accounts.Config{InsecureUnlockAllowed: true}) - b = keystore.NewKeyStore(dir, 2, 1) - testKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") - ) - acc, err := b.ImportECDSA(testKey, "") - if err != nil { - t.Fatalf("failed to create test account: %v", err) - } - if err := b.Unlock(acc, ""); err != nil { - t.Fatalf("failed to unlock account: %v\n", err) - } - am.AddBackend(b) - return am, acc -} - -type testBackend struct { - db ethdb.Database - chain *core.BlockChain - pending *types.Block - accman *accounts.Manager - acc accounts.Account -} - -func newTestBackend(t *testing.T, n int, gspec *core.Genesis, engine consensus.Engine, generator func(i int, b *core.BlockGen)) *testBackend { - var ( - cacheConfig = &core.CacheConfig{ - TrieCleanLimit: 256, - TrieDirtyLimit: 256, - TrieTimeLimit: 5 * time.Minute, - SnapshotLimit: 0, - TrieDirtyDisabled: true, // Archive mode - } - ) - accman, acc := newTestAccountManager(t) - gspec.Alloc[acc.Address] = types.Account{Balance: big.NewInt(params.Ether)} - // Generate blocks for testing - db, blocks, _ := core.GenerateChainWithGenesis(gspec, engine, n, generator) - txlookupLimit := uint64(0) - chain, err := core.NewBlockChain(db, cacheConfig, gspec, nil, engine, vm.Config{}, nil, &txlookupLimit) - if err != nil { - t.Fatalf("failed to create tester chain: %v", err) - } - if n, err := chain.InsertChain(blocks); err != nil { - t.Fatalf("block %d: failed to insert into chain: %v", n, err) - } - - backend := &testBackend{db: db, chain: chain, accman: accman, acc: acc} - return backend -} - -// nolint:unused -func (b *testBackend) setPendingBlock(block *types.Block) { - b.pending = block -} - -func (b testBackend) SyncProgress() ethereum.SyncProgress { return ethereum.SyncProgress{} } -func (b testBackend) SuggestGasTipCap(ctx context.Context) (*big.Int, error) { - return big.NewInt(0), nil -} - -func (b testBackend) Chain() *core.BlockChain { - return b.chain -} - -func (b testBackend) FeeHistory(ctx context.Context, blockCount uint64, lastBlock rpc.BlockNumber, rewardPercentiles []float64) (*big.Int, [][]*big.Int, []*big.Int, []float64, error) { - return nil, nil, nil, nil, nil -} -func (b testBackend) ChainDb() ethdb.Database { return b.db } -func (b testBackend) AccountManager() *accounts.Manager { return b.accman } -func (b testBackend) ExtRPCEnabled() bool { return false } -func (b testBackend) RPCGasCap() uint64 { return 10000000 } -func (b testBackend) RPCEVMTimeout() time.Duration { return time.Second } -func (b testBackend) RPCTxFeeCap() float64 { return 0 } -func (b testBackend) UnprotectedAllowed() bool { return false } -func (b testBackend) SetHead(number uint64) {} -func (b testBackend) HeaderByNumber(ctx context.Context, number rpc.BlockNumber) (*types.Header, error) { - if number == rpc.LatestBlockNumber { - return b.chain.CurrentBlock(), nil - } - if number == rpc.PendingBlockNumber && b.pending != nil { - return b.pending.Header(), nil - } - return b.chain.GetHeaderByNumber(uint64(number)), nil -} -func (b testBackend) HeaderByHash(ctx context.Context, hash common.Hash) 
(*types.Header, error) { - return b.chain.GetHeaderByHash(hash), nil -} -func (b testBackend) HeaderByNumberOrHash(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash) (*types.Header, error) { - if blockNr, ok := blockNrOrHash.Number(); ok { - return b.HeaderByNumber(ctx, blockNr) - } - if blockHash, ok := blockNrOrHash.Hash(); ok { - return b.HeaderByHash(ctx, blockHash) - } - panic("unknown type rpc.BlockNumberOrHash") -} -func (b testBackend) CurrentHeader() *types.Header { return b.chain.CurrentBlock() } -func (b testBackend) CurrentBlock() *types.Header { return b.chain.CurrentBlock() } -func (b testBackend) BlockByNumber(ctx context.Context, number rpc.BlockNumber) (*types.Block, error) { - if number == rpc.LatestBlockNumber { - head := b.chain.CurrentBlock() - return b.chain.GetBlock(head.Hash(), head.Number.Uint64()), nil - } - if number == rpc.PendingBlockNumber { - return b.pending, nil - } - return b.chain.GetBlockByNumber(uint64(number)), nil -} -func (b testBackend) BlockByHash(ctx context.Context, hash common.Hash) (*types.Block, error) { - return b.chain.GetBlockByHash(hash), nil -} -func (b testBackend) BlockByNumberOrHash(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash) (*types.Block, error) { - if blockNr, ok := blockNrOrHash.Number(); ok { - return b.BlockByNumber(ctx, blockNr) - } - if blockHash, ok := blockNrOrHash.Hash(); ok { - return b.BlockByHash(ctx, blockHash) - } - panic("unknown type rpc.BlockNumberOrHash") -} -func (b testBackend) GetBody(ctx context.Context, hash common.Hash, number rpc.BlockNumber) (*types.Body, error) { - return b.chain.GetBlock(hash, uint64(number.Int64())).Body(), nil -} -func (b testBackend) StateAndHeaderByNumber(ctx context.Context, number rpc.BlockNumber) (*state.StateDB, *types.Header, error) { - if number == rpc.PendingBlockNumber { - panic("pending state not implemented") - } - header, err := b.HeaderByNumber(ctx, number) - if err != nil { - return nil, nil, err - } - if header == nil { - return nil, nil, errors.New("header not found") - } - stateDb, err := b.chain.StateAt(header.Root) - return stateDb, header, err -} -func (b testBackend) StateAndHeaderByNumberOrHash(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash) (*state.StateDB, *types.Header, error) { - if blockNr, ok := blockNrOrHash.Number(); ok { - return b.StateAndHeaderByNumber(ctx, blockNr) - } - panic("only implemented for number") -} -func (b testBackend) PendingBlockAndReceipts() (*types.Block, types.Receipts) { panic("implement me") } -func (b testBackend) GetReceipts(ctx context.Context, hash common.Hash) (types.Receipts, error) { - header, err := b.HeaderByHash(ctx, hash) - if header == nil || err != nil { - return nil, err - } - receipts := rawdb.ReadReceipts(b.db, hash, header.Number.Uint64(), header.Time, b.chain.Config()) - return receipts, nil -} - -func (b testBackend) GetBlobSidecars(ctx context.Context, hash common.Hash) (types.BlobSidecars, error) { - header, err := b.HeaderByHash(ctx, hash) - if header == nil || err != nil { - return nil, err - } - blobSidecars := rawdb.ReadBlobSidecars(b.db, hash, header.Number.Uint64()) - return blobSidecars, nil -} -func (b testBackend) GetTd(ctx context.Context, hash common.Hash) *big.Int { - if b.pending != nil && hash == b.pending.Hash() { - return nil - } - return big.NewInt(1) -} -func (b testBackend) GetEVM(ctx context.Context, msg *core.Message, state *state.StateDB, header *types.Header, vmConfig *vm.Config, blockContext *vm.BlockContext) *vm.EVM { - if vmConfig == nil { - vmConfig = 
b.chain.GetVMConfig() - } - txContext := core.NewEVMTxContext(msg) - context := core.NewEVMBlockContext(header, b.chain, nil) - if blockContext != nil { - context = *blockContext - } - return vm.NewEVM(context, txContext, state, b.chain.Config(), *vmConfig) -} -func (b testBackend) SubscribeChainEvent(ch chan<- core.ChainEvent) event.Subscription { - panic("implement me") -} -func (b testBackend) SubscribeChainHeadEvent(ch chan<- core.ChainHeadEvent) event.Subscription { - panic("implement me") -} -func (b testBackend) SubscribeChainSideEvent(ch chan<- core.ChainSideEvent) event.Subscription { - panic("implement me") -} -func (b testBackend) SubscribeFinalizedHeaderEvent(ch chan<- core.FinalizedHeaderEvent) event.Subscription { - panic("implement me") -} -func (b testBackend) SubscribeNewVoteEvent(ch chan<- core.NewVoteEvent) event.Subscription { - panic("implement me") -} -func (b testBackend) SendTx(ctx context.Context, signedTx *types.Transaction) error { - panic("implement me") -} -func (b testBackend) GetTransaction(ctx context.Context, txHash common.Hash) (bool, *types.Transaction, common.Hash, uint64, uint64, error) { - tx, blockHash, blockNumber, index := rawdb.ReadTransaction(b.db, txHash) - return true, tx, blockHash, blockNumber, index, nil -} -func (b testBackend) GetPoolTransactions() (types.Transactions, error) { panic("implement me") } -func (b testBackend) GetPoolTransaction(txHash common.Hash) *types.Transaction { panic("implement me") } -func (b testBackend) GetPoolNonce(ctx context.Context, addr common.Address) (uint64, error) { - return 0, nil -} -func (b testBackend) Stats() (pending int, queued int) { panic("implement me") } -func (b testBackend) TxPoolContent() (map[common.Address][]*types.Transaction, map[common.Address][]*types.Transaction) { - panic("implement me") -} -func (b testBackend) TxPoolContentFrom(addr common.Address) ([]*types.Transaction, []*types.Transaction) { - panic("implement me") -} -func (b testBackend) SubscribeNewTxsEvent(events chan<- core.NewTxsEvent) event.Subscription { - panic("implement me") -} -func (b testBackend) ChainConfig() *params.ChainConfig { return b.chain.Config() } -func (b testBackend) Engine() consensus.Engine { return b.chain.Engine() } -func (b testBackend) GetLogs(ctx context.Context, blockHash common.Hash, number uint64) ([][]*types.Log, error) { - panic("implement me") -} -func (b testBackend) SubscribeRemovedLogsEvent(ch chan<- core.RemovedLogsEvent) event.Subscription { - panic("implement me") -} -func (b testBackend) SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscription { - panic("implement me") -} -func (b testBackend) SubscribePendingLogsEvent(ch chan<- []*types.Log) event.Subscription { - panic("implement me") -} -func (b testBackend) BloomStatus() (uint64, uint64) { panic("implement me") } -func (b testBackend) ServiceFilter(ctx context.Context, session *bloombits.MatcherSession) { - panic("implement me") -} - -func (b *testBackend) MevRunning() bool { return false } -func (b *testBackend) HasBuilder(builder common.Address) bool { return false } -func (b *testBackend) MevParams() *types.MevParams { - return &types.MevParams{} -} -func (b *testBackend) StartMev() {} -func (b *testBackend) StopMev() {} -func (b *testBackend) AddBuilder(builder common.Address, builderUrl string) error { return nil } -func (b *testBackend) RemoveBuilder(builder common.Address) error { return nil } -func (b *testBackend) SendBid(ctx context.Context, bid *types.BidArgs) (common.Hash, error) { - panic("implement me") -} -func 
(b *testBackend) MinerInTurn() bool { return false } -func (b *testBackend) BestBidGasFee(parentHash common.Hash) *big.Int { - //TODO implement me - panic("implement me") -} - -func TestEstimateGas(t *testing.T) { - t.Parallel() - // Initialize test accounts - var ( - accounts = newAccounts(2) - genesis = &core.Genesis{ - Config: params.MergedTestChainConfig, - Alloc: types.GenesisAlloc{ - accounts[0].addr: {Balance: big.NewInt(params.Ether)}, - accounts[1].addr: {Balance: big.NewInt(params.Ether)}, - }, - } - genBlocks = 10 - signer = types.HomesteadSigner{} - randomAccounts = newAccounts(2) - ) - api := NewBlockChainAPI(newTestBackend(t, genBlocks, genesis, beacon.New(ethash.NewFaker()), func(i int, b *core.BlockGen) { - // Transfer from account[0] to account[1] - // value: 1000 wei - // fee: 0 wei - tx, _ := types.SignTx(types.NewTx(&types.LegacyTx{Nonce: uint64(i), To: &accounts[1].addr, Value: big.NewInt(1000), Gas: params.TxGas, GasPrice: b.BaseFee(), Data: nil}), signer, accounts[0].key) - b.AddTx(tx) - b.SetPoS() - })) - var testSuite = []struct { - blockNumber rpc.BlockNumber - call TransactionArgs - overrides StateOverride - expectErr error - want uint64 - }{ - // simple transfer on latest block - { - blockNumber: rpc.LatestBlockNumber, - call: TransactionArgs{ - From: &accounts[0].addr, - To: &accounts[1].addr, - Value: (*hexutil.Big)(big.NewInt(1000)), - }, - expectErr: nil, - want: 21000, - }, - // simple transfer with insufficient funds on latest block - { - blockNumber: rpc.LatestBlockNumber, - call: TransactionArgs{ - From: &randomAccounts[0].addr, - To: &accounts[1].addr, - Value: (*hexutil.Big)(big.NewInt(1000)), - }, - expectErr: core.ErrInsufficientFunds, - want: 21000, - }, - // empty create - { - blockNumber: rpc.LatestBlockNumber, - call: TransactionArgs{}, - expectErr: nil, - want: 53000, - }, - { - blockNumber: rpc.LatestBlockNumber, - call: TransactionArgs{}, - overrides: StateOverride{ - randomAccounts[0].addr: OverrideAccount{Balance: newRPCBalance(new(big.Int).Mul(big.NewInt(1), big.NewInt(params.Ether)))}, - }, - expectErr: nil, - want: 53000, - }, - { - blockNumber: rpc.LatestBlockNumber, - call: TransactionArgs{ - From: &randomAccounts[0].addr, - To: &randomAccounts[1].addr, - Value: (*hexutil.Big)(big.NewInt(1000)), - }, - overrides: StateOverride{ - randomAccounts[0].addr: OverrideAccount{Balance: newRPCBalance(big.NewInt(0))}, - }, - expectErr: core.ErrInsufficientFunds, - }, - // Test for a bug where the gas price was set to zero but the basefee non-zero - // - // contract BasefeeChecker { - // constructor() { - // require(tx.gasprice >= block.basefee); - // if (tx.gasprice > 0) { - // require(block.basefee > 0); - // } - // } - //} - { - blockNumber: rpc.LatestBlockNumber, - call: TransactionArgs{ - From: &accounts[0].addr, - Input: hex2Bytes("6080604052348015600f57600080fd5b50483a1015601c57600080fd5b60003a111560315760004811603057600080fd5b5b603f80603e6000396000f3fe6080604052600080fdfea264697066735822122060729c2cee02b10748fae5200f1c9da4661963354973d9154c13a8e9ce9dee1564736f6c63430008130033"), - GasPrice: (*hexutil.Big)(big.NewInt(1_000_000_000)), // Legacy as pricing - }, - expectErr: nil, - want: 67617, - }, - { - blockNumber: rpc.LatestBlockNumber, - call: TransactionArgs{ - From: &accounts[0].addr, - Input: 
hex2Bytes("6080604052348015600f57600080fd5b50483a1015601c57600080fd5b60003a111560315760004811603057600080fd5b5b603f80603e6000396000f3fe6080604052600080fdfea264697066735822122060729c2cee02b10748fae5200f1c9da4661963354973d9154c13a8e9ce9dee1564736f6c63430008130033"), - MaxFeePerGas: (*hexutil.Big)(big.NewInt(1_000_000_000)), // 1559 gas pricing - }, - expectErr: nil, - want: 67617, - }, - { - blockNumber: rpc.LatestBlockNumber, - call: TransactionArgs{ - From: &accounts[0].addr, - Input: hex2Bytes("6080604052348015600f57600080fd5b50483a1015601c57600080fd5b60003a111560315760004811603057600080fd5b5b603f80603e6000396000f3fe6080604052600080fdfea264697066735822122060729c2cee02b10748fae5200f1c9da4661963354973d9154c13a8e9ce9dee1564736f6c63430008130033"), - GasPrice: nil, // No legacy gas pricing - MaxFeePerGas: nil, // No 1559 gas pricing - }, - expectErr: nil, - want: 67595, - }, - // Blobs should have no effect on gas estimate - { - blockNumber: rpc.LatestBlockNumber, - call: TransactionArgs{ - From: &accounts[0].addr, - To: &accounts[1].addr, - Value: (*hexutil.Big)(big.NewInt(1)), - BlobHashes: []common.Hash{common.Hash{0x01, 0x22}}, - BlobFeeCap: (*hexutil.Big)(big.NewInt(1)), - }, - want: 21000, - }, - } - for i, tc := range testSuite { - result, err := api.EstimateGas(context.Background(), tc.call, &rpc.BlockNumberOrHash{BlockNumber: &tc.blockNumber}, &tc.overrides) - if tc.expectErr != nil { - if err == nil { - t.Errorf("test %d: want error %v, have nothing", i, tc.expectErr) - continue - } - if !errors.Is(err, tc.expectErr) { - t.Errorf("test %d: error mismatch, want %v, have %v", i, tc.expectErr, err) - } - continue - } - if err != nil { - t.Errorf("test %d: want no error, have %v", i, err) - continue - } - if float64(result) > float64(tc.want)*(1+estimateGasErrorRatio) { - t.Errorf("test %d, result mismatch, have\n%v\n, want\n%v\n", i, uint64(result), tc.want) - } - } -} - -func TestCall(t *testing.T) { - t.Parallel() - // Initialize test accounts - var ( - accounts = newAccounts(3) - genesis = &core.Genesis{ - Config: params.MergedTestChainConfig, - Alloc: types.GenesisAlloc{ - accounts[0].addr: {Balance: big.NewInt(params.Ether)}, - accounts[1].addr: {Balance: big.NewInt(params.Ether)}, - accounts[2].addr: {Balance: big.NewInt(params.Ether)}, - }, - } - genBlocks = 10 - signer = types.HomesteadSigner{} - ) - api := NewBlockChainAPI(newTestBackend(t, genBlocks, genesis, beacon.New(ethash.NewFaker()), func(i int, b *core.BlockGen) { - // Transfer from account[0] to account[1] - // value: 1000 wei - // fee: 0 wei - tx, _ := types.SignTx(types.NewTx(&types.LegacyTx{Nonce: uint64(i), To: &accounts[1].addr, Value: big.NewInt(1000), Gas: params.TxGas, GasPrice: b.BaseFee(), Data: nil}), signer, accounts[0].key) - b.AddTx(tx) - b.SetPoS() - })) - randomAccounts := newAccounts(3) - var testSuite = []struct { - blockNumber rpc.BlockNumber - overrides StateOverride - call TransactionArgs - blockOverrides BlockOverrides - expectErr error - want string - }{ - // transfer on genesis - { - blockNumber: rpc.BlockNumber(0), - call: TransactionArgs{ - From: &accounts[0].addr, - To: &accounts[1].addr, - Value: (*hexutil.Big)(big.NewInt(1000)), - }, - expectErr: nil, - want: "0x", - }, - // transfer on the head - { - blockNumber: rpc.BlockNumber(genBlocks), - call: TransactionArgs{ - From: &accounts[0].addr, - To: &accounts[1].addr, - Value: (*hexutil.Big)(big.NewInt(1000)), - }, - expectErr: nil, - want: "0x", - }, - // transfer on a non-existent block, error expects - { - blockNumber: 
rpc.BlockNumber(genBlocks + 1), - call: TransactionArgs{ - From: &accounts[0].addr, - To: &accounts[1].addr, - Value: (*hexutil.Big)(big.NewInt(1000)), - }, - expectErr: errors.New("header not found"), - }, - // transfer on the latest block - { - blockNumber: rpc.LatestBlockNumber, - call: TransactionArgs{ - From: &accounts[0].addr, - To: &accounts[1].addr, - Value: (*hexutil.Big)(big.NewInt(1000)), - }, - expectErr: nil, - want: "0x", - }, - // Call which can only succeed if state is state overridden - { - blockNumber: rpc.LatestBlockNumber, - call: TransactionArgs{ - From: &randomAccounts[0].addr, - To: &randomAccounts[1].addr, - Value: (*hexutil.Big)(big.NewInt(1000)), - }, - overrides: StateOverride{ - randomAccounts[0].addr: OverrideAccount{Balance: newRPCBalance(new(big.Int).Mul(big.NewInt(1), big.NewInt(params.Ether)))}, - }, - want: "0x", - }, - // Invalid call without state overriding - { - blockNumber: rpc.LatestBlockNumber, - call: TransactionArgs{ - From: &randomAccounts[0].addr, - To: &randomAccounts[1].addr, - Value: (*hexutil.Big)(big.NewInt(1000)), - }, - expectErr: core.ErrInsufficientFunds, - }, - // Successful simple contract call - // - // // SPDX-License-Identifier: GPL-3.0 - // - // pragma solidity >=0.7.0 <0.8.0; - // - // /** - // * @title Storage - // * @dev Store & retrieve value in a variable - // */ - // contract Storage { - // uint256 public number; - // constructor() { - // number = block.number; - // } - // } - { - blockNumber: rpc.LatestBlockNumber, - call: TransactionArgs{ - From: &randomAccounts[0].addr, - To: &randomAccounts[2].addr, - Data: hex2Bytes("8381f58a"), // call number() - }, - overrides: StateOverride{ - randomAccounts[2].addr: OverrideAccount{ - Code: hex2Bytes("6080604052348015600f57600080fd5b506004361060285760003560e01c80638381f58a14602d575b600080fd5b60336049565b6040518082815260200191505060405180910390f35b6000548156fea2646970667358221220eab35ffa6ab2adfe380772a48b8ba78e82a1b820a18fcb6f59aa4efb20a5f60064736f6c63430007040033"), - StateDiff: &map[common.Hash]common.Hash{{}: common.BigToHash(big.NewInt(123))}, - }, - }, - want: "0x000000000000000000000000000000000000000000000000000000000000007b", - }, - // Block overrides should work - { - blockNumber: rpc.LatestBlockNumber, - call: TransactionArgs{ - From: &accounts[1].addr, - Input: &hexutil.Bytes{ - 0x43, // NUMBER - 0x60, 0x00, 0x52, // MSTORE offset 0 - 0x60, 0x20, 0x60, 0x00, 0xf3, - }, - }, - blockOverrides: BlockOverrides{Number: (*hexutil.Big)(big.NewInt(11))}, - want: "0x000000000000000000000000000000000000000000000000000000000000000b", - }, - // Invalid blob tx - { - blockNumber: rpc.LatestBlockNumber, - call: TransactionArgs{ - From: &accounts[1].addr, - Input: &hexutil.Bytes{0x00}, - BlobHashes: []common.Hash{}, - }, - expectErr: core.ErrBlobTxCreate, - }, - // BLOBHASH opcode - { - blockNumber: rpc.LatestBlockNumber, - call: TransactionArgs{ - From: &accounts[1].addr, - To: &randomAccounts[2].addr, - BlobHashes: []common.Hash{common.Hash{0x01, 0x22}}, - BlobFeeCap: (*hexutil.Big)(big.NewInt(1)), - }, - overrides: StateOverride{ - randomAccounts[2].addr: { - Code: hex2Bytes("60004960005260206000f3"), - }, - }, - want: "0x0122000000000000000000000000000000000000000000000000000000000000", - }, - } - for i, tc := range testSuite { - result, err := api.Call(context.Background(), tc.call, &rpc.BlockNumberOrHash{BlockNumber: &tc.blockNumber}, &tc.overrides, &tc.blockOverrides) - if tc.expectErr != nil { - if err == nil { - t.Errorf("test %d: want error %v, have nothing", i, tc.expectErr) - 
continue - } - if !errors.Is(err, tc.expectErr) { - // Second try - if !reflect.DeepEqual(err, tc.expectErr) { - t.Errorf("test %d: error mismatch, want %v, have %v", i, tc.expectErr, err) - } - } - continue - } - if err != nil { - t.Errorf("test %d: want no error, have %v", i, err) - continue - } - if !reflect.DeepEqual(result.String(), tc.want) { - t.Errorf("test %d, result mismatch, have\n%v\n, want\n%v\n", i, result.String(), tc.want) - } - } -} - -func TestSignTransaction(t *testing.T) { - t.Parallel() - // Initialize test accounts - var ( - key, _ = crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a") - to = crypto.PubkeyToAddress(key.PublicKey) - genesis = &core.Genesis{ - Config: params.MergedTestChainConfig, - Alloc: types.GenesisAlloc{}, - } - ) - b := newTestBackend(t, 1, genesis, beacon.New(ethash.NewFaker()), func(i int, b *core.BlockGen) { - b.SetPoS() - }) - api := NewTransactionAPI(b, nil) - res, err := api.FillTransaction(context.Background(), TransactionArgs{ - From: &b.acc.Address, - To: &to, - Value: (*hexutil.Big)(big.NewInt(1)), - }) - if err != nil { - t.Fatalf("failed to fill tx defaults: %v\n", err) - } - - res, err = api.SignTransaction(context.Background(), argsFromTransaction(res.Tx, b.acc.Address)) - if err != nil { - t.Fatalf("failed to sign tx: %v\n", err) - } - tx, err := json.Marshal(res.Tx) - if err != nil { - t.Fatal(err) - } - expect := `{"type":"0x2","chainId":"0x1","nonce":"0x0","to":"0x703c4b2bd70c169f5717101caee543299fc946c7","gas":"0x5208","gasPrice":null,"maxPriorityFeePerGas":"0x0","maxFeePerGas":"0x684ee180","value":"0x1","input":"0x","accessList":[],"v":"0x0","r":"0x8fabeb142d585dd9247f459f7e6fe77e2520c88d50ba5d220da1533cea8b34e1","s":"0x582dd68b21aef36ba23f34e49607329c20d981d30404daf749077f5606785ce7","yParity":"0x0","hash":"0x93927839207cfbec395da84b8a2bc38b7b65d2cb2819e9fef1f091f5b1d4cc8f"}` - if !bytes.Equal(tx, []byte(expect)) { - t.Errorf("result mismatch. Have:\n%s\nWant:\n%s\n", tx, expect) - } -} - -func TestSignBlobTransaction(t *testing.T) { - t.Parallel() - // Initialize test accounts - var ( - key, _ = crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a") - to = crypto.PubkeyToAddress(key.PublicKey) - genesis = &core.Genesis{ - Config: params.MergedTestChainConfig, - Alloc: types.GenesisAlloc{}, - } - ) - b := newTestBackend(t, 1, genesis, beacon.New(ethash.NewFaker()), func(i int, b *core.BlockGen) { - b.SetPoS() - }) - api := NewTransactionAPI(b, nil) - res, err := api.FillTransaction(context.Background(), TransactionArgs{ - From: &b.acc.Address, - To: &to, - Value: (*hexutil.Big)(big.NewInt(1)), - BlobHashes: []common.Hash{{0x01, 0x22}}, - }) - if err != nil { - t.Fatalf("failed to fill tx defaults: %v\n", err) - } - - _, err = api.SignTransaction(context.Background(), argsFromTransaction(res.Tx, b.acc.Address)) - if err == nil { - t.Fatalf("should fail on blob transaction") - } - if !errors.Is(err, errBlobTxNotSupported) { - t.Errorf("error mismatch. 
Have: %v, want: %v", err, errBlobTxNotSupported) - } -} - -func TestSendBlobTransaction(t *testing.T) { - t.Parallel() - // Initialize test accounts - var ( - key, _ = crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a") - to = crypto.PubkeyToAddress(key.PublicKey) - genesis = &core.Genesis{ - Config: params.MergedTestChainConfig, - Alloc: types.GenesisAlloc{}, - } - ) - b := newTestBackend(t, 1, genesis, beacon.New(ethash.NewFaker()), func(i int, b *core.BlockGen) { - b.SetPoS() - }) - api := NewTransactionAPI(b, nil) - res, err := api.FillTransaction(context.Background(), TransactionArgs{ - From: &b.acc.Address, - To: &to, - Value: (*hexutil.Big)(big.NewInt(1)), - BlobHashes: []common.Hash{common.Hash{0x01, 0x22}}, - }) - if err != nil { - t.Fatalf("failed to fill tx defaults: %v\n", err) - } - - _, err = api.SendTransaction(context.Background(), argsFromTransaction(res.Tx, b.acc.Address)) - if err == nil { - t.Errorf("sending tx should have failed") - } else if !errors.Is(err, errBlobTxNotSupported) { - t.Errorf("unexpected error. Have %v, want %v\n", err, errBlobTxNotSupported) - } -} - -func TestFillBlobTransaction(t *testing.T) { - t.Parallel() - // Initialize test accounts - var ( - key, _ = crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a") - to = crypto.PubkeyToAddress(key.PublicKey) - genesis = &core.Genesis{ - Config: params.MergedTestChainConfig, - Alloc: types.GenesisAlloc{}, - } - ) - b := newTestBackend(t, 1, genesis, beacon.New(ethash.NewFaker()), func(i int, b *core.BlockGen) { - b.SetPoS() - }) - api := NewTransactionAPI(b, nil) - type result struct { - Hashes []common.Hash - Sidecar *types.BlobTxSidecar - } - suite := []struct { - name string - args TransactionArgs - err string - want *result - }{ - { - name: "TestInvalidParamsCombination1", - args: TransactionArgs{ - From: &b.acc.Address, - To: &to, - Value: (*hexutil.Big)(big.NewInt(1)), - Blobs: []kzg4844.Blob{{}}, - Proofs: []kzg4844.Proof{{}}, - }, - err: `blob proofs provided while commitments were not`, - }, - { - name: "TestInvalidParamsCombination2", - args: TransactionArgs{ - From: &b.acc.Address, - To: &to, - Value: (*hexutil.Big)(big.NewInt(1)), - Blobs: []kzg4844.Blob{{}}, - Commitments: []kzg4844.Commitment{{}}, - }, - err: `blob commitments provided while proofs were not`, - }, - { - name: "TestInvalidParamsCount1", - args: TransactionArgs{ - From: &b.acc.Address, - To: &to, - Value: (*hexutil.Big)(big.NewInt(1)), - Blobs: []kzg4844.Blob{{}}, - Commitments: []kzg4844.Commitment{{}, {}}, - Proofs: []kzg4844.Proof{{}, {}}, - }, - err: `number of blobs and commitments mismatch (have=2, want=1)`, - }, - { - name: "TestInvalidParamsCount2", - args: TransactionArgs{ - From: &b.acc.Address, - To: &to, - Value: (*hexutil.Big)(big.NewInt(1)), - Blobs: []kzg4844.Blob{{}, {}}, - Commitments: []kzg4844.Commitment{{}, {}}, - Proofs: []kzg4844.Proof{{}}, - }, - err: `number of blobs and proofs mismatch (have=1, want=2)`, - }, - { - name: "TestInvalidProofVerification", - args: TransactionArgs{ - From: &b.acc.Address, - To: &to, - Value: (*hexutil.Big)(big.NewInt(1)), - Blobs: []kzg4844.Blob{{}, {}}, - Commitments: []kzg4844.Commitment{{}, {}}, - Proofs: []kzg4844.Proof{{}, {}}, - }, - err: `failed to verify blob proof: short buffer`, - }, - { - name: "TestGenerateBlobHashes", - args: TransactionArgs{ - From: &b.acc.Address, - To: &to, - Value: (*hexutil.Big)(big.NewInt(1)), - Blobs: []kzg4844.Blob{emptyBlob}, - Commitments: 
[]kzg4844.Commitment{emptyBlobCommit}, - Proofs: []kzg4844.Proof{emptyBlobProof}, - }, - want: &result{ - Hashes: []common.Hash{emptyBlobHash}, - Sidecar: &types.BlobTxSidecar{ - Blobs: []kzg4844.Blob{emptyBlob}, - Commitments: []kzg4844.Commitment{emptyBlobCommit}, - Proofs: []kzg4844.Proof{emptyBlobProof}, - }, - }, - }, - { - name: "TestValidBlobHashes", - args: TransactionArgs{ - From: &b.acc.Address, - To: &to, - Value: (*hexutil.Big)(big.NewInt(1)), - BlobHashes: []common.Hash{emptyBlobHash}, - Blobs: []kzg4844.Blob{emptyBlob}, - Commitments: []kzg4844.Commitment{emptyBlobCommit}, - Proofs: []kzg4844.Proof{emptyBlobProof}, - }, - want: &result{ - Hashes: []common.Hash{emptyBlobHash}, - Sidecar: &types.BlobTxSidecar{ - Blobs: []kzg4844.Blob{emptyBlob}, - Commitments: []kzg4844.Commitment{emptyBlobCommit}, - Proofs: []kzg4844.Proof{emptyBlobProof}, - }, - }, - }, - { - name: "TestInvalidBlobHashes", - args: TransactionArgs{ - From: &b.acc.Address, - To: &to, - Value: (*hexutil.Big)(big.NewInt(1)), - BlobHashes: []common.Hash{{0x01, 0x22}}, - Blobs: []kzg4844.Blob{emptyBlob}, - Commitments: []kzg4844.Commitment{emptyBlobCommit}, - Proofs: []kzg4844.Proof{emptyBlobProof}, - }, - err: fmt.Sprintf("blob hash verification failed (have=%s, want=%s)", common.Hash{0x01, 0x22}, emptyBlobHash), - }, - { - name: "TestGenerateBlobProofs", - args: TransactionArgs{ - From: &b.acc.Address, - To: &to, - Value: (*hexutil.Big)(big.NewInt(1)), - Blobs: []kzg4844.Blob{emptyBlob}, - }, - want: &result{ - Hashes: []common.Hash{emptyBlobHash}, - Sidecar: &types.BlobTxSidecar{ - Blobs: []kzg4844.Blob{emptyBlob}, - Commitments: []kzg4844.Commitment{emptyBlobCommit}, - Proofs: []kzg4844.Proof{emptyBlobProof}, - }, - }, - }, - } - for _, tc := range suite { - t.Run(tc.name, func(t *testing.T) { - res, err := api.FillTransaction(context.Background(), tc.args) - if len(tc.err) > 0 { - if err == nil { - t.Fatalf("missing error. want: %s", tc.err) - } else if err != nil && err.Error() != tc.err { - t.Fatalf("error mismatch. want: %s, have: %s", tc.err, err.Error()) - } - return - } - if err != nil && len(tc.err) == 0 { - t.Fatalf("expected no error. have: %s", err) - } - if res == nil { - t.Fatal("result missing") - } - want, err := json.Marshal(tc.want) - if err != nil { - t.Fatalf("failed to encode expected: %v", err) - } - have, err := json.Marshal(result{Hashes: res.Tx.BlobHashes(), Sidecar: res.Tx.BlobTxSidecar()}) - if err != nil { - t.Fatalf("failed to encode computed sidecar: %v", err) - } - if !bytes.Equal(have, want) { - t.Errorf("blob sidecar mismatch. 
Have: %s, want: %s", have, want) - } - }) - } -} - -func argsFromTransaction(tx *types.Transaction, from common.Address) TransactionArgs { - var ( - gas = tx.Gas() - nonce = tx.Nonce() - input = tx.Data() - ) - return TransactionArgs{ - From: &from, - To: tx.To(), - Gas: (*hexutil.Uint64)(&gas), - MaxFeePerGas: (*hexutil.Big)(tx.GasFeeCap()), - MaxPriorityFeePerGas: (*hexutil.Big)(tx.GasTipCap()), - Value: (*hexutil.Big)(tx.Value()), - Nonce: (*hexutil.Uint64)(&nonce), - Input: (*hexutil.Bytes)(&input), - ChainID: (*hexutil.Big)(tx.ChainId()), - // TODO: impl accessList conversion - //AccessList: tx.AccessList(), - BlobFeeCap: (*hexutil.Big)(tx.BlobGasFeeCap()), - BlobHashes: tx.BlobHashes(), - } -} - -type account struct { - key *ecdsa.PrivateKey - addr common.Address -} - -func newAccounts(n int) (accounts []account) { - for i := 0; i < n; i++ { - key, _ := crypto.GenerateKey() - addr := crypto.PubkeyToAddress(key.PublicKey) - accounts = append(accounts, account{key: key, addr: addr}) - } - slices.SortFunc(accounts, func(a, b account) int { return a.addr.Cmp(b.addr) }) - return accounts -} - -func newRPCBalance(balance *big.Int) **hexutil.Big { - rpcBalance := (*hexutil.Big)(balance) - return &rpcBalance -} - -func hex2Bytes(str string) *hexutil.Bytes { - rpcBytes := hexutil.Bytes(common.Hex2Bytes(str)) - return &rpcBytes -} - -func TestRPCMarshalBlock(t *testing.T) { - t.Parallel() - var ( - txs []*types.Transaction - to = common.BytesToAddress([]byte{0x11}) - ) - for i := uint64(1); i <= 4; i++ { - var tx *types.Transaction - if i%2 == 0 { - tx = types.NewTx(&types.LegacyTx{ - Nonce: i, - GasPrice: big.NewInt(11111), - Gas: 1111, - To: &to, - Value: big.NewInt(111), - Data: []byte{0x11, 0x11, 0x11}, - }) - } else { - tx = types.NewTx(&types.AccessListTx{ - ChainID: big.NewInt(1337), - Nonce: i, - GasPrice: big.NewInt(11111), - Gas: 1111, - To: &to, - Value: big.NewInt(111), - Data: []byte{0x11, 0x11, 0x11}, - }) - } - txs = append(txs, tx) - } - block := types.NewBlock(&types.Header{Number: big.NewInt(100)}, txs, nil, nil, blocktest.NewHasher()) - - var testSuite = []struct { - inclTx bool - fullTx bool - want string - }{ - // without txs - { - inclTx: false, - fullTx: false, - want: `{ - "difficulty": "0x0", - "extraData": "0x", - "gasLimit": "0x0", - "gasUsed": "0x0", - "hash": "0x9b73c83b25d0faf7eab854e3684c7e394336d6e135625aafa5c183f27baa8fee", - "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", - "miner": "0x0000000000000000000000000000000000000000", - "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000", - "nonce": "0x0000000000000000", - "number": "0x64", - "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000", - "receiptsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", - "sha3Uncles": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", - "size": "0x296", - "stateRoot": "0x0000000000000000000000000000000000000000000000000000000000000000", - "timestamp": "0x0", - "transactionsRoot": 
"0x661a9febcfa8f1890af549b874faf9fa274aede26ef489d9db0b25daa569450e", - "uncles": [] - }`, - }, - // only tx hashes - { - inclTx: true, - fullTx: false, - want: `{ - "difficulty": "0x0", - "extraData": "0x", - "gasLimit": "0x0", - "gasUsed": "0x0", - "hash": "0x9b73c83b25d0faf7eab854e3684c7e394336d6e135625aafa5c183f27baa8fee", - "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", - "miner": "0x0000000000000000000000000000000000000000", - "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000", - "nonce": "0x0000000000000000", - "number": "0x64", - "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000", - "receiptsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", - "sha3Uncles": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", - "size": "0x296", - "stateRoot": "0x0000000000000000000000000000000000000000000000000000000000000000", - "timestamp": "0x0", - "transactions": [ - "0x7d39df979e34172322c64983a9ad48302c2b889e55bda35324afecf043a77605", - "0x9bba4c34e57c875ff57ac8d172805a26ae912006985395dc1bdf8f44140a7bf4", - "0x98909ea1ff040da6be56bc4231d484de1414b3c1dac372d69293a4beb9032cb5", - "0x12e1f81207b40c3bdcc13c0ee18f5f86af6d31754d57a0ea1b0d4cfef21abef1" - ], - "transactionsRoot": "0x661a9febcfa8f1890af549b874faf9fa274aede26ef489d9db0b25daa569450e", - "uncles": [] - }`, - }, - // full tx details - { - inclTx: true, - fullTx: true, - want: `{ - "difficulty": "0x0", - "extraData": "0x", - "gasLimit": "0x0", - "gasUsed": "0x0", - "hash": "0x9b73c83b25d0faf7eab854e3684c7e394336d6e135625aafa5c183f27baa8fee", - "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", - "miner": "0x0000000000000000000000000000000000000000", - "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000", - "nonce": "0x0000000000000000", - "number": "0x64", - "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000", - "receiptsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", - "sha3Uncles": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", - "size": "0x296", - "stateRoot": "0x0000000000000000000000000000000000000000000000000000000000000000", - "timestamp": "0x0", - "transactions": [ - { - "blockHash": "0x9b73c83b25d0faf7eab854e3684c7e394336d6e135625aafa5c183f27baa8fee", - "blockNumber": "0x64", - "from": "0x0000000000000000000000000000000000000000", - "gas": "0x457", - "gasPrice": "0x2b67", - "hash": "0x7d39df979e34172322c64983a9ad48302c2b889e55bda35324afecf043a77605", - "input": "0x111111", - 
"nonce": "0x1", - "to": "0x0000000000000000000000000000000000000011", - "transactionIndex": "0x0", - "value": "0x6f", - "type": "0x1", - "accessList": [], - "chainId": "0x539", - "v": "0x0", - "r": "0x0", - "s": "0x0", - "yParity": "0x0" - }, - { - "blockHash": "0x9b73c83b25d0faf7eab854e3684c7e394336d6e135625aafa5c183f27baa8fee", - "blockNumber": "0x64", - "from": "0x0000000000000000000000000000000000000000", - "gas": "0x457", - "gasPrice": "0x2b67", - "hash": "0x9bba4c34e57c875ff57ac8d172805a26ae912006985395dc1bdf8f44140a7bf4", - "input": "0x111111", - "nonce": "0x2", - "to": "0x0000000000000000000000000000000000000011", - "transactionIndex": "0x1", - "value": "0x6f", - "type": "0x0", - "chainId": "0x7fffffffffffffee", - "v": "0x0", - "r": "0x0", - "s": "0x0" - }, - { - "blockHash": "0x9b73c83b25d0faf7eab854e3684c7e394336d6e135625aafa5c183f27baa8fee", - "blockNumber": "0x64", - "from": "0x0000000000000000000000000000000000000000", - "gas": "0x457", - "gasPrice": "0x2b67", - "hash": "0x98909ea1ff040da6be56bc4231d484de1414b3c1dac372d69293a4beb9032cb5", - "input": "0x111111", - "nonce": "0x3", - "to": "0x0000000000000000000000000000000000000011", - "transactionIndex": "0x2", - "value": "0x6f", - "type": "0x1", - "accessList": [], - "chainId": "0x539", - "v": "0x0", - "r": "0x0", - "s": "0x0", - "yParity": "0x0" - }, - { - "blockHash": "0x9b73c83b25d0faf7eab854e3684c7e394336d6e135625aafa5c183f27baa8fee", - "blockNumber": "0x64", - "from": "0x0000000000000000000000000000000000000000", - "gas": "0x457", - "gasPrice": "0x2b67", - "hash": "0x12e1f81207b40c3bdcc13c0ee18f5f86af6d31754d57a0ea1b0d4cfef21abef1", - "input": "0x111111", - "nonce": "0x4", - "to": "0x0000000000000000000000000000000000000011", - "transactionIndex": "0x3", - "value": "0x6f", - "type": "0x0", - "chainId": "0x7fffffffffffffee", - "v": "0x0", - "r": "0x0", - "s": "0x0" - } - ], - "transactionsRoot": "0x661a9febcfa8f1890af549b874faf9fa274aede26ef489d9db0b25daa569450e", - "uncles": [] - }`, - }, - } - - for i, tc := range testSuite { - resp := RPCMarshalBlock(block, tc.inclTx, tc.fullTx, params.MainnetChainConfig) - out, err := json.Marshal(resp) - if err != nil { - t.Errorf("test %d: json marshal error: %v", i, err) - continue - } - require.JSONEqf(t, tc.want, string(out), "test %d", i) - } -} - -func TestRPCGetBlockOrHeader(t *testing.T) { - t.Parallel() - - // Initialize test accounts - var ( - acc1Key, _ = crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a") - acc2Key, _ = crypto.HexToECDSA("49a7b37aa6f6645917e7b807e9d1c00d4fa71f18343b0d4122a4d2df64dd6fee") - acc1Addr = crypto.PubkeyToAddress(acc1Key.PublicKey) - acc2Addr = crypto.PubkeyToAddress(acc2Key.PublicKey) - genesis = &core.Genesis{ - Config: params.TestChainConfig, - Alloc: types.GenesisAlloc{ - acc1Addr: {Balance: big.NewInt(params.Ether)}, - acc2Addr: {Balance: big.NewInt(params.Ether)}, - }, - } - genBlocks = 10 - signer = types.HomesteadSigner{} - tx = types.NewTx(&types.LegacyTx{ - Nonce: 11, - GasPrice: big.NewInt(11111), - Gas: 1111, - To: &acc2Addr, - Value: big.NewInt(111), - Data: []byte{0x11, 0x11, 0x11}, - }) - withdrawal = &types.Withdrawal{ - Index: 0, - Validator: 1, - Address: common.Address{0x12, 0x34}, - Amount: 10, - } - pending = types.NewBlockWithWithdrawals(&types.Header{Number: big.NewInt(11), Time: 42}, []*types.Transaction{tx}, nil, nil, []*types.Withdrawal{withdrawal}, blocktest.NewHasher()) - ) - backend := newTestBackend(t, genBlocks, genesis, ethash.NewFaker(), func(i int, b *core.BlockGen) { - // 
Transfer from account[0] to account[1] - // value: 1000 wei - // fee: 0 wei - tx, _ := types.SignTx(types.NewTx(&types.LegacyTx{Nonce: uint64(i), To: &acc2Addr, Value: big.NewInt(1000), Gas: params.TxGas, GasPrice: b.BaseFee(), Data: nil}), signer, acc1Key) - b.AddTx(tx) - }) - backend.setPendingBlock(pending) - api := NewBlockChainAPI(backend) - blockHashes := make([]common.Hash, genBlocks+1) - ctx := context.Background() - for i := 0; i <= genBlocks; i++ { - header, err := backend.HeaderByNumber(ctx, rpc.BlockNumber(i)) - if err != nil { - t.Errorf("failed to get block: %d err: %v", i, err) - } - blockHashes[i] = header.Hash() - } - pendingHash := pending.Hash() - - var testSuite = []struct { - blockNumber rpc.BlockNumber - blockHash *common.Hash - fullTx bool - reqHeader bool - file string - expectErr error - }{ - // 0. latest header - { - blockNumber: rpc.LatestBlockNumber, - reqHeader: true, - file: "tag-latest", - }, - // 1. genesis header - { - blockNumber: rpc.BlockNumber(0), - reqHeader: true, - file: "number-0", - }, - // 2. #1 header - { - blockNumber: rpc.BlockNumber(1), - reqHeader: true, - file: "number-1", - }, - // 3. latest-1 header - { - blockNumber: rpc.BlockNumber(9), - reqHeader: true, - file: "number-latest-1", - }, - // 4. latest+1 header - { - blockNumber: rpc.BlockNumber(11), - reqHeader: true, - file: "number-latest+1", - }, - // 5. pending header - { - blockNumber: rpc.PendingBlockNumber, - reqHeader: true, - file: "tag-pending", - }, - // 6. latest block - { - blockNumber: rpc.LatestBlockNumber, - file: "tag-latest", - }, - // 7. genesis block - { - blockNumber: rpc.BlockNumber(0), - file: "number-0", - }, - // 8. #1 block - { - blockNumber: rpc.BlockNumber(1), - file: "number-1", - }, - // 9. latest-1 block - { - blockNumber: rpc.BlockNumber(9), - fullTx: true, - file: "number-latest-1", - }, - // 10. latest+1 block - { - blockNumber: rpc.BlockNumber(11), - fullTx: true, - file: "number-latest+1", - }, - // 11. pending block - { - blockNumber: rpc.PendingBlockNumber, - file: "tag-pending", - }, - // 12. pending block + fullTx - { - blockNumber: rpc.PendingBlockNumber, - fullTx: true, - file: "tag-pending-fullTx", - }, - // 13. latest header by hash - { - blockHash: &blockHashes[len(blockHashes)-1], - reqHeader: true, - file: "hash-latest", - }, - // 14. genesis header by hash - { - blockHash: &blockHashes[0], - reqHeader: true, - file: "hash-0", - }, - // 15. #1 header - { - blockHash: &blockHashes[1], - reqHeader: true, - file: "hash-1", - }, - // 16. latest-1 header - { - blockHash: &blockHashes[len(blockHashes)-2], - reqHeader: true, - file: "hash-latest-1", - }, - // 17. empty hash - { - blockHash: &common.Hash{}, - reqHeader: true, - file: "hash-empty", - }, - // 18. pending hash - { - blockHash: &pendingHash, - reqHeader: true, - file: `hash-pending`, - }, - // 19. latest block - { - blockHash: &blockHashes[len(blockHashes)-1], - file: "hash-latest", - }, - // 20. genesis block - { - blockHash: &blockHashes[0], - file: "hash-genesis", - }, - // 21. #1 block - { - blockHash: &blockHashes[1], - file: "hash-1", - }, - // 22. latest-1 block - { - blockHash: &blockHashes[len(blockHashes)-2], - fullTx: true, - file: "hash-latest-1-fullTx", - }, - // 23. empty hash + body - { - blockHash: &common.Hash{}, - fullTx: true, - file: "hash-empty-fullTx", - }, - // 24. pending block - { - blockHash: &pendingHash, - file: `hash-pending`, - }, - // 25. 
pending block + fullTx - { - blockHash: &pendingHash, - fullTx: true, - file: "hash-pending-fullTx", - }, - } - - for i, tt := range testSuite { - var ( - result map[string]interface{} - err error - rpc string - ) - if tt.blockHash != nil { - if tt.reqHeader { - result = api.GetHeaderByHash(context.Background(), *tt.blockHash) - rpc = "eth_getHeaderByHash" - } else { - result, err = api.GetBlockByHash(context.Background(), *tt.blockHash, tt.fullTx) - rpc = "eth_getBlockByHash" - } - } else { - if tt.reqHeader { - result, err = api.GetHeaderByNumber(context.Background(), tt.blockNumber) - rpc = "eth_getHeaderByNumber" - } else { - result, err = api.GetBlockByNumber(context.Background(), tt.blockNumber, tt.fullTx) - rpc = "eth_getBlockByNumber" - } - } - if tt.expectErr != nil { - if err == nil { - t.Errorf("test %d: want error %v, have nothing", i, tt.expectErr) - continue - } - if !errors.Is(err, tt.expectErr) { - t.Errorf("test %d: error mismatch, want %v, have %v", i, tt.expectErr, err) - } - continue - } - if err != nil { - t.Errorf("test %d: want no error, have %v", i, err) - continue - } - - testRPCResponseWithFile(t, i, result, rpc, tt.file) - } -} - -func setupReceiptBackend(t *testing.T, genBlocks int) (*testBackend, []common.Hash) { - config := *params.MergedTestChainConfig - var ( - acc1Key, _ = crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a") - acc2Key, _ = crypto.HexToECDSA("49a7b37aa6f6645917e7b807e9d1c00d4fa71f18343b0d4122a4d2df64dd6fee") - acc1Addr = crypto.PubkeyToAddress(acc1Key.PublicKey) - acc2Addr = crypto.PubkeyToAddress(acc2Key.PublicKey) - contract = common.HexToAddress("0000000000000000000000000000000000031ec7") - genesis = &core.Genesis{ - Config: &config, - ExcessBlobGas: new(uint64), - BlobGasUsed: new(uint64), - Alloc: types.GenesisAlloc{ - acc1Addr: {Balance: big.NewInt(params.Ether)}, - acc2Addr: {Balance: big.NewInt(params.Ether)}, - // // SPDX-License-Identifier: GPL-3.0 - // pragma solidity >=0.7.0 <0.9.0; - // - // contract Token { - // event Transfer(address indexed from, address indexed to, uint256 value); - // function transfer(address to, uint256 value) public returns (bool) { - // emit Transfer(msg.sender, to, value); - // return true; - // } - // } - contract: {Balance: big.NewInt(params.Ether), Code: common.FromHex("0x608060405234801561001057600080fd5b506004361061002b5760003560e01c8063a9059cbb14610030575b600080fd5b61004a6004803603810190610045919061016a565b610060565b60405161005791906101c5565b60405180910390f35b60008273ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff167fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef846040516100bf91906101ef565b60405180910390a36001905092915050565b600080fd5b600073ffffffffffffffffffffffffffffffffffffffff82169050919050565b6000610101826100d6565b9050919050565b610111816100f6565b811461011c57600080fd5b50565b60008135905061012e81610108565b92915050565b6000819050919050565b61014781610134565b811461015257600080fd5b50565b6000813590506101648161013e565b92915050565b60008060408385031215610181576101806100d1565b5b600061018f8582860161011f565b92505060206101a085828601610155565b9150509250929050565b60008115159050919050565b6101bf816101aa565b82525050565b60006020820190506101da60008301846101b6565b92915050565b6101e981610134565b82525050565b600060208201905061020460008301846101e0565b9291505056fea2646970667358221220b469033f4b77b9565ee84e0a2f04d496b18160d26034d54f9487e57788fd36d564736f6c63430008120033")}, - }, - } - signer = 
types.LatestSignerForChainID(params.TestChainConfig.ChainID) - txHashes = make([]common.Hash, genBlocks) - ) - - backend := newTestBackend(t, genBlocks, genesis, beacon.New(ethash.NewFaker()), func(i int, b *core.BlockGen) { - var ( - tx *types.Transaction - err error - ) - b.SetPoS() - switch i { - case 0: - // transfer 1000wei - tx, err = types.SignTx(types.NewTx(&types.LegacyTx{Nonce: uint64(i), To: &acc2Addr, Value: big.NewInt(1000), Gas: params.TxGas, GasPrice: b.BaseFee(), Data: nil}), types.HomesteadSigner{}, acc1Key) - case 1: - // create contract - tx, err = types.SignTx(types.NewTx(&types.LegacyTx{Nonce: uint64(i), To: nil, Gas: 53100, GasPrice: b.BaseFee(), Data: common.FromHex("0x60806040")}), signer, acc1Key) - case 2: - // with logs - // transfer(address to, uint256 value) - data := fmt.Sprintf("0xa9059cbb%s%s", common.HexToHash(common.BigToAddress(big.NewInt(int64(i + 1))).Hex()).String()[2:], common.BytesToHash([]byte{byte(i + 11)}).String()[2:]) - tx, err = types.SignTx(types.NewTx(&types.LegacyTx{Nonce: uint64(i), To: &contract, Gas: 60000, GasPrice: b.BaseFee(), Data: common.FromHex(data)}), signer, acc1Key) - case 3: - // dynamic fee with logs - // transfer(address to, uint256 value) - data := fmt.Sprintf("0xa9059cbb%s%s", common.HexToHash(common.BigToAddress(big.NewInt(int64(i + 1))).Hex()).String()[2:], common.BytesToHash([]byte{byte(i + 11)}).String()[2:]) - fee := big.NewInt(500) - fee.Add(fee, b.BaseFee()) - tx, err = types.SignTx(types.NewTx(&types.DynamicFeeTx{Nonce: uint64(i), To: &contract, Gas: 60000, Value: big.NewInt(1), GasTipCap: big.NewInt(500), GasFeeCap: fee, Data: common.FromHex(data)}), signer, acc1Key) - case 4: - // access list with contract create - accessList := types.AccessList{{ - Address: contract, - StorageKeys: []common.Hash{{0}}, - }} - tx, err = types.SignTx(types.NewTx(&types.AccessListTx{Nonce: uint64(i), To: nil, Gas: 58100, GasPrice: b.BaseFee(), Data: common.FromHex("0x60806040"), AccessList: accessList}), signer, acc1Key) - case 5: - // blob tx - fee := big.NewInt(500) - fee.Add(fee, b.BaseFee()) - tx, err = types.SignTx(types.NewTx(&types.BlobTx{ - Nonce: uint64(i), - GasTipCap: uint256.NewInt(1), - GasFeeCap: uint256.MustFromBig(fee), - Gas: params.TxGas, - To: acc2Addr, - BlobFeeCap: uint256.NewInt(1), - BlobHashes: []common.Hash{{1}}, - Value: new(uint256.Int), - }), signer, acc1Key) - - case 6: - // blob tx with blobSidecar - blobSidecars := makeBlkSidecars(1, 1) - blobHashes := blobSidecars[0].BlobHashes() - fee := big.NewInt(500) - fee.Add(fee, b.BaseFee()) - tx, err = types.SignTx(types.NewTx(&types.BlobTx{ - Nonce: uint64(i), - GasTipCap: uint256.NewInt(1), - GasFeeCap: uint256.MustFromBig(fee), - Gas: params.TxGas, - To: acc2Addr, - BlobFeeCap: uint256.NewInt(1), - BlobHashes: blobHashes, - Value: new(uint256.Int), - }), signer, acc1Key) - b.AddBlobSidecar(&types.BlobSidecar{ - BlobTxSidecar: *blobSidecars[0], - TxHash: tx.Hash(), - TxIndex: 0, - }) - } - if err != nil { - t.Errorf("failed to sign tx: %v", err) - } - if tx != nil { - b.AddTx(tx) - txHashes[i] = tx.Hash() - } - }) - return backend, txHashes -} - -func TestRPCGetTransactionReceipt(t *testing.T) { - t.Parallel() - - var ( - backend, txHashes = setupReceiptBackend(t, 6) - api = NewTransactionAPI(backend, new(AddrLocker)) - ) - - var testSuite = []struct { - txHash common.Hash - file string - }{ - // 0. normal success - { - txHash: txHashes[0], - file: "normal-transfer-tx", - }, - // 1. 
create contract - { - txHash: txHashes[1], - file: "create-contract-tx", - }, - // 2. with logs success - { - txHash: txHashes[2], - file: "with-logs", - }, - // 3. dynamic tx with logs success - { - txHash: txHashes[3], - file: `dynamic-tx-with-logs`, - }, - // 4. access list tx with create contract - { - txHash: txHashes[4], - file: "create-contract-with-access-list", - }, - // 5. txhash empty - { - txHash: common.Hash{}, - file: "txhash-empty", - }, - // 6. txhash not found - { - txHash: common.HexToHash("deadbeef"), - file: "txhash-notfound", - }, - // 7. blob tx - { - txHash: txHashes[5], - file: "blob-tx", - }, - } - - for i, tt := range testSuite { - var ( - result interface{} - err error - ) - result, err = api.GetTransactionReceipt(context.Background(), tt.txHash) - if err != nil { - t.Errorf("test %d: want no error, have %v", i, err) - continue - } - testRPCResponseWithFile(t, i, result, "eth_getTransactionReceipt", tt.file) - } -} - -func TestRPCGetBlockReceipts(t *testing.T) { - t.Parallel() - - var ( - genBlocks = 6 - backend, _ = setupReceiptBackend(t, genBlocks) - api = NewBlockChainAPI(backend) - ) - blockHashes := make([]common.Hash, genBlocks+1) - ctx := context.Background() - for i := 0; i <= genBlocks; i++ { - header, err := backend.HeaderByNumber(ctx, rpc.BlockNumber(i)) - if err != nil { - t.Errorf("failed to get block: %d err: %v", i, err) - } - blockHashes[i] = header.Hash() - } - - var testSuite = []struct { - test rpc.BlockNumberOrHash - file string - }{ - // 0. block without any txs(hash) - { - test: rpc.BlockNumberOrHashWithHash(blockHashes[0], false), - file: "number-0", - }, - // 1. block without any txs(number) - { - test: rpc.BlockNumberOrHashWithNumber(0), - file: "number-1", - }, - // 2. earliest tag - { - test: rpc.BlockNumberOrHashWithNumber(rpc.EarliestBlockNumber), - file: "tag-earliest", - }, - // 3. latest tag - { - test: rpc.BlockNumberOrHashWithNumber(rpc.LatestBlockNumber), - file: "tag-latest", - }, - // 4. block with legacy transfer tx(hash) - { - test: rpc.BlockNumberOrHashWithHash(blockHashes[1], false), - file: "block-with-legacy-transfer-tx", - }, - // 5. block with contract create tx(number) - { - test: rpc.BlockNumberOrHashWithNumber(rpc.BlockNumber(2)), - file: "block-with-contract-create-tx", - }, - // 6. block with legacy contract call tx(hash) - { - test: rpc.BlockNumberOrHashWithHash(blockHashes[3], false), - file: "block-with-legacy-contract-call-tx", - }, - // 7. block with dynamic fee tx(number) - { - test: rpc.BlockNumberOrHashWithNumber(rpc.BlockNumber(4)), - file: "block-with-dynamic-fee-tx", - }, - // 8. block is empty - { - test: rpc.BlockNumberOrHashWithHash(common.Hash{}, false), - file: "hash-empty", - }, - // 9. block is not found - { - test: rpc.BlockNumberOrHashWithHash(common.HexToHash("deadbeef"), false), - file: "hash-notfound", - }, - // 10. block is not found - { - test: rpc.BlockNumberOrHashWithNumber(rpc.BlockNumber(genBlocks + 1)), - file: "block-notfound", - }, - // 11. 
block with blob tx - { - test: rpc.BlockNumberOrHashWithNumber(rpc.BlockNumber(6)), - file: "block-with-blob-tx", - }, - } - - for i, tt := range testSuite { - var ( - result interface{} - err error - ) - result, err = api.GetBlockReceipts(context.Background(), tt.test) - if err != nil { - t.Errorf("test %d: want no error, have %v", i, err) - continue - } - testRPCResponseWithFile(t, i, result, "eth_getBlockReceipts", tt.file) - } -} - -func makeBlkSidecars(n, nPerTx int) []*types.BlobTxSidecar { - if n <= 0 { - return nil - } - ret := make([]*types.BlobTxSidecar, n) - for i := 0; i < n; i++ { - blobs := make([]kzg4844.Blob, nPerTx) - commitments := make([]kzg4844.Commitment, nPerTx) - proofs := make([]kzg4844.Proof, nPerTx) - for i := 0; i < nPerTx; i++ { - commitments[i], _ = kzg4844.BlobToCommitment(blobs[i]) - proofs[i], _ = kzg4844.ComputeBlobProof(blobs[i], commitments[i]) - } - ret[i] = &types.BlobTxSidecar{ - Blobs: blobs, - Commitments: commitments, - Proofs: proofs, - } - } - return ret -} - -func TestRPCGetBlobSidecars(t *testing.T) { - t.Parallel() - var ( - genBlocks = 7 - backend, _ = setupReceiptBackend(t, genBlocks) - api = NewBlockChainAPI(backend) - ) - blockHashes := make([]common.Hash, genBlocks+1) - ctx := context.Background() - for i := 0; i <= genBlocks; i++ { - header, err := backend.HeaderByNumber(ctx, rpc.BlockNumber(i)) - if err != nil { - t.Errorf("failed to get block: %d err: %v", i, err) - } - blockHashes[i] = header.Hash() - } - - var testSuite = []struct { - test rpc.BlockNumberOrHash - fullBlob bool - file string - }{ - // 1. block without any txs(number) - { - test: rpc.BlockNumberOrHashWithNumber(0), - fullBlob: true, - file: "number-1", - }, - // 2. earliest tag - { - test: rpc.BlockNumberOrHashWithNumber(rpc.EarliestBlockNumber), - fullBlob: true, - file: "tag-earliest", - }, - // 3. latest tag - { - test: rpc.BlockNumberOrHashWithNumber(rpc.LatestBlockNumber), - fullBlob: true, - file: "tag-latest", - }, - // 4. block is empty - { - test: rpc.BlockNumberOrHashWithHash(common.Hash{}, false), - fullBlob: true, - file: "hash-empty", - }, - // 5. block is not found - { - test: rpc.BlockNumberOrHashWithHash(common.HexToHash("deadbeef"), false), - fullBlob: true, - file: "hash-notfound", - }, - // 6. block is not found - { - test: rpc.BlockNumberOrHashWithNumber(rpc.BlockNumber(genBlocks + 1)), - fullBlob: true, - file: "block-notfound", - }, - // 7. block with blob tx - { - test: rpc.BlockNumberOrHashWithNumber(rpc.BlockNumber(6)), - fullBlob: true, - file: "block-with-blob-tx", - }, - // 8. block with sidecar - { - test: rpc.BlockNumberOrHashWithNumber(rpc.BlockNumber(7)), - fullBlob: true, - file: "block-with-blobSidecars", - }, - // 9. block with sidecar but show little - { - test: rpc.BlockNumberOrHashWithNumber(rpc.BlockNumber(7)), - fullBlob: false, - file: "block-with-blobSidecars-show-little", - }, - } - - for i, tt := range testSuite { - var ( - result interface{} - err error - ) - result, err = api.GetBlobSidecars(context.Background(), tt.test, &tt.fullBlob) - if err != nil { - t.Errorf("test %d: want no error, have %v", i, err) - continue - } - testRPCResponseWithFile(t, i, result, "eth_getBlobSidecars", tt.file) - } -} - -func TestGetBlobSidecarByTxHash(t *testing.T) { - t.Parallel() - var ( - backend, txHashs = setupReceiptBackend(t, 7) - api = NewBlockChainAPI(backend) - ) - var testSuite = []struct { - test common.Hash - fullBlob bool - file string - }{ - // 0. 
txHash is empty - { - test: common.Hash{}, - fullBlob: true, - file: "hash-empty", - }, - // 1. txHash is not found - { - test: common.HexToHash("deadbeef"), - fullBlob: true, - file: "hash-notfound", - }, - // 2. txHash is not blob tx - { - test: common.HexToHash("deadbeef"), - fullBlob: true, - file: "not-blob-tx", - }, - // 3. block with blob tx without sidecar - { - test: txHashs[5], - fullBlob: true, - file: "block-with-blob-tx", - }, - // 4. block with sidecar - { - test: txHashs[6], - fullBlob: true, - file: "block-with-blobSidecars", - }, - // 5. block show part blobs - { - test: txHashs[6], - fullBlob: false, - file: "block-with-blobSidecars-show-little", - }, - } - - for i, tt := range testSuite { - var ( - result interface{} - err error - ) - result, err = api.GetBlobSidecarByTxHash(context.Background(), tt.test, &tt.fullBlob) - if err != nil { - t.Errorf("test %d: want no error, have %v", i, err) - continue - } - testRPCResponseWithFile(t, i, result, "eth_getBlobSidecarByTxHash", tt.file) - } -} - -func testRPCResponseWithFile(t *testing.T, testid int, result interface{}, rpc string, file string) { - data, err := json.MarshalIndent(result, "", " ") - if err != nil { - t.Errorf("test %d: json marshal error", testid) - return - } - outputFile := filepath.Join("testdata", fmt.Sprintf("%s-%s.json", rpc, file)) - if os.Getenv("WRITE_TEST_FILES") != "" { - os.WriteFile(outputFile, data, 0644) - } - want, err := os.ReadFile(outputFile) - if err != nil { - t.Fatalf("error reading expected test file: %s output: %v", outputFile, err) - } - require.JSONEqf(t, string(want), string(data), "test %d: json not match, want: %s, have: %s", testid, string(want), string(data)) -} diff --git a/miner/miner_test.go b/miner/miner_test.go deleted file mode 100644 index 5907fb4464..0000000000 --- a/miner/miner_test.go +++ /dev/null @@ -1,337 +0,0 @@ -// Copyright 2020 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -// Package miner implements Ethereum block creation and mining. 
-package miner - -import ( - "errors" - "math/big" - "testing" - "time" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/consensus/clique" - "github.com/ethereum/go-ethereum/core" - "github.com/ethereum/go-ethereum/core/rawdb" - "github.com/ethereum/go-ethereum/core/state" - "github.com/ethereum/go-ethereum/core/txpool" - "github.com/ethereum/go-ethereum/core/txpool/legacypool" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/core/vm" - "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/eth/downloader" - "github.com/ethereum/go-ethereum/event" - "github.com/ethereum/go-ethereum/params" - "github.com/ethereum/go-ethereum/trie" - "github.com/ethereum/go-ethereum/triedb" -) - -type mockBackend struct { - bc *core.BlockChain - txPool *txpool.TxPool -} - -func NewMockBackend(bc *core.BlockChain, txPool *txpool.TxPool) *mockBackend { - return &mockBackend{ - bc: bc, - txPool: txPool, - } -} - -func (m *mockBackend) BlockChain() *core.BlockChain { - return m.bc -} - -func (m *mockBackend) TxPool() *txpool.TxPool { - return m.txPool -} - -func (m *mockBackend) StateAtBlock(block *types.Block, reexec uint64, base *state.StateDB, checkLive bool, preferDisk bool) (statedb *state.StateDB, err error) { - return nil, errors.New("not supported") -} - -type testBlockChain struct { - root common.Hash - config *params.ChainConfig - statedb *state.StateDB - gasLimit uint64 - chainHeadFeed *event.Feed -} - -func (bc *testBlockChain) Config() *params.ChainConfig { - return bc.config -} - -func (bc *testBlockChain) CurrentBlock() *types.Header { - return &types.Header{ - Number: new(big.Int), - GasLimit: bc.gasLimit, - } -} - -func (bc *testBlockChain) GetBlock(hash common.Hash, number uint64) *types.Block { - return types.NewBlock(bc.CurrentBlock(), nil, nil, nil, trie.NewStackTrie(nil)) -} - -func (bc *testBlockChain) StateAt(common.Hash) (*state.StateDB, error) { - return bc.statedb, nil -} - -func (bc *testBlockChain) HasState(root common.Hash) bool { - return bc.root == root -} - -func (bc *testBlockChain) SubscribeChainHeadEvent(ch chan<- core.ChainHeadEvent) event.Subscription { - return bc.chainHeadFeed.Subscribe(ch) -} - -func TestMiner(t *testing.T) { - t.Parallel() - miner, mux, cleanup := createMiner(t) - defer cleanup(false) - - miner.Start() - waitForMiningState(t, miner, true) - // Start the downloader - mux.Post(downloader.StartEvent{}) - waitForMiningState(t, miner, false) - // Stop the downloader and wait for the update loop to run - mux.Post(downloader.DoneEvent{}) - waitForMiningState(t, miner, true) - - // Subsequent downloader events after a successful DoneEvent should not cause the - // miner to start or stop. This prevents a security vulnerability - // that would allow entities to present fake high blocks that would - // stop mining operations by causing a downloader sync - // until it was discovered they were invalid, whereon mining would resume. - mux.Post(downloader.StartEvent{}) - waitForMiningState(t, miner, true) - - mux.Post(downloader.FailedEvent{}) - waitForMiningState(t, miner, true) -} - -// TestMinerDownloaderFirstFails tests that mining is only -// permitted to run indefinitely once the downloader sees a DoneEvent (success). -// An initial FailedEvent should allow mining to stop on a subsequent -// downloader StartEvent. 
-func TestMinerDownloaderFirstFails(t *testing.T) { - t.Parallel() - miner, mux, cleanup := createMiner(t) - defer cleanup(false) - - miner.Start() - waitForMiningState(t, miner, true) - // Start the downloader - mux.Post(downloader.StartEvent{}) - waitForMiningState(t, miner, false) - - // Stop the downloader and wait for the update loop to run - mux.Post(downloader.FailedEvent{}) - waitForMiningState(t, miner, true) - - // Since the downloader hasn't yet emitted a successful DoneEvent, - // we expect the miner to stop on next StartEvent. - mux.Post(downloader.StartEvent{}) - waitForMiningState(t, miner, false) - - // Downloader finally succeeds. - mux.Post(downloader.DoneEvent{}) - waitForMiningState(t, miner, true) - - // Downloader starts again. - // Since it has achieved a DoneEvent once, we expect miner - // state to be unchanged. - mux.Post(downloader.StartEvent{}) - waitForMiningState(t, miner, true) - - mux.Post(downloader.FailedEvent{}) - waitForMiningState(t, miner, true) -} - -func TestMinerStartStopAfterDownloaderEvents(t *testing.T) { - t.Parallel() - miner, mux, cleanup := createMiner(t) - defer cleanup(false) - - miner.Start() - waitForMiningState(t, miner, true) - // Start the downloader - mux.Post(downloader.StartEvent{}) - waitForMiningState(t, miner, false) - - // Downloader finally succeeds. - mux.Post(downloader.DoneEvent{}) - waitForMiningState(t, miner, true) - - miner.Stop() - waitForMiningState(t, miner, false) - - miner.Start() - waitForMiningState(t, miner, true) - - miner.Stop() - waitForMiningState(t, miner, false) -} - -func TestStartWhileDownload(t *testing.T) { - t.Parallel() - miner, mux, cleanup := createMiner(t) - defer cleanup(false) - waitForMiningState(t, miner, false) - miner.Start() - waitForMiningState(t, miner, true) - // Stop the downloader and wait for the update loop to run - mux.Post(downloader.StartEvent{}) - waitForMiningState(t, miner, false) - // Starting the miner after the downloader should not work - miner.Start() - waitForMiningState(t, miner, false) -} - -func TestStartStopMiner(t *testing.T) { - t.Parallel() - miner, _, cleanup := createMiner(t) - defer cleanup(false) - waitForMiningState(t, miner, false) - miner.Start() - waitForMiningState(t, miner, true) - miner.Stop() - waitForMiningState(t, miner, false) -} - -func TestCloseMiner(t *testing.T) { - t.Parallel() - miner, _, cleanup := createMiner(t) - defer cleanup(true) - waitForMiningState(t, miner, false) - miner.Start() - waitForMiningState(t, miner, true) - // Terminate the miner and wait for the update loop to run - miner.Close() - waitForMiningState(t, miner, false) -} - -// TestMinerSetEtherbase checks that etherbase becomes set even if mining isn't -// possible at the moment -func TestMinerSetEtherbase(t *testing.T) { - t.Parallel() - miner, mux, cleanup := createMiner(t) - defer cleanup(false) - miner.Start() - waitForMiningState(t, miner, true) - // Start the downloader - mux.Post(downloader.StartEvent{}) - waitForMiningState(t, miner, false) - // Now user tries to configure proper mining address - miner.Start() - // Stop the downloader and wait for the update loop to run - mux.Post(downloader.DoneEvent{}) - waitForMiningState(t, miner, true) - - coinbase := common.HexToAddress("0xdeedbeef") - miner.SetEtherbase(coinbase) - if addr := miner.worker.etherbase(); addr != coinbase { - t.Fatalf("Unexpected etherbase want %x got %x", coinbase, addr) - } -} - -// waitForMiningState waits until either -// * the desired mining state was reached -// * a timeout was reached which 
fails the test -func waitForMiningState(t *testing.T, m *Miner, mining bool) { - t.Helper() - - var state bool - for i := 0; i < 100; i++ { - time.Sleep(10 * time.Millisecond) - if state = m.Mining(); state == mining { - return - } - } - t.Fatalf("Mining() == %t, want %t", state, mining) -} - -func minerTestGenesisBlock(period uint64, gasLimit uint64, faucet common.Address) *core.Genesis { - config := *params.AllCliqueProtocolChanges - config.Clique = ¶ms.CliqueConfig{ - Period: period, - Epoch: config.Clique.Epoch, - } - - // Assemble and return the genesis with the precompiles and faucet pre-funded - return &core.Genesis{ - Config: &config, - ExtraData: append(append(make([]byte, 32), faucet[:]...), make([]byte, crypto.SignatureLength)...), - GasLimit: gasLimit, - BaseFee: big.NewInt(params.InitialBaseFee), - Difficulty: big.NewInt(1), - Alloc: map[common.Address]types.Account{ - common.BytesToAddress([]byte{1}): {Balance: big.NewInt(1)}, // ECRecover - common.BytesToAddress([]byte{2}): {Balance: big.NewInt(1)}, // SHA256 - common.BytesToAddress([]byte{3}): {Balance: big.NewInt(1)}, // RIPEMD - common.BytesToAddress([]byte{4}): {Balance: big.NewInt(1)}, // Identity - common.BytesToAddress([]byte{5}): {Balance: big.NewInt(1)}, // ModExp - common.BytesToAddress([]byte{6}): {Balance: big.NewInt(1)}, // ECAdd - common.BytesToAddress([]byte{7}): {Balance: big.NewInt(1)}, // ECScalarMul - common.BytesToAddress([]byte{8}): {Balance: big.NewInt(1)}, // ECPairing - common.BytesToAddress([]byte{9}): {Balance: big.NewInt(1)}, // BLAKE2b - faucet: {Balance: new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 256), big.NewInt(9))}, - }, - } -} -func createMiner(t *testing.T) (*Miner, *event.TypeMux, func(skipMiner bool)) { - // Create Ethash config - config := Config{ - Etherbase: common.HexToAddress("123456789"), - } - // Create chainConfig - chainDB := rawdb.NewMemoryDatabase() - triedb := triedb.NewDatabase(chainDB, nil) - genesis := minerTestGenesisBlock(15, 11_500_000, common.HexToAddress("12345")) - chainConfig, _, err := core.SetupGenesisBlock(chainDB, triedb, genesis) - if err != nil { - t.Fatalf("can't create new chain config: %v", err) - } - // Create consensus engine - engine := clique.New(chainConfig.Clique, chainDB) - // Create Ethereum backend - bc, err := core.NewBlockChain(chainDB, nil, genesis, nil, engine, vm.Config{}, nil, nil) - if err != nil { - t.Fatalf("can't create new chain %v", err) - } - statedb, _ := state.New(bc.Genesis().Root(), bc.StateCache(), nil) - blockchain := &testBlockChain{bc.Genesis().Root(), chainConfig, statedb, 10000000, new(event.Feed)} - - pool := legacypool.New(testTxPoolConfig, blockchain) - txpool, _ := txpool.New(testTxPoolConfig.PriceLimit, blockchain, []txpool.SubPool{pool}) - - backend := NewMockBackend(bc, txpool) - // Create event Mux - mux := new(event.TypeMux) - // Create Miner - miner := New(backend, &config, chainConfig, mux, engine, nil) - cleanup := func(skipMiner bool) { - bc.Stop() - engine.Close() - txpool.Close() - if !skipMiner { - miner.Close() - } - } - return miner, mux, cleanup -} diff --git a/miner/payload_building_test.go b/miner/payload_building_test.go deleted file mode 100644 index 708072b5ec..0000000000 --- a/miner/payload_building_test.go +++ /dev/null @@ -1,160 +0,0 @@ -// Copyright 2022 The go-ethereum Authors -// This file is part of the go-ethereum library. 
-// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see - -package miner - -import ( - "reflect" - "testing" - "time" - - "github.com/ethereum/go-ethereum/beacon/engine" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/consensus/ethash" - "github.com/ethereum/go-ethereum/core/rawdb" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/params" -) - -func TestBuildPayload(t *testing.T) { - t.Parallel() - var ( - db = rawdb.NewMemoryDatabase() - recipient = common.HexToAddress("0xdeadbeef") - ) - w, b := newTestWorker(t, params.TestChainConfig, ethash.NewFaker(), db, 0) - defer w.close() - - timestamp := uint64(time.Now().Unix()) - args := &BuildPayloadArgs{ - Parent: b.chain.CurrentBlock().Hash(), - Timestamp: timestamp, - Random: common.Hash{}, - FeeRecipient: recipient, - } - payload, err := w.buildPayload(args) - if err != nil { - t.Fatalf("Failed to build payload %v", err) - } - verify := func(outer *engine.ExecutionPayloadEnvelope, txs int) { - payload := outer.ExecutionPayload - if payload.ParentHash != b.chain.CurrentBlock().Hash() { - t.Fatal("Unexpected parent hash") - } - if payload.Random != (common.Hash{}) { - t.Fatal("Unexpected random value") - } - if payload.Timestamp != timestamp { - t.Fatal("Unexpected timestamp") - } - if payload.FeeRecipient != recipient { - t.Fatal("Unexpected fee recipient") - } - if len(payload.Transactions) != txs { - t.Fatal("Unexpected transaction set") - } - } - empty := payload.ResolveEmpty() - verify(empty, 0) - - full := payload.ResolveFull() - verify(full, len(pendingTxs)) - - // Ensure resolve can be called multiple times and the - // result should be unchanged - dataOne := payload.Resolve() - dataTwo := payload.Resolve() - if !reflect.DeepEqual(dataOne, dataTwo) { - t.Fatal("Unexpected payload data") - } -} - -func TestPayloadId(t *testing.T) { - t.Parallel() - ids := make(map[string]int) - for i, tt := range []*BuildPayloadArgs{ - { - Parent: common.Hash{1}, - Timestamp: 1, - Random: common.Hash{0x1}, - FeeRecipient: common.Address{0x1}, - }, - // Different parent - { - Parent: common.Hash{2}, - Timestamp: 1, - Random: common.Hash{0x1}, - FeeRecipient: common.Address{0x1}, - }, - // Different timestamp - { - Parent: common.Hash{2}, - Timestamp: 2, - Random: common.Hash{0x1}, - FeeRecipient: common.Address{0x1}, - }, - // Different Random - { - Parent: common.Hash{2}, - Timestamp: 2, - Random: common.Hash{0x2}, - FeeRecipient: common.Address{0x1}, - }, - // Different fee-recipient - { - Parent: common.Hash{2}, - Timestamp: 2, - Random: common.Hash{0x2}, - FeeRecipient: common.Address{0x2}, - }, - // Different withdrawals (non-empty) - { - Parent: common.Hash{2}, - Timestamp: 2, - Random: common.Hash{0x2}, - FeeRecipient: common.Address{0x2}, - Withdrawals: []*types.Withdrawal{ - { - Index: 0, - Validator: 0, - Address: common.Address{}, - Amount: 0, - }, - }, - }, - // 
Different withdrawals (non-empty) - { - Parent: common.Hash{2}, - Timestamp: 2, - Random: common.Hash{0x2}, - FeeRecipient: common.Address{0x2}, - Withdrawals: []*types.Withdrawal{ - { - Index: 2, - Validator: 0, - Address: common.Address{}, - Amount: 0, - }, - }, - }, - } { - id := tt.Id().String() - if prev, exists := ids[id]; exists { - t.Errorf("ID collision, case %d and case %d: id %v", prev, i, id) - } - ids[id] = i - } -} diff --git a/miner/worker_test.go b/miner/worker_test.go deleted file mode 100644 index 268f3f69a5..0000000000 --- a/miner/worker_test.go +++ /dev/null @@ -1,413 +0,0 @@ -// Copyright 2018 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package miner // TOFIX - -import ( - "math/big" - "testing" - "time" - - "github.com/ethereum/go-ethereum/accounts" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/consensus" - "github.com/ethereum/go-ethereum/consensus/clique" - "github.com/ethereum/go-ethereum/consensus/ethash" - "github.com/ethereum/go-ethereum/core" - "github.com/ethereum/go-ethereum/core/rawdb" - "github.com/ethereum/go-ethereum/core/txpool" - "github.com/ethereum/go-ethereum/core/txpool/legacypool" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/core/vm" - "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/ethdb" - "github.com/ethereum/go-ethereum/event" - "github.com/ethereum/go-ethereum/params" - "github.com/holiman/uint256" -) - -const ( - // testCode is the testing contract binary code which will initialises some - // variables in constructor - testCode = "0x60806040527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0060005534801561003457600080fd5b5060fc806100436000396000f3fe6080604052348015600f57600080fd5b506004361060325760003560e01c80630c4dae8814603757806398a213cf146053575b600080fd5b603d607e565b6040518082815260200191505060405180910390f35b607c60048036036020811015606757600080fd5b81019080803590602001909291905050506084565b005b60005481565b806000819055507fe9e44f9f7da8c559de847a3232b57364adc0354f15a2cd8dc636d54396f9587a6000546040518082815260200191505060405180910390a15056fea265627a7a723058208ae31d9424f2d0bc2a3da1a5dd659db2d71ec322a17db8f87e19e209e3a1ff4a64736f6c634300050a0032" - - // testGas is the gas required for contract deployment. 
- testGas = 144109 -) - -var ( - // Test chain configurations - testTxPoolConfig legacypool.Config - ethashChainConfig *params.ChainConfig - cliqueChainConfig *params.ChainConfig - - // Test accounts - testBankKey, _ = crypto.GenerateKey() - testBankAddress = crypto.PubkeyToAddress(testBankKey.PublicKey) - testBankFunds = big.NewInt(1000000000000000000) - - testUserKey, _ = crypto.GenerateKey() - testUserAddress = crypto.PubkeyToAddress(testUserKey.PublicKey) - - // Test transactions - pendingTxs []*types.Transaction - newTxs []*types.Transaction - - testConfig = &Config{ - Recommit: time.Second, - GasCeil: params.GenesisGasLimit, - } -) - -func init() { - testTxPoolConfig = legacypool.DefaultConfig - testTxPoolConfig.Journal = "" - ethashChainConfig = new(params.ChainConfig) - *ethashChainConfig = *params.TestChainConfig - cliqueChainConfig = new(params.ChainConfig) - *cliqueChainConfig = *params.TestChainConfig - cliqueChainConfig.Clique = ¶ms.CliqueConfig{ - Period: 10, - Epoch: 30000, - } - - signer := types.LatestSigner(params.TestChainConfig) - tx1 := types.MustSignNewTx(testBankKey, signer, &types.AccessListTx{ - ChainID: params.TestChainConfig.ChainID, - Nonce: 0, - To: &testUserAddress, - Value: big.NewInt(1000), - Gas: params.TxGas, - GasPrice: big.NewInt(params.InitialBaseFee), - }) - pendingTxs = append(pendingTxs, tx1) - - tx2 := types.MustSignNewTx(testBankKey, signer, &types.LegacyTx{ - Nonce: 1, - To: &testUserAddress, - Value: big.NewInt(1000), - Gas: params.TxGas, - GasPrice: big.NewInt(params.InitialBaseFee), - }) - newTxs = append(newTxs, tx2) -} - -// testWorkerBackend implements worker.Backend interfaces and wraps all information needed during the testing. -type testWorkerBackend struct { - db ethdb.Database - txPool *txpool.TxPool - chain *core.BlockChain - genesis *core.Genesis -} - -func newTestWorkerBackend(t *testing.T, chainConfig *params.ChainConfig, engine consensus.Engine, db ethdb.Database, n int) *testWorkerBackend { - var gspec = &core.Genesis{ - Config: chainConfig, - Alloc: types.GenesisAlloc{testBankAddress: {Balance: testBankFunds}}, - } - switch e := engine.(type) { - case *clique.Clique: - gspec.ExtraData = make([]byte, 32+common.AddressLength+crypto.SignatureLength) - copy(gspec.ExtraData[32:32+common.AddressLength], testBankAddress.Bytes()) - e.Authorize(testBankAddress, func(account accounts.Account, s string, data []byte) ([]byte, error) { - return crypto.Sign(crypto.Keccak256(data), testBankKey) - }) - case *ethash.Ethash: - default: - t.Fatalf("unexpected consensus engine type: %T", engine) - } - chain, err := core.NewBlockChain(db, &core.CacheConfig{TrieDirtyDisabled: true}, gspec, nil, engine, vm.Config{}, nil, nil) - if err != nil { - t.Fatalf("core.NewBlockChain failed: %v", err) - } - pool := legacypool.New(testTxPoolConfig, chain) - txpool, _ := txpool.New(testTxPoolConfig.PriceLimit, chain, []txpool.SubPool{pool}) - - return &testWorkerBackend{ - db: db, - chain: chain, - txPool: txpool, - genesis: gspec, - } -} - -func (b *testWorkerBackend) BlockChain() *core.BlockChain { return b.chain } -func (b *testWorkerBackend) TxPool() *txpool.TxPool { return b.txPool } - -func (b *testWorkerBackend) newRandomTx(creation bool) *types.Transaction { - var tx *types.Transaction - gasPrice := big.NewInt(10 * params.InitialBaseFee) - if creation { - tx, _ = types.SignTx(types.NewContractCreation(b.txPool.Nonce(testBankAddress), big.NewInt(0), testGas, gasPrice, common.FromHex(testCode)), types.HomesteadSigner{}, testBankKey) - } else { - tx, _ = 
types.SignTx(types.NewTransaction(b.txPool.Nonce(testBankAddress), testUserAddress, big.NewInt(1000), params.TxGas, gasPrice, nil), types.HomesteadSigner{}, testBankKey) - } - return tx -} - -func newTestWorker(t *testing.T, chainConfig *params.ChainConfig, engine consensus.Engine, db ethdb.Database, blocks int) (*worker, *testWorkerBackend) { - backend := newTestWorkerBackend(t, chainConfig, engine, db, blocks) - backend.txPool.Add(pendingTxs, true, false) - w := newWorker(testConfig, chainConfig, engine, backend, new(event.TypeMux), nil, false) - w.setEtherbase(testBankAddress) - return w, backend -} - -func TestGenerateAndImportBlock(t *testing.T) { - t.Parallel() - var ( - db = rawdb.NewMemoryDatabase() - config = *params.AllCliqueProtocolChanges - ) - config.Clique = ¶ms.CliqueConfig{Period: 1, Epoch: 30000} - engine := clique.New(config.Clique, db) - - w, b := newTestWorker(t, &config, engine, db, 0) - defer w.close() - - // This test chain imports the mined blocks. - chain, _ := core.NewBlockChain(rawdb.NewMemoryDatabase(), nil, b.genesis, nil, engine, vm.Config{}, nil, nil) - defer chain.Stop() - - // Ignore empty commit here for less noise. - w.skipSealHook = func(task *task) bool { - return len(task.receipts) == 0 - } - - // Wait for mined blocks. - sub := w.mux.Subscribe(core.NewMinedBlockEvent{}) - defer sub.Unsubscribe() - - // Start mining! - w.start() - - for i := 0; i < 5; i++ { - b.txPool.Add([]*types.Transaction{b.newRandomTx(true)}, true, false) - b.txPool.Add([]*types.Transaction{b.newRandomTx(false)}, true, false) - - select { - case ev := <-sub.Chan(): - block := ev.Data.(core.NewMinedBlockEvent).Block - if _, err := chain.InsertChain([]*types.Block{block}); err != nil { - t.Fatalf("failed to insert new mined block %d: %v", block.NumberU64(), err) - } - case <-time.After(3 * time.Second): // Worker needs 1s to include new changes. - t.Fatalf("timeout") - } - } -} - -func TestEmptyWorkEthash(t *testing.T) { - t.Parallel() - testEmptyWork(t, ethashChainConfig, ethash.NewFaker()) -} -func TestEmptyWorkClique(t *testing.T) { - t.Parallel() - testEmptyWork(t, cliqueChainConfig, clique.New(cliqueChainConfig.Clique, rawdb.NewMemoryDatabase())) -} - -func testEmptyWork(t *testing.T, chainConfig *params.ChainConfig, engine consensus.Engine) { - defer engine.Close() - - w, _ := newTestWorker(t, chainConfig, engine, rawdb.NewMemoryDatabase(), 0) - defer w.close() - - taskCh := make(chan struct{}, 2) - checkEqual := func(t *testing.T, task *task) { - // The work should contain 1 tx - receiptLen, balance := 1, uint256.NewInt(1000) - if len(task.receipts) != receiptLen { - t.Fatalf("receipt number mismatch: have %d, want %d", len(task.receipts), receiptLen) - } - if task.state.GetBalance(testUserAddress).Cmp(balance) != 0 { - t.Fatalf("account balance mismatch: have %d, want %d", task.state.GetBalance(testUserAddress), balance) - } - } - w.newTaskHook = func(task *task) { - if task.block.NumberU64() == 1 { - checkEqual(t, task) - taskCh <- struct{}{} - } - } - w.skipSealHook = func(task *task) bool { return true } - w.fullTaskHook = func() { - time.Sleep(100 * time.Millisecond) - } - w.start() // Start mining! 
- select { - case <-taskCh: - case <-time.NewTimer(3 * time.Second).C: - t.Error("new task timeout") - } -} - -func TestGetSealingWorkEthash(t *testing.T) { - t.Parallel() - testGetSealingWork(t, ethashChainConfig, ethash.NewFaker()) -} - -func TestGetSealingWorkClique(t *testing.T) { - t.Parallel() - testGetSealingWork(t, cliqueChainConfig, clique.New(cliqueChainConfig.Clique, rawdb.NewMemoryDatabase())) -} - -func TestGetSealingWorkPostMerge(t *testing.T) { - t.Parallel() - local := new(params.ChainConfig) - *local = *ethashChainConfig - local.TerminalTotalDifficulty = big.NewInt(0) - testGetSealingWork(t, local, ethash.NewFaker()) -} - -func testGetSealingWork(t *testing.T, chainConfig *params.ChainConfig, engine consensus.Engine) { - defer engine.Close() - - w, b := newTestWorker(t, chainConfig, engine, rawdb.NewMemoryDatabase(), 0) - defer w.close() - - w.setExtra([]byte{0x01, 0x02}) - - w.skipSealHook = func(task *task) bool { - return true - } - w.fullTaskHook = func() { - time.Sleep(100 * time.Millisecond) - } - timestamp := uint64(time.Now().Unix()) - assertBlock := func(block *types.Block, number uint64, coinbase common.Address, random common.Hash) { - if block.Time() != timestamp { - // Sometime the timestamp will be mutated if the timestamp - // is even smaller than parent block's. It's OK. - t.Logf("Invalid timestamp, want %d, get %d", timestamp, block.Time()) - } - _, isClique := engine.(*clique.Clique) - if !isClique { - if len(block.Extra()) != 2 { - t.Error("Unexpected extra field") - } - if block.Coinbase() != coinbase { - t.Errorf("Unexpected coinbase got %x want %x", block.Coinbase(), coinbase) - } - } else { - if block.Coinbase() != (common.Address{}) { - t.Error("Unexpected coinbase") - } - } - if !isClique { - if block.MixDigest() != random { - t.Error("Unexpected mix digest") - } - } - if block.Nonce() != 0 { - t.Error("Unexpected block nonce") - } - if block.NumberU64() != number { - t.Errorf("Mismatched block number, want %d got %d", number, block.NumberU64()) - } - } - var cases = []struct { - parent common.Hash - coinbase common.Address - random common.Hash - expectNumber uint64 - expectErr bool - }{ - { - b.chain.Genesis().Hash(), - common.HexToAddress("0xdeadbeef"), - common.HexToHash("0xcafebabe"), - uint64(1), - false, - }, - { - b.chain.CurrentBlock().Hash(), - common.HexToAddress("0xdeadbeef"), - common.HexToHash("0xcafebabe"), - b.chain.CurrentBlock().Number.Uint64() + 1, - false, - }, - { - b.chain.CurrentBlock().Hash(), - common.Address{}, - common.HexToHash("0xcafebabe"), - b.chain.CurrentBlock().Number.Uint64() + 1, - false, - }, - { - b.chain.CurrentBlock().Hash(), - common.Address{}, - common.Hash{}, - b.chain.CurrentBlock().Number.Uint64() + 1, - false, - }, - { - common.HexToHash("0xdeadbeef"), - common.HexToAddress("0xdeadbeef"), - common.HexToHash("0xcafebabe"), - 0, - true, - }, - } - - // This API should work even when the automatic sealing is not enabled - for _, c := range cases { - r := w.getSealingBlock(&generateParams{ - parentHash: c.parent, - timestamp: timestamp, - coinbase: c.coinbase, - random: c.random, - withdrawals: nil, - beaconRoot: nil, - noTxs: false, - forceTime: true, - }) - if c.expectErr { - if r.err == nil { - t.Error("Expect error but get nil") - } - } else { - if r.err != nil { - t.Errorf("Unexpected error %v", r.err) - } - assertBlock(r.block, c.expectNumber, c.coinbase, c.random) - } - } - - // This API should work even when the automatic sealing is enabled - w.start() - for _, c := range cases { - r := 
w.getSealingBlock(&generateParams{ - parentHash: c.parent, - timestamp: timestamp, - coinbase: c.coinbase, - random: c.random, - withdrawals: nil, - beaconRoot: nil, - noTxs: false, - forceTime: true, - }) - if c.expectErr { - if r.err == nil { - t.Error("Expect error but get nil") - } - } else { - if r.err != nil { - t.Errorf("Unexpected error %v", r.err) - } - assertBlock(r.block, c.expectNumber, c.coinbase, c.random) - } - } -} diff --git a/p2p/peer.go b/p2p/peer.go index ed80b226bf..1535da8f2e 100644 --- a/p2p/peer.go +++ b/p2p/peer.go @@ -24,6 +24,8 @@ import ( "sync" "time" + "golang.org/x/exp/slices" + "github.com/ethereum/go-ethereum/common/mclock" "github.com/ethereum/go-ethereum/event" "github.com/ethereum/go-ethereum/log" @@ -31,7 +33,6 @@ import ( "github.com/ethereum/go-ethereum/p2p/enode" "github.com/ethereum/go-ethereum/p2p/enr" "github.com/ethereum/go-ethereum/rlp" - "golang.org/x/exp/slices" ) var ( diff --git a/p2p/server.go b/p2p/server.go index a334166737..1c969a050f 100644 --- a/p2p/server.go +++ b/p2p/server.go @@ -29,6 +29,8 @@ import ( "sync/atomic" "time" + "golang.org/x/exp/slices" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/gopool" "github.com/ethereum/go-ethereum/common/mclock" @@ -42,7 +44,6 @@ import ( "github.com/ethereum/go-ethereum/p2p/nat" "github.com/ethereum/go-ethereum/p2p/netutil" "github.com/ethereum/go-ethereum/rlp" - "golang.org/x/exp/slices" ) const ( diff --git a/resource/greenfield-peer.png b/resource/greenfield-peer.png new file mode 100644 index 0000000000000000000000000000000000000000..8b0e046f25ec78ae9bd5f43fb3dd31ead66b1929 GIT binary patch literal 102556 zcmeEucRbbY`#7R(BJ_}Gnc3Ovlv0ur=h%)U5!stKP8p?8$;dn<>zLVF5ket*>loR4 z@85mW(Sy(D`}_Cz`n|rro*s8^@ArLQ`?~Jyy6*d#s`7OTQbtldJUj}88&}ov@Qx
z^~CebonR`s#9xwpB0YSvoc}7)6^I@guoS7i;X)?LF-Q%>(&FNxmmH<|d5Vxs!GstO z*ADaalFW+5JSh}t_4Io}t0EOZRPpot~j71na+Nd8KnTXXwb zuT3>`iJx+`lz61;c#>h1L)(}`=_X1FC9yslTY>RqfqQg<(>uFz^obg%jvT~}4EJDH zrbn+L<$ctDWZlMZ9S$`(9U!~CIy)4k5)#jo_E=*0je5C;t_4h@y+6PcE&I<1jy!Up z-J7DfK59=ye-BVbgz><9y1KeH%{lHKGAs-%Iga0&(++2%6#MB3clZC?Qo1qCB>&>% zkBLW!V+82!dyzPIO0Ew=YI{kiJ+t;(!;k^grt)HRXC|0czsJbRKFsTNj0;vDtPZ3a ziE@H#S(PlAg^S|Dl=Mh(u?8AsI$)I_s1G1|3tLdUo!5uoTEiZ5s&vlZ>I1DPXuGyv zYSr}p5qrH|>J>OTSJQ)wtxiG#I2^BV-1##bN{A@@ALVNp*Q3zq^gN^yR;4al8FwyA zxb$!S5|twl_sJwl^m+da7g7E_eT6g2zo%#B#8$%2whN)w7h-l!<@S4QH1(wBcA0@j zVGo-zXH?W+fF$o^;Y3>8SpqbTH}fATAiaQz4zbV!*=!CU2zPlBhh~H4c$axP#j#V? zpI)*Jjm8F2`m%U9kHLS}TW|}@P$%aRg0q$%l1`M=WK^!z!5an7e*>`*zFmiGI8GH0eN{LaFnzrPgFvx+NJH_|! z`g;#{rNf#4H2stMKKOe??+DlqA04uJwI39F9}v6?{p#evN!t`Q_Zj-Ok!n9L8qb3G z`1wy^oC6-c1ER!ZDy8pmg7C*)V&0hS5g$bdu-8a5KadH`*7|*Zv@J#Z9XQ)CrLdce zF|$d(w3n4=#8#@6-#!>Ta2i^X=&qQs!+6VUUu*PL!#z)d!q(B)E}Q%8$1VE_z>WF- zWtKf#e%POZbJyib|2}s;1Zq~nPZXN2?Z=;A4DLBDtVA4l3paiF80Cxr-w(gx#=X{0epC@l!(E(>)+TxOi2G6 z3N5;{3eH?Kg5&fOxqq)!Zd-+3%IhZfirk(5GlMz#cqk-1Hj|SpsVv5bdUz-g-P-m$)POVIFy`f}} zM5)+_*!5`k9_x1}Faz7GR^oV2?A?76n}1I1vb<@3K-m|;uKagW_|JsMO+fN_Ax+o) zhZmW7!E3X}KF#8=BLbF{&Qy@XdmmB!xB)uv+RFKQ5DcoD-W9$+VAlTMGC}~ z?uOq!&fp<6z!qQULb^UqoRIRsN-!oRTjCzd#1J zko0pH|Hg#l1xg68B1)h^et&jF91ERkrv2WL{W~|nbZ0&mJllsWwk|2QTbKO9vux}Z zw&a(b)t*dX2dF1tYsxIO=7Zb3kMaX#eU(4J1~(?BI)g!Vb=Mfx1;bdJm3W z0R-oSBX`dU?=FLU3^+zA_{!(3?a2>wX;tz(no?2AK=8KRK0`Mt`Rj z|9}jVqzWK4^`Y|;{oZ`?fa8qs(8{<6zezcY%@I{Ammv3i-hV!3#{UvRo{*UeoSXe_ zU^e;2;9nE{r%M^Yu1Z@>_(9VLez)a^7%1~C*q*HYtET^lDn8CD&j zgLy$&e`j0=hG{8o;M6|xqCN=$g%-R3oZkb#{Lh)TuOP?u*6UyN_>bW&==T6_Il{VP z&p7|bGAUAu?j{};DFLE7*4tsd_d=~PJ)`~>T&_E^)X3>I+=r*QvEP(WEw+PjyW z@aMdBU$Q#}%13$ZFZsqnRu9b-=l5?8L0AI`wo;>O%acy(^@5}cHZTIRL$w-(mI^TV z*CFj}aP+^G)WKrEVbdjE%yYdd)=J+Ti_7DHOCf;UqI0&T@W-j}Z=N5e|4M+Kw^AWV zI4WZI*X}{2fqE~gLXYiZ>W~_1XQB};QAB8_ICt0nj!~C?-@dKUAZLbmFb-7 z{rApFq1*usQ$h@`O{~a^SI4)U9 zVIVE6Dn_>>S(&1p)wGOlPx&dEM-c(e5bRO-v1hHE>Ph6P0izLdu(xu~(OVN%xgLfW z&6vOT&#bzo=5h41B2T`k%rM^!8LUW~>Bza9n)ugsO1<=B%ra?EHdtcV*|@qnL(|*a z%c({lpSfdsd?%|5bhZ5aR}xh97;rw!)UY2C$cGNlHN=B&kK_GwPcn$OG*>+s5$QE+ zZgWtpBV4D>Ux=5ih(XvnxeiD7H;`?4d(ABg&d82$84l8gNY%MRhxd9FzIx!HKN|^w&2bqv%KmW=8doSKW>2Hw4hD-euo*V(8 z*U@*vEY-FAv-A3kf#1c70h|d0zPEEcY4TSsge$s1BX^_W&fC}jQqLt2n6|d7TPRU; z?C=E%gmulmTp@f1rTr=V1B$FO(SMbzd0IoM&t!vtA;`q5HAFWx$^8fJYCF*01m+e? 
ztF`7js$MDt(*L5k%^Duk5p+l845o~##}EuB#}lf%(}-(BkSC$RzEGQ|b8f^k-}FD# zh@9V*8_>Htyi=}A9y>ta|BW@wjI>PtFYa%E;9MY;oVuvGZE)5|%%R6A;K9iBI1{pv z40;r{Fw&6j#>Dk!E1|>vJI?~dRiMYI-`x-#1c*b(2b_94ZBjPZ6>gsP54iKGWbx6Y zLot1xbLzaPq$d1+D@>DOA|*>%C%1VU2wbp#ZyDYjh1EUd#M+3xePkge=h9W=yfs(p zz@+>2X~8eyJ@Tym1jA=wo1%|KzP$~a!T(bk8dXJ(#2{oY!i&YeK*pOC`IO|6uAg=} zFSXbxS8{coH0dtVdHWil>uu*6MYtMi+Q6DKZxK6LR&7J2dFL9PhSr&s`~kl)8!3jm z%Kph1=TmvZA-ZYyyyl`C{_O+X*t7{qGBw+3GigV4+Bm8>M&>lP+i2t#W82wzC)_3t zqymPsTr;;OMVYQ^Olx9KcKq}_GLRZKRZhl74$|}8ic9xaLP$H3l0Ut__Pji+K_;T@ zpVUG65q8Vr<<6Yi{%q7=;Ufi=Y_2bt=-sG0%P<^~T+32S`VPPGhCdE;)EtT56=c(6 z$dk9QNkeQ(Ugb#=%jVBx7?z#Y>s>jafpDG|I(DfU^{eeZ6nz=S&f9C1hQqDx>8vNZaI%9 zB{Z1_57!>~t|hXOP^@PkhxAT~+TUVkoHnJUF%Fmb@;i2;`KgQ4 zn*QLx41%nGYuc$9)@YN{Qk$H58lKf#RGu$da)BhFjWQWjRGu2vKRtfV==2GBw#%QN zzEac>gbC%2&`UFe|1{&jpg8-EA4yt`V6Nup@-hjs#=K?U4vjX5B-B?-V{gB^_=eDM z4Bok>r9hq+y*-1d0A0_~P^Jl``na+h4>b{ZU~+lWLXFm-m6Wn$NQmzI#+S#{2d807mP#TDz&`I<4-T^=~$^bXl~3GilPBPFfqHzRi?wa;El< z@v%a_q^YMeMjTbH9Oo`ZR2>s1u#>VOa=v&?ey)179~qZvOT|T*-?P zX(OM%!sID*^bPE!N0lt}?qrN)r<-Jg@X7!Bo3b>L#CcH@=H|i4R*zEgRsnTO6pXaT zr8x>S`J_QX(XG`W*wpX0pF`4lRNSjgB~J1<4_ zx0OdfaAif>$LY|7UkpI%GJdFpSSZ{G@zIIfI{AjDVEK_?yMVp+8y~pB&v`xavVoFF@TRzauCgvY%u7tM>zP9R>rd=S=P0H^PFKHVr$A&5 z#+5zLRTvHRmR790;N3iH6j$kx%rs>lm*|V!zPL6uAx5t)uC0*Y6}E&u6jOYspoNn{j=>uty9!y| znDK~6VU3Jg9+|to{m<$31$|6XH47P^`slAq6gNn^XQQtv{-hg(;6!yzf$q0-^eFcFKyPmb|ngH8L_pL(78nSU1&dci3v2lis#hgmb14J$--KL1c?@cfvo z{Ik;}npEX{|A^~QUzzuY)sB1q$SbWaNMm?j*23PP037uBpc+vkdY+!5Sk1{ z%oT4cWHWRn9^WS<6pwA{r92)6t`!)&k(I$!I?S!ezx6|@r@rl1hW%9@pPPSNWa`&KADxC?Q8 z#Wqfe=&n{U5lln9b(tKrDlMoLk`UMw%z_IKpR}qgeQLX!hP{jqbTIz~$76GlB_aK@ z%jid5X+c`gTYn$Y`H3R& z{jjwXUbY7SK2fyC`j|EFNIo-MN@-0v zy*%H}YwO0#$Gn|NCiK@%y?_IfqL2jY@wXH-m2nMfmohHEXgLBP^iXXDSZ^e0BHB(3 z{fq!U(|+}#(lDIW?RyNiHxFE74(O=Lk2Fs!V2cFxWz1VTgVVXvKVxgSw~OUd?x;T$ z6K8N(sGroHq^Ua$PO3i;FEJz?u?>vqmt59Olr`}#1G%r`4={uvZkn=0t{Fn39 zLXfyYt`mdizQM$hdioC26=`+69ksPj4M{wry= z&MJz*^_SjhN2St_wxA8cN4QNZ*7KY&HhRZ)yQ{;eG^0Y*2#?ZyEydmyxpNa#qb&3u zAi>Rx%*(-nK1z9A*+kNf=aHKvUuIPObu6La?kwePiS+8<+c=O}rs8{aHqstkNZ7FQ zovK?&u~b_4b6x_eNTrWD1i7)(L7OJILT0$ezQYtCz1+bMf-r)Brk7Z$iGn=JdS#84 z;Ep4P0zY|O7&Mw~mgviB5uiQpL|Xa-7~GeZ?R1;*Q)#}w1N&Ovp!mAq@}~7$-oD+0 z&*V9`m>G5-p`tLVQsJ1?qIdzhO&IOIUcJ~j@J;9@} z^C9ckOWV&o?zm3>?6VA`0PiMheXr}fF09#Xb4Ieg-v|z8W!NTDcJ;eU>q=)nK|NE@ zR3i4i+k6v~1d-N30)#-0D%qn4&w5r_f}On40o3oNl7Dx;U56pL7yFjwV5Kh@o4y7s z3nwnTpFMlqNO{e@G};%)0|AA%d6d=-|G_8A03un^reJ$*;69VT|a8S1$Jv zSbYLqzzPw^X&GZc)x-4HH}Y*@00)Q=TEGfKVdA$C`LcUyssgJ%n#MgN3e;i97aWVHzEE`WdbW<)_5v#M+R7*w8bD%27i58{~_bwoxOlt_VaYwHa# zB}HUsk^YCycSIhYr=c2Z2~7;~!ff3uW-3z-5>kDN2Y;;(0NO)!6>|x@?R-D&&PkoSIDb>(nMYExZ_Ns;dOveR} ze6xa8PDP|B{;MGjk?3`kJe8>ie5R;6TR@4_z1(E0Qe)I(`_g-{?(=F4Y*4ej#G;lL zPGh$qG9%ag%4#*wP@!E5Wj2u&)YQ}@6K#A`No&yFEYl&N`1f=*ye9yDnTQwI zVI-Z-$;fIp9x;+gPYDc}nKP!rcPC3&7-gREzlGAVq6**RE$gh8 zt66PUxSxQcs%&_v!73J@4V^r%4^$YmYallJf(LU%0?_GHKXedwRsD(dzw4fv!2(nR zuiY0zT^jDUr|(d$anyrEW@d_&RDOJsc{=YydpUHTE7LGv@9gMW8VJRL2i^(>{;g#} zYOS_9&D|$4>WX4xu6B6f!6IQRn=l)861iRN&Fxdc17G+C5fX!LBZ|pDgvfYyI+@EZ z!8nUWY9Vmw1EiHMcMK~@6w%-9E$Sgc&S7%Eks(#cDrmcXCK^jFDcxruc`6t_&C?+T z0=p(lDn=Q7M_QRwt!RKV|CVumKVOQ2uldTXC*-9cr0;MT6zD$;51W%Fdr!H_j!a1cXtt6YG#?L)NMk4&IuD( zU^EE_9F4Xc>3qFz#^XkOikUPETjT8x%yIBJeIQ@-8+5YPQ-?wS`WsZqad}t7X$BHP zj(vs~vbArS?RsYlk3$h=ioWsj!&bZ6UXYm1T*W6fv}ZHa`CrSAw@)6x=bsA0ru)pY zG8j#&VSwmdWtYn5Qbm%b43QY5r%DbOCZw{_a2yqq@~+i191p!ZVt3}WI^c3d5`HZL zyxkn1xo`Z^0j837r2Y%Zf4sOHiijoOxbHhuiNA*FAN~sZn)Iwpc0zu2h{WH7A`pqG zWcHSZZ{L%a&-+y#9CYDb?#I6!zF`i0fXY&Omw8izn8zNb)?^T_&GV`CtAZaIso=(B 
zuGsm-1tecqbGC0nI39DJd@Ac<5)hz2?~iA;jt=PvA>%RqtnPX#V|qu-i+yvn&@?YU z2N)spLY2N+_T;vw@?H=m=7f-8r>E=7Q_Eqw&+7|l<&7>IOHa=rBvx;Pz*z@B-PwU7 zoR}(g8m2jJ(Q@3+H~dEW`%&4le1KiA7#x=BbIFy=?bP$v(}i*y{MvxaYP!|*1|b)( zUsj^~q%nCy-(rx9!aT19qmga_F~Ifu07JDyTZWD`D^&~hvS(XFV+q% zmlQ&&Z}$2;3%;B8D|Ap1#ztU;-E*`G-CP3{q9(t$*e(r1Mcel|kiTPK}`582ZN;R_{rK`!}yH zfvo7nLdf||I0y-+!hp~DIOXA50dU_rCudkY*V3_cuJrnSuuH}a9gsArZyFU_9sl?j z$|aYLsue3X@N_!AzD3G%O~!VQTg8^7hnu5f{b@r$()b0}A#So9wE0smDqa$ew+eoq+&rJl-W*0?VI z7Vt&+9lUaFc?EFG8;yGS-*IS_ zg^8D;)b-ZuE!2ovFZfOR!wUC*Y^A@aVEx6G6iX=uGAfz7@H$)XT6o-iUh4GXi=&c* z)Ac?4mCA;+RU00)d{tbj&Frks63A<}6*%AFZ*Z)<-KYmF?FhzNdnqy^sUDetZrXN=`lL`lSA2SRiYU&=KT@Oqwa045ym0lyQVGuP$di;u>^b|;N_^T(MiQ}UkK3PZ{ByxRKReB(9?N9HrDpJ7lHQV z%16&WU?^-38bgqCV)x6@5|A~Wg!^wBB5?#hi>TG-Igx&{64#3@qw~Ig^Ff~{;AvPGLtgg4GE*dw zMp;4Br^nCzVWlQ;UY^-Wqy7#ac2Ues1g>>(B@y=>_=>ejz$BjLt=(cj+&h926K7u* zN0Ymut6dJD$E~8zHZG$vg!ksboUn@7f{`|>tv!XpD%d3&h7cMxbd1E|P(8)nzZAzd zahV$*ynfwzh@VH&3j7n~jeOpsp%2;@IAbxuB$FP2EU$`HTAL4Kwqx2(l#}{@{J3%k zLPOXLJHa<>bFtBS+31(hPsPF?rlVUg&yORyKck4ckoH+d;%TBSd&esPO`0yd8N`6^ zu%3!dc7jw>MZL>fnBFl3itGnOvWwD8|WNB^o2F+nsK$b-j^OX@P?Lmw+?`c-*&@J{m z9cO$VDjHiysEoKJ;sy)%FxV`2c(`B*&$YSO;!w*6sEsBu&a7(bkRE^A)-%>y)VKkZ z*`<>{z+aq68!CUpyERb9?q7s)$Ssf(ELIUiRa=72QNC~kcX4L`ThwIbk0OH!f=_H~ zd%Sg~x&6nEHn899B+NQa<~!8eCE4Wtzb{DOu-?x*PsyyNG_UU zPj=d(-Ur7Eoj*>AT4RvgdKSkD1ZKCMH;&dplJJRc%U+LvMLc{g7PNg(X@({h=uxJS za6o}TDy3V{^M{Fhm7I|@knoF9_I<510u%ALff6j1VG-ktQA3SQQQizj6!yh>0UeqPl@*Qy$fMjI^arn!$Q+gKH2m>e$#?;RFQ?huH;kO9dald# z?tJs^em`5|Y6D7rNsk*Id*6TShxrr*tTW(b?2NyzG)f7(JXu=1@gZ1`L7#pUw2qw8 zIvabH-@%qFZuZ|7{~GP^xb60luQKRZdWz$%)~rlJnkorWs9%x>NdR6ngl)7qP6`*Y zlZLswu@=x_-)Vpolb?Cw!B4aKPFOTK|<4|>2+*7(S_gK_?G zr22nt#2lDUStpKZ0e|Hi7yph6n*prE21+EYD2Nj7<8%Pk2hjo>v_+O1tu+MNBJf}C zG=M1%Ke1rcQs8+CKmyv+a&U^w6ivgDg;obrSwE@IQ*ISo_edO7c=q$oP%JbP6H}Q2 z7#fsM{RIDMdmyrsw*(27p+ryixL~^XLmG3QR5S?)hH{}Uz+dcYcOu)6W7~?D*YRUl z|JDjZNgS|@#~0hG$zq0j&KdY~CHifR{a$)KRb0+CwKJBFe@~j7j*`}@EM|%xNDdPJ z$!|S*5U+$osAYJ9*(&+#WFJP&@BRFY-fQ#Xyu}$cBiT{+p;+D^D0QH{kNCs8kPHRiKY;V&af&o36%*-_`QpnS~NZudC&xnUJ!K{)eUY4FO6}<&~C`+O%ba7yd ziOS~t7V^HT=f_*Z;3ShbC!vPDTW1}IzB_c_o;?tP8D_nEw;NzVFkJ}*AkSbZwX_8h z)TUWblS!F+X=gr7iw1B#;*k3)Koxy;ocL?D5XW|g9!ggIxtC-mzV)SlcVmW}1 zC3$m1ZJd;q3QqfCxpI4D3sO$RdB}g@JYY=1fYqSZAhHI1%zZN^Yux|0F~P#-e0D7e z3SbKsZLA{ZcSQ%*)z1c*&lJ7m&L9augi`=2LrqjNn{M)L?wxcWBr#0y$+E=1C(HEc ziR5B%;$hf`N?H6p^Od?9*(hBR642^;3zfQDKu2C%pX8$vL>iZERn%`&0Lr0UpRF5a zrUy@&fctU}xzTWgsjihssB8wWc6yoM-Vv1|JzQS@QQHM?@#~=kH`h1=R-`#N4>1S) zR4$Olm=grN1(cB8*4Ab@krV3h^+L>oDZ@vW&;c-QcR@x}EN-s{N7zz9@28Np4v!;p z$oGOkZu$9B#1uwdP*9N8u(RFX`~rXJu@_;yp0{&6$Z6Y&#g|^Q0w186_B}*80g6eu z1_VyYh|J`3?p+RvRep@#l#b&RtBIn?1TvlQB>l%yyLUUI2_lf76PLCeQVDnLe~P49 z?Ee-?wbayg7_g$L0D@xR0t(MUYE=^{*m}~CTo(8_zB3nB&ut6}@u(12COZs{uk3sv z>+D=1ue%U124xC2-!WVKBuCjBid8xxhYS9a&Sn;sjRn;AL@y6V%yR<>93=d%zMl7& zsVk#A3YZ_HA#@k2^jbbcmDH!-t91?NiSt(jPnv>1@R&@|l{rc_eD7`HE5&DacXL$T zRrXaTaq%;4qC; zo! 
zV4|VY|B{NIy*l+2)2bjSNBV~Awr;$ac7KbRMruZ%^Rr`wq+V#!aS?f7N{}B>6(x52?2(IcVH8&@NK}QbF7yeac>5~0QQ0$811Lksv-hP zk1y?k(D0}^0Cp_10TS6{hN@nz`;BD-+&hH#w)MO6Cdv4H(&La{4e-t1fO)XJB*d_C zxEK)ua?@OfGC{@m{iQp{o!Peu8|JL!%E^d3bK*a)#!KNnedpciFdzQvm>FiG8+A0Z05R7*zinsp z>IR$W`n;|t$xaePKUhuXlB#FUQC8U1Hr=f_;};p*CF||8zHk1n!D*$_Yk8`O`syAP zmb-%Jl@yq5ZEkH%yZ+)mhQDR|{BSb_w2W+4nsbfh0QLmu2`TKuI=~IV@Z~So_D4~| zkr8<-1(;FC*W2};y|I+^A^o>@iVR&OFJRX;fHMLGV^oVc@cup!Tr7XDSPKUbx*Fv0 z?-_nL&>^)@J%Ir7S^1VlUOw&v;&ic6bpT!0>D2@W>iZ#bxK9fU)rOQnQXP@$cN^^F zV1B)3wtk~!UXi{njslOr5R?iFa~B&Vi-iEahSM-gK*FmS={UboiqT2+6T-}H*iVKYKHOOR*H zt*iee+@;p!MR8RFhfn0&b;i1Yx@cLIgty9gsrWEfuH_&IJhA<~Idpq-{dC68{*i8n zY+p4TW6Y@%%7F~n+Y+Y$p<%Qpkh@J{KRP~H1^OXM_P~#2iCQ5)L z{K&Kb>{3|pFJeI_fSBv*lJAREkV*_wfLA$aCbbTZdc3~Ca@RUQu9h+|!kepM6hmhc z<#uA;XS_e3=i)coG4IUHKQ(9#A8aC}43L5o562n%QVe`OeVNEj7xyz}1w$E_DxE7P z(?cV}a@AVRKD$g5YOZD2sd1E|1tFR`kY}YnOtE>AlFi`FZig)@jckKW33P>|^;frOuOe)pK#U`>e{0fJ4YC|0l&qw5nz1;2{w{o^m)nG2C zH76ajjCCK3qu13O!~KB3D#WS#>?r93*4Q%AhpUe(+#WLp@3Pfx)DRPwH3rr*iRz$1)Q`lNE zTuBvr0>ScE3c>a(CmlO!krQpJmCxD^JC!L?d>YldU#u+Wrrf3oKFG%|{aN^cj1Joy2gRktnsXQ? zYbu+sRc#<0LnflF(^>IWLG;!Du&nodZ**D=c-WeHl09c-#hhsqt##mJrklx}8fQbWS)d-C?}F`xS&S^NJ)n1J6QMJf z50^^IQ)Vr~P%|ZuACOtL2b8beU7sNzw4TL?>qg7_&x@oFPs47p70*BJ_NB2IWqDVg zv99as0B{V?jY$EHE70TA(e?R>gQxlH=5-a@VdnsQwErA(sfkI5kORV(cgkK5=Pb%r zKj2D8^gMha=R6mNH!6NlQcEW<6Q8xOn~bd=)mX1_a!9>=7UX*Nk0%0`^a33t<2lgL z$8nh?@|k}?>RXAKVc_>i*Lk!0wPCQ_Y+}-MSm%Ru66oi`^?f|rbLSPDZI)2qGFSEN zT`Yx!Nd02%+$#X#P%n6fee-G-!7nXLU*~#(}+q)&qF` z8vFhqVa5f(q7FTMFQEd9-xLRyK&222^d!9N`i5dDDS?qgu!DnGy?GJ&OP5<6JHVfp z_(GIHAW@G=_yH?mfMf&!x}-GeNManYvoOcm?fqiMESDZ>@NRfEduM1Gk*4iJTnDw& zKs*t)0rNWAy?P>sa&3O;YnWgN!POCSq3&J`@*%S!p8BB7No=7su8CH&-67m}PW-Av z%NHFiQ-UPb!T6y~r?3ozV={u~7ngWt@0D(Z-wzBA?}q8rb1;-Iy!skZ;lx-f`8!^* zJ4S9*ig(kK#BN?S)CM78)&2D2IfVNskGl^8EJ>g>uf;H%J}f!TmCIDs_$AO+gq`Ra zQ*-ey4!3mdf2=xz0cx;DW>(^V@T$09O=)*MBj1JAzO(D|7HliiYKW^$d{7EHsn^=} zMWj`;%U_f%fo8cV7o~P@$y1#XB?1mfl%LKrvgGiqG3;X0Kb4aFz44wNJWN9rrWK*R zUY-zb4ZFI-rJ^sQa`cbIWze~5kp;b7(XyjS(FH$$5Rdrjrr9;&_X~VZc>_N}HR>f9 zf!H`&Tak!tWky@oY`2Ag`!WwJ zq*;*zp|R4O_6ZqTFMFR36zuv0eRP;t-!*s_NC7dW`VxE@P z*!=KUU+eVz4ojUp8^qZYjpePzK^Ok1CRHH~!@||*lG&m>#2nCaGm53-)i&10_^@-_uP&)t8P_A4VS+^S+TH#H9Z8{X?-VsOyFDR1pZ;ArteGVk zk9iC}1dqm}#pd7h9#?XrQYD^s7;Zm}S8_L)LK{F@I^qAj3qZ&%LJ1p}OKUefrb$O3 zL%Ruc{~*n3;Mb5!6)R@(eP@R88-KT=47~I+4b?5r&j_&NqO|v2j?)kTiw-PZndFX0b zkpfm~u()5FV*~A`BUPVwC^^$MSv(HZvdK)9nIq-2=wARhfW%4%uwvasK)|k$qx`qW z4t(W|B!9C>c&lE22!b55R7U6rnM__s#0EgHk$)=}ix0m+maXsI3YS$Bk$?84pCL4e zZco4h$>qvZds)($-DYY{(yCmF-#M(FUBe$^T~|$N>F>3EF|?PrIB8hGT2?CS%i{>k z@qTru_AOJKSfc$@9XPMlIp{&oF=?q9@@EX~$zu3hdyhFUf^cwg=eyzmt> zS6ko1Co}%cx6(rQTj8=BBL4X*MX`^~SF$$kX~6I9n+#(v*y$6!lUkQ+-nR#yFhq#W z*Le+my&WG_IW;@4b9lhk<8zN$Q6-(LcRis>tM45^q_i`?|6K4Zme5qkNjOc9WS!$g zEq&oLU*xXa6+zg%?RWc=wuf>dXIE={Aj+jT(~%;lnpK^*_j z-iaaD)C@LpUxswbQ0&z!WOHfM?j~|~wTg4Emu|LUO57bqZ$LsIVVrbP6InY0*k8ax zP07fi-{yykPuG9q?S(Rhd`N&*UDV+xOAYidz@CrPSz;k?y&gJSTibYdT^hM0hFV{B zN#R5$eO_!j)t_5`@pgU+xSSHX8VvMKI^LeHVtP;0h7rMs0+!$}F8N0s`}|EpGrx-= zY^sk}+7J&lj#}c(_&f66he`RcA$t<_cbMsOQQW|}Ya>J=&+%ohp2EqM{DMrJA~qfKDq_@g z_!Jr-t0ry$&~0xi6o9T99@vYAeBj@o1k_*JS6Y#zpn!BA6D zQ1BE`#8`#pQ%~aIJe*+#5${a;5oE~4v!TgiM95`QqJ^m5TX`FoPTgz=e~gCE7%S7z zILHL}%kPD|!L1ExfYtZmh3W-0j`%^tccYIYWaFb-oCN@tYWd{hz)?ndg>tUwPz*V= z&3b1fut(MicrfGxIzW&EDa|P*N1a*wzl5qvEA;$sKhnSCpyn@f#0VK zV*<$3g*`}I+%})cjx}1}$bq+pWQ3)s0Iu+j1~QL5#2cZO`Eu&}D~OG+I|7O`h+c}> zM0{rIklLh#ugiV&qZiRqRJac$PSe~55?`*Rb9Ijfr(8Y0N{pZ(+zO8mA@mW7$_e+i zzF%3=CR{}32iYX_s+}LKdpVYz&4~OUUGLUwK%){(n8I=4wf7HwbX|IIgw3|MzSXFM 
z0iReD*#4$R`zNmF2`kgp1VL!D<+UUR946xcbM~ow-6&;t_1;;ku1zOuG@HK~--QVC zB(P$nfV8X0<5~-w4r7eDrlNdkHhF^}BXIdp!?lxujDUA%XbjC%@6IuZnLJni{)w+~>0#JSqn*y-L7FX-GCrli>h>G?eY10FEndto z7OGk-a;<%F?F75Gu`@7-gzLW68j7i)i(!7b4nLyZ=ae73)$MbIsJyFUQ#H$T<>i3X zH2yJ%K<>1rPda7f-6y$Z2@3ZcSan~t-9>lB@s_sziLJGuI@kN_P$;bykcQZM|M(X~ z%DXX{z#ni6(HJ?0YZwhyPX$EkEQ>+3{T4U~v^iTrRk}@zyKqyu9iI30I{?p}$)wke z-t}@wSQo)0j-=h&}Yi5i|Iwd;CwdAfv@z{DoRZ(XyeC{iB)<48w}UEMXLAB>fTlJ3(*~ zQ}1cZD`ueUfzbZDRfm|<-)uf`2JV8QS!Eo`KR)UHj7U}SVSMoPz}eH<7%B!D{sB3gc!&_32@$7A_CC{~(e z9b2yIB=p$ee9`yVbgtR*wReiU77_d@dq4DDd5r^H&|RAs7FBwaWa%DprBiS;=^CBu zPvuyrho|50R+8^DA5*Ed)|gRg*5S7_`R-KRuCDFAW%6r64m#c=mp{5j{|b+ukQ!-f zfNJ}y)TahzFEe9G#>h)%5*xRt(D%d>?EgiWBgd9>wo0E+B~M%Q_ebV9EMW1t@b2zz z(DCmsUb$eD_x^992n|4YL9{}XOu%7j1n77V7oWn>c3VwK3WHF-t#=Bz|2;0{LI!M- z5Fj~`$A$WX`kpd*93+WH@l?o4-l%)T93+rLA*ssSraGYf07pWAj#{NFGqESV$$05= zKL6&NSoANeGgz9F+C7zQ;ioF>!*&L58oe7ohQCs3a}zpY?Xj!P@@G2B6~ zgIwFe_?DCCQwt0lZT*H668RvNef2yux z4!nuL5F{p#6r3`j4{@nkUMQS$cs`oUs5Y>1*Qy~cv5D*u_qb0kQs=PDwVAk}wi_5G zP2Q%rGDGeYnH|fkB?wzKZnIu8?-soxHbMK-7{;>C`1x~uIipr#*^qJpw@7*-$S2U&aU@2+29Os2ns-iv=8tW3%KolhPqIq;%+m zHXbF7rB>#<1~(rZG1NS4f4zz#+^n>YuO8Non@%9W5eCm{3+3$}0;mHLa zD}XTm+oXK9QWwwT_H^znopbEg_FEMKO3r6xV3hpBoZ)|)x=sv+KwnW6NV7D)pc3pv zZ*fRqdm_N2_HqD#`-q|ecz)8OpZUAyMGIL-k|YPZXB884k~PK~Z4JKIitD+xw z=+<;GmC#ntO|bUs1RT+C$Jv5cpql@05#{8=di%v(sJ7Cc&n{>`ljg2?S*W#dB&F|g zGEFRH&8n!C**2)%{L)NJJOWcD(~#d0L#Rfn&;I){Ase@CS8ghLzBu0K8I@7fb@kL=0IA;jo1?dMzgK4?{bpT7v&5 zc;2KvIT}Xs`&RXIG0>!I9eopG1L!#*5oJnOh`IQ-CIoI28s&OuCW{#i@Igt=+V++_cJVg8Y{Ah1<;!3GK9Kv*W31VD~F`*o$jqLsaf))#>Y0#J~p8 z2Pg@ryE^+j2%=9Hq%Ey+6qXe!t`bpIT+D_WY(X8r}+z)ny(qHZ zA>utVWaOp@YJIl)3uuhd*^?2n?9=PUaON_0Q8ndQVFSU^;C=^Eoy)f|Bt4!Q_-fi& zlKns=JaVXKuMPj}ps=lroAMFF-K0S=o?KPa(MqM|`JOQ~soGQ`buJ~Me8I3-`ZD4; z(;>?So@$G+5E1TH_xB>d@L}qB&#SLVVQAL`X5NqP$1ty}D>^)7BU=2pA`GTNMVzKu%$Ugdp^&KRFG-M60 zf$ajcps&NpN;F zCCS8Yb3?*u+(9-=@V(dPi^9{2E#$6WPoygj#cI}~Ab1=!+!T&a(CRA-+#@GWQTCn| zK{sxHZZMg%+H>F;4R0yfA%PtM#|fCUg(t25A4(-JhJY@cC;2_Ba!qpcJyL9-x8G?9 zbGB~d+$Q~%q;f-C$De>o*NLqA^Fz(`YP#0WlUu1C-La}_5P4)CT28wv#*hMOyf^cu zQ(8448%%IAv&z*zzXGCafjUpD9&NG4VMYsvkf|(@ehZ4n{@71sB>I$(k%X5q$TY-| zNlNv5;T|rr;pS8%>+Mk~FfAkXr-3U%F9pKzVrkY1%tss3L9VwCI6YCnhO|1Op!lm1 z7m4)iu<>qBG{-5p>}e9e$#frthnx3LzHql%(pkLEDh>7LO_fWcg2xwN@V<*7`b2-R zF=G0V$%_;=Yb*b;k`Dj7KrAwt?=tZe=vGzT-_gL9rVdwMEqf2pNf zM#@guS{qIFzCZ;Ne)AVwK!HI`Z`4#|{GP1SQg2K1cRX9B#b#pbS=TmkTefZB5hENz z3xJKWO(RM$f>}_nPbI5YgK!15CT)#)V%goa1hGGoRCc~%&o|lOtdS!* zd(1+mf3>0v211JR=PrD4o@0KFxyPMQ9z~aZ^xNAAf2^L&;}@xp!`-e6x4J24So~<6 z-{0`9|3{wtz9m>;zs%6%afDDG$w_U0ESOWOQ`YU;YDwf3)J4Cby?Gckb$h|5soc@r#rGx}s z>*4dl#ai=XgF*P?f6rDV7$li zBb6c@{BN?<@BghPQ&f%mOzf52zG$4LJoM3^v(pxB$b&<`&VygSWxsHd7_m{& z=l=z@XW21~7dr%r&boEEi+~5Y60)E)b|N+x%&*(TzUNUnHH|Dy1thA4Lh4@r`dvD~ zxsyd#5KQOkc||ZEO{{2q_Lod0tsVxGH^!>GzEjd!g;@WF)EOh_ zGoJE~uRop3cxKT@=lh|PnJgo58%fih&IkFZXS@wh6&lVe*jT~@@@};nO)uEHB(%f) z75VnYozx#W${hnR_DHJIJ?2P<85BeHwmtH@dLsr^dD9eG<>e3h;F*G{5e{sw+HfS5 zp2W0>EHp)caOxyLt}}Yq$1?@8Z6kC-Zv=Aug`>o2=x+VWJ!hcusdbiu!yQf z17;%*43;06+Z4`i6VjlX%&eQ(JzgWi zMun;1OQKF3v<-GP)uJ=kudBw#)S%Q zh-Gl#{K(Beuo2Tu5d2}sOKsyE8KJ{vm%mXV)t4UrwhTS&4-&4*8N73U1oMn8x#MT{LhR24gN;S$my=fB( zkCz&d0sYm~@`xWTI`)608n8iW)4{Yx`fq;h^gn#u657v6h!S?h_tU^I6{!pJI_2%S z)x%FSq7Txzq!vGlij|50}p>~i5=clA2MAOE805S)+xgPw{=^&P?-s!RNfpCWPBI(cc}|&g&vR$zH#jvC5|YDq=~xQ)jt(^01X|-Wx}m%OVQp&YyM4P6sR1|G=0*KjO{Afh-iFQG`+&v$zf*(Ba#woyT;g@IvKX_qL*EKu6w00^s z(xpsqhWxlC6{+APfigHhL-U=J3yw=SPm=RkhU{j6Z7xJle%`w@a`z1Gk+}l6oI*jW z;GTzCpUsh6uTC3$`@@3aUC41tC*Kt{()CV>-;MgTFh8W7Ay8( z(K5XFsnHg4H%XsgtA~;!d7eG_S7#N&x=5vuhOgxo{)(UnCZbxANE*KzP)nU3gDD_} 
z!N*fKR?S{}1tf1TXN1fKAmeUh_@1hG17g6Ba0tNrz9=a)VVc zunKofNTD#27M%>Xe%}tN1nNUNJ1#8Ln~wL$9`WA+$u}7jl)7`A>i2hrNaj(c%B}6_ zy0`)SQnUo}j)5)QNR!wpe-O^#Qh`|)Bp6LdICaLWFREyN38Nl#cU6$S8LqdUO~2?b z@|qHz>bK_D>(|!x&t(p9CXDWUGD$Qnf$~cr+VxR) zsU*38kE%3?!oNV&b2^Y!&dtl^8rkMCVMTr6GTi<0xmfi9s-ITThzI%cDah4HQvhN4{28^*qvpYhAc&e(8g`((*%0pXz~ILHoj45TY)W=+=L`C5L;Hdt2YphtI^y`UfwyOP2p{M;$kfsUn} zz{T5@&s$iS$cl3?=dU6X^aUQ)G)d4#rvH++qpTCL>fp6%g2Ve9*6TzV4{ecZuVH6U z=HEbwNo1)Ql^0khbTax}j#uL+yu^V-H{?L4P!mD?Z5^}_P1;)&n%`5!-X+Hk^?CCT zv<`+$8xrkONsB%Gg+Mw@bWe||VK=$O=!QywS(y@)y;-AmFQ@&Hooiovfz-u7<>vDP zET`)DkMdx&vYJ35gP5KqjG<2N)1X8#_zj?W_uxS;7y1Kx=?sArGpE%nty>4gbyyAC z0x_D_t=zSM<*D^sgsVW5l~&{uI-g{+7YLf$VK((6{* z!-KZwVUxao^aweWATmpIG7ns%5Rl1pG>D?*=E-e~21p%atOt6@Z#`-EF}nO0vGKB_ z&uTrDeW0{-Js~v)4X^dY>4VX_LmTKZnB8R6$;>n!MEkvLA197>_n|wiqJk7G27za0C}T%9A%OYN&;+ zXwlKluKGZ?(l_sJWs&S6=GCx`X&ZP5^k3#agJU1Ge8V7>2ELUIr>Q{GwXfR4N{?VL z$Qj1u5a7V6kG>p-JMcAiXa1ED$(8?&a2N5D94Tnf?FVL|bH1m-Ft+RV{e}B|qR6_} zi0w@T8Lk+cvz#d~_7^;)0L=gYe)cGtEewWIT`pEztE56c|9c~+empA{B)~38lr`#c zwxBJ`#IlBUb${igyZcZZFcRvxzw+K*OhRk(Qd=pD)T+dQ4#hY@n+0Q?0~R0jgPy^? z!nJI40tT3c&Zkkd)`_m(sDU^cOUCH(6;nbIas?0buM~sh(!@^Yu#}(*b|}<*>p$3Q zRYfg^VopUm*0pALc@Dh5*2!#FBE?)jTrHFY5GL4KZr`cE%qx=Xrd}p2mof58u5_VT zz9x4D)ftZ;|E5*R3w)-bWV>T}Yvt8`?*FzIDH}ru$@=wI2+Dib9P_C>T;<^lGLN=9daR56|+aJ(Cl{nJvbYr~ntw=fcfvYh*+yPK(Z3=x;V^Ru5 zOMn8vX{}g^G~V5eZ}UqxxdykV6rki!HaqxTAyfIWW41gqVfONx&L)f+*m z^f8Ba7c~$5F%nU^mXNx6WRxZvf&E79KIgMaiJ&vCDJ293xxa-s&eg?l>z_3tS8#SA zU9y>NSRYCX(kk(nNNER9}gc6;m&T7Z(+UDJ{Xo{U{nKVvH+}oq? zGrF7>B9BDftl z?)8^TszS~LU>cfFe-3b1zQg#l`7!wC_GeB3z^S&HKU};cmS$pcrZ`FEBc32x=|j`^ zpo5=JQwbPKY!nSje#wt@7}evQ)bZN4U7L3u-$dY1Ut2{s zqq|k3!)aw^*@a`X?>)z$m?WUU(9W;%`MP^kM{_9PXm>VVtu>SiaFM+}kRjIz28;&q z!3ovnj^&E3jyJ44eJ}kp9xe%2bg~f#I|bpw-$wAZE^Zp07rzB~|)u-vJm0 zJG=uda)<^bMd>?fTTy^=KLipQt~>E^<2UGkdWxf(YNdFvqVoSCYtZBM;E)2|^o5;& z{#T(%>pP-INY=-j38gb61nf=6q47bYa^%GQt}%{U7I747GN?gT57i&zAKG3Ag-v|> zOpMJ|2iN3pHBlL9YW=Q(3h7|{& zI)$&Bn`mWoH+=Zw3NEqjMl=h=5h~{SuA!JWmj$4)t9=9EqyRa96hu+5zlh9l*g`#x zdkvEOiJf!fVzgkmFC~Q?toU1#?nE{=#@SWaVYXO7C!_xMFsai!Md!ZXFT)_`y4RZ9 zeVSy8O>=@}0$RQ}b+;>lbQ@YJ_PDS64S2{>+*RZF#bsx?$5TNOvP0(kb0t_eH<&+UT^ zsBQKucq|!vG|cl#4|h)TC6S70jgo*Bj*1QE0CoJJj-B6%IBVhe#?C`qyTCvAVIEK) z2&_AbK472|my{m@k3}2@o>2h6#?yzlRRQg+K|ljkwg7~ZkF}K`AS%UVPFE^_668SF z8tF!`$xD8GMf)YxiPhkt$$At3+OrB#V)WBxfsXi7;Tj-oSbWPyttW=guQp4}7xY>+ zandSAS`FJ)9!3a}I!NUUnN7*naf!dRU5MFp*ubx2N-msg+K9&>vgvWViwlXRJajjp=@qE=EAHrkyoA`P-vLQpD82AxFNE zS3bH+MU?Ba!aGY_sEs;<%vbRlT!b)jgirjufEegy9q^fD13#FwVb}q&xB=k*(H4nA z#*1xq!LwIvj8iH^yKH%_kki+W)^?Hepwe2-urY!x%@~s#D!iFLkcwtzzmsnTENFc+Ks6a8Q&d-9p; zh~jMt*zp*+Ip&5trQRTKLd<9)ty_y};k^EWBNk_^GQtVDt=cn`Y_W4Wn{RT%Me)ua ziS3v`l+EluFR4`2YyyFlc)gb10BMNpY@^y(JNV@Wrr2SJJPqi5c1XftzCgkGox5n4 z>FEO>+lj>)k)7tuV=B`#`J*AWZF{U=y$X>GOCD4ii|=)HyPlVOO?2Hsp);`=g2BXR zO~@}z`t)h_sOOya8ALibjk6^93a6{nH~(Y%E1_43SF#Svv#YN_bgJbdK>AEtLY*wHarL(CUbl#W#6?T{=Y#2&_2s!<%7KMh^Phy z+74eeYWifF_#Hfq6Zb;Z0Q7cg`o=(peM8K~qc^ue2Jllymtz(ph@{hc-FbR&pfoYF zq0Q6H91WOb)r!^i4Ae$i6^j0tS39)K<LO z?XX64=v(dFC|sak$O0M~-M3}>`0N*(FFZ=K+QHrrPVAFLpwO0^J=cgExkWN^(@ z!wGrES=vmgwpt}KxCk06nn<0Znkj!}y_)t}Nx%wmpii`(hmcre1Am}{f2gFRSCI6x z4q|5!*IaA}MvMh2G`K}Q=vF*j!yj!{KpL-YW+WSPBcPUnNcbLiRDrP;Su~t{EV^zA ze_o;XONaDF?+f&4ulw{p3;Cl`_BobAn`zxBd;Tnu^Lp{GWS6r?;ti9(KIr{wi(hj+ z|2BwWoc25$lq5#T8VfP~ID{M9A`OFWGXoo{1V^774@;5T`_6W*jKjA)>^0QxYK(x! 
zw?$&}F`IPms1a!#luV*84)44L7szEs0ls3{|}Do?FnLYVWr6=({gWCbs$D;XF59}%Q)l-bnBO2UzWMP zglTarb*wEfySrn3m5g||%NmzUi?ekZS^<37k|w+LHyJVhw2~LU6QF2c&yjZb5r#1; zRk51;Ji}hJr&tkqLMZPL+YTAxOGZ0TSlya_j4D<0<6z3@N+&e9HJ=j1yha|>cQAg| zInZT=lRIAgByB$kI+*Ym8cS5wztiFMM26Kiy@q1o zzSOr5{^nh;wl9({H|-^Jx8o_3gTbZ(bw$?|v`k6IyobuIbk1yL1Z;(0_ksJsUFkwRVoCsjEyCAjG&*ukh_*C-{jmLTg^}fUdV>MJz7q~SclX$! zorxrh+J`napcD^)tlJ|OyOqq7gTz@s!Dw8mf4=7^C!EU_V*fU_Vw7z{l}r=;1_}%Z z9a^1SRzwIo*hUne=ly~oxR)B9EJqxV1ulmSv%{of*)`a2+pu$-W`ztMQ>fR*wGW!^ zKJSaJ^0Uuc`skYAqe%AHgn|sw7U}&P#ChH z_*VC+gBwMHUx_M1LX6bqZ26u&NQK%DU?>rKr3{ZqA}tKV4$zFXwLvO=TmrmyLm-ha z{*_tZ$CSy3wmohDrK9wf_Zu*f<2T2K0CmvUJCYQ2**7Q_k7V`x`o=S2)3`KkN=F5l8w) z6Oy6DzK=GGL;z3&@B}Vx*v)|AYCkyN<^Bu?0_@eVmKR{&YSdnluy4kOStt=M`Mi&C zF7LM3kINlF)?Z0-3?E|GN{aexN7%KH&+8`Z486*~)FpnVUksHxmu>qXYGh8*CtMiQ zeoaeTVOM8#teX_hlg~?}R;^0DeCau+#lsj84_CV`^<0VvCyYrn>417=7dLM(Npspl z7jV{hpXthpc%>`Luq9*tX(bje_E>|Tc!W{&yEhRH^$N-)Aiz3H*vy91pl|l_6r@Ex z+30wmt)9P-HD*{Vs5Kdh$MNObi+i5k3d9`0K8o`nPOWtqQx)i7hBZwK4_{KchgW3D z0*`+|4{Rv`-|^h62pwcEg2qUeRGFzfv)9EBl9FZ~F_Nhl*VEakPQPgx5+aUyJT2gt zwdn9w5)e>DDgGP=^9(J>m^d-v5u*MsUL`)(^n(11JZ|YLyj4210YUO@f7z9drgZIN zv)dK68O?h=7C$GNnjH}UWM-Qwu$(ckG<~<+;#Kw*gfYn8Gs=f5(+}_&=q6f@2lZO4 z0(DjGQ13^1AqX&;XxP_cio($X0eMJT*eRg>w9f&uPU|ni%j5hS280U%f>4%TfNOw^ zLM_UOlhQ)MwU7z#zu=KAEh<=ZD63kf@&u+n%J zh1EBAk=l2i$=B0Jz+rp0$KLGuOgw-|A+q;QBF1+yF?0j=2ft}yd)F%#pHI?1z^ z*C%ef&`*t|xv4t|AXUGoaNNfT@2mB|Jx&?LU#{~0UPe0y3a=6`J+b};kpk-ST{H52 z?F)FB>(Ib>{ogdP4;0-=(%Gdnk2{et9S&SDTM55Mb08EAXW^Ls7URKuxn5sMp&Yh{ z!DsA@Gkwv#;16~KI9;SO6FBtcdK1eqH7C9M2#E5_;7z=m;^M=vEb3fg>puzvI^0ka zbNd0#`@H4W+ZsEsYn3hVt+<{aCw^*t$FD-ndr>Z*O9eaJU_zphyk%Ip)^+p1bM{#bSzG%y8DndKl8p zt3C8caO!{dj1b|#%bVE(o;|}u8Jlm4@ayYl*IVSB1{>0l>r8tp&pbF0WE&3;&dfNp zEvYc5S!8wjEN9OJLo~>^@hLU@EH{bU0`m)k2p}TOsS4|~BIMcz zG5ZlDo8Bj0!mESfIxPsBTVeq8VY*c`1YMz}Z%(ksx^{x$S8=n}cH?j>*RGy9T7Xbha`!bZ$nXUkM= ztwwiToF9+ia*kpUMhn3$_qydWx(MzkMvaJpA=RQ~My50pkruBX1h6iq88S zcRum%XGZQy*)xFv7Ve=ESOWc?6|Abbd-RS_yukab!nZfLB zz>e+t4Ga6V%Rn5t*&T91&Hg@BfqI`NKvZ>=vT@Sn9!)5#124x+qfyE^sNd__`o_&{ zptv2~8Fn`J7=M{Fo#rk{^?*3!=lsR>u87jtYq z>O1tE-Oze|-mhJ^vrQyvg9@p~X6Y&ljl#(#CCa%O4}u)7sq;Do?4K&dV?70lEo!~e zn1D8t0=AFlQxu1WyN?U!KG&{Vk!Q3Ksjq0=k3@b(5GDAScLkw*p075}qEz|eY!yMs zrP!uQ2x`K%Uj^DvVFip6s;%c)f3M`RkWGwP88*t0z!qOfec7^Sozdhmp{}*=w#|F1 zn&`TQW)PusmWgUIT#=gj1RV&C)lC|~rp*KzOja-A1((yQs#8q@Kc zF~Pfq5wthVS7sCNEKt9}Y{yibqktt>ReqJ|q@6b-`;t3}KLK*YFJX#VgDmA=HZJPb zJVaaBK0ePowc#feo+4T8j{dzZbQVdFy?ms9(s6%I6^I(!WB>W0Q9&Ije99>Hp@=9z{cx$DsxFV-&n| zxIu*h=&gbPZlZl&tu`+ri~(i*uP-E=CC^v)g-i|t$^bk$jn7kKX(3w}4(m&z_g(x4 z0R2t3N)MPrMbB?uj~OOJ%WcnDgwi~6Y3&^~JC0^gmh+HKQppozx7U$-o_A}D>?_P& zFg3=)h2Plwof0v}`zbv4*MtdwII1_6=*Z7;Gn#2>SGmohi)8QjPFQt{lrGe$R)7Jf zO!Gu~-%@O1X2z{+kKbE?3}Vp+7o9yn?*>yCiC!cBHia0ukR3)HhQQIym@mXdG#xRY znYeym`ij=MtnLl#un5$0VREhMCn`;S!~yOaRk?WQC|jWW`;HMf;Qz+o`G;oaaAu(U zpN@w#d58_DG=SkF^@fy6 zNPczN#nVIrokv~!E|AX=`($$^((`r`gYr=eX0kxWALvyFQy#uxK<06npS;-8iT7~x zgYTn2wdmeQn^QV+<&>JnH-3R%o9q*t(x`-C=^t9!mM_-Jeibb)aZH&kOXIOr*pLjS zywPa`NJ|QAJ(ZiTr}X=B_@4!i9FG=^)4to|)ZJZ7kHOWOhjQmj?(OvYi%qF23+q09 zV;AhQ--?j2B-Vri0d7QRuL3!}58fBd*RQ6<2Gzfnm^my#%vPJ@IA)!MYP6P(1f%8d z--O;>N-RAESKpq^Pt6g(w4AE~(bj*uAeH{+g9$Wu2ki2fVXvl#CskQ?ysbct*=}9E zWKObuce!erwS+GXV#+WXyv@Xz7lmXtFviexbKhCmHDr7c{Jg>(Uvgkss(yuP- zl1HVdnFjNt?U5VL1!O0_CI>Ol)y9eo3YqQ3(@1KL;C!_kKmYR+TvS?H)~F`N+uO^X zraN{ExKDj@PxL7)$?adg!!YFYvuxWN2G)A=WiHKss}$CY;y%HEDySdM0T=5u<}8si zH1idnQlSvNcTlIhuqzGIQY%!0QF(|AXc`Z!lw!PUpsXiFff>n*kda@bzWSTot}@L~k=BTJI_qbT6z4JE2g~YerRt zCl{RymT6{x2;8huW?X6X=QkZuNFH+E6n$j!*?S)d_fVY7q_<9}>`o~sM2NV%qq_Ff 
zukH?|9m`%xv_RhX;8nO@Powd2FoygH!B3J)xQB9%c>zXnE%wZ)S}RP^n`I{g{*HIZ zInLf(XlM`PIe3`eQDw?0&8%sEC)~+`>I$?Ev?uus^gK( zl>m~MtoHwtL_HLFJnzQ$jP$DIT55HJvq#pYJ6Aww7OTo`pY{)DQ3Oe5GaZ0i$v`K6 zl=n9-frTsA4e2uJT~Bv2w6gorEGN~GD&5R>*R0Eu`_;ktx7`W|D_wwtrv!g~kH2Wa z!r~{DHD_hLDe3s~%$xdc!>`qZfBIooYZ1Tt)9Q%lQ7gn0tIMKC1*Ss8-Ak2*oFsX(BoXF!4(q?8cWAJ;?)trc3DzP(0h2dgp z(d&Uz1Lgd&C1Oc=(*5z1Y(~PJ(=*{igCUhy>cM_a1wic!``&6j^A{w*k5hyylNz>L zTmRxSwQ_op?$jWP-SX6}<7RP7Hr8pN5R~t9O&qbbvcA`lUPB&XPZ1lffzVAe?Y&xH zS-HM?baw*ES835_*C{p3HXD>>#XWQ@JawjDu=~^I*T2<;@jc7-7?72az8-U?P`0Q@sa8}}Nap;PQJX~sKgoM3dKIW;e z>CYh3_z1vQ>9bOFEh=X+?#G{Gwhaxdrk7{%Ez7vFEZ{%8x6N2utXM{_sowjvt7gR4 z9m{D}COL6*s&4SjLH0Mp0#BB6CtuvEDuvM0@n`>TQNlZWo>q!u@#w&#X$YLX74kXP zEsT<@>`YjhyWfwNx<>JIPnsW)pdt* zyVk6z(*-ErH9yAUg#xq?C)Dk24ct&!_7X8ZLYtQ#FxKWvX1|M%W^6#~Cwbj^rgy+W zMZ7!R*tg1z{Q>^`>xx@?4QYO&K(s22OP*Tu!>0JY3425QG6YeC+IM&*l~Ry$3jv2v zd>Bqdvj4>#p}c4zyYfT+58}PCMHmw#dAsETdu{LWV+*zHkNB8cQn8#Yqbtp!-l`QH zO^i)=p{^>5qxFHYQho3$6SZled>=uA6 z_#_V?RO(iIZ|2l2TR1GsI&==9z!()QMXM#cD>e|fPn+z}*l#VKVNVG8JRlkQUBfnh zHLS;viG9bVjwt+cv_9IXbeBXqmPVAXSr`06Out1I^~IFiVFTn=wA)|Iqkxt!a(>=` z_lChgb*H&ls73TqCnsKkJDW6H2pAB$nt zTDzb!Z}G2Z`474$Tlnm7pi_hHc3+2`pC@Q5XiqEwzb>yuYSxCsKA*$IcKci@!5&9vn+v!3C zR>bCtisg=blPh0+^x!F|Eh_YK%x;*Z#*XDKB6>(IsenNzI6@2lWFdCgVj2!$n0Ve3OC1}-FJmg#CA{(9I-qg5B{r-yg8jF3c zc|*dd=Ia}lu!d0T*sn?lSao)t-#_C?vGNR+xyxz&aFXJ+zyMjm{K|+g1lI$;P`366 z{&=s)!vzhW4G!thvQq{1{H#$`+gx+qe(A>N0nU91eS4QWMxetd<t|90tH+`k2l{QPdTSRn|llv<5?B zM~$`SeW(x3Ed5QY$^(8d-L0xRV5DDh&g2UX!W)B$2Gf^4sT{UDPf$+Bed^Wi6n5*g ze7NyhacGh>8(dCJd*rFq#@_{vIMQ(q%H57z!6O@~dvB(yVTeDv`sEN?fl{sN6U{@; zvK{+ICC~aFbDYC@vn~5vluP52pAlDZ=#*_<3vO{O;aWH!g|T*)=n`u%GpT6Nx1_bW z;dpv_cXuci(#yo?l63v56AkrnEwj3E781;FA;ShZU+2JBz-FhnJE#d`{RR~w87w$- z|Mayxd)fGxy?+b2UvJsdnb@X3yN5HQUXzxIBifusPcnm+EJv8^4I6pE<&dp0yDXke z;T~a_&iftB`>6>!`^#~b$YXE6QWGI2gM zK>!csT<8QIHx)Owly7RQ+hx*EQw`n1$Pl9aYAN~Tbl~#|X9>iwz%PC41XB?1eO3t% zt~YF{dR!%)revGP+Zn@BrvJdc*Z|_e4(ls0htvfBozEh_i5qzF+Cf+Z+S@pe<+6J$< zu7@{W8aAue_%~Q;u{^jF&Nm_KI&2C&>M!@7j6WZno+fFZRk@JauKgyZKmLUSj3yAi zzrxsxGT=#=xrPcWo}1Y41Z3h^sNH!*CS1!qzEtM+aR}aBzJ<|WlJ%q0DOv08q}dxtt7f`J#97@MKdak6Dc|UwJ_qi^JDQ@}#!6)>NNfCEao&>UpK&!7IU9M%JPLZv?hej7sM>sB-Z;|VYlfFnJ)-QJ#;s)r41&*G4 z!)Kt=TF5Dl+tB1a;aa3645C@m_`T^lcWLT@e{h3!m}Lo{q&$VQ1V%&r#P37Z*j*>b zUnZnQ_BkE!Bza@6Jtv(b`{weXe9y&D4Md-hX#HtF_(@7h4h+HqOc?+@(j+M)ss!Y_ z)gtK4YjSnjsj;!Ku($Qql6)4g1urMR3EVt1tFaU@R)-Zl*^JRj1;sQ2gb~O_(&-C> z68qg{nrQn^;$p8EK*O_xh1{=C9(ImCFqw5mu_(Y zIm>p-eg8L7&j`rjs}3H6ic-wyUvyISa);AU(^5&bEj>qLz!4>f_(`TBX*!V!2^NvtYtxit5C)Rq1mi-&Mw{OxE$VzAH;oJ@U&a1^!Thx z_^TCWmw%7~dcozp$q2@*aAg}OneEYu*TeR-CIT2p4ED~IY`Od92V^EFo>hAAhnx!P zynQk8*8=SyatB!#ON?v zUg%MoJh;VDnu62EV*I5G^)lKd^Vz(uWgwP`7Dd)Nj4F*m7)BB<$Up~gZOSR5DdprWhcgu~FeX4%etxBdgyao&wVgC4GoYPQD)#k!s(j=p$JIx1mKzsF<*otLF?wWIr$y^;wt%-$h=hk-*q;0kwi_(P=a?WP@PXaqY=VP^)f z@cQO}!k2dDO@{rTfY3ORZx?HCX^NUpPe!dgCUg12W}L;=^TuJ6=gtdrkXK~_mfL_n z+<}-0c3wiv4VB_!|x;F<{(9N=3e-j!@TlsE1X=$>P7yL8jaED#Hvh^HFKls zmnmxH!8?r%Y28LdC6TM>tr+7|T3osyKQ4aE2|7|#M2M>0inhwj;}B2yEl&;m{uL+2$Hzb+M4 z0I}X=%%Bp97bxejUF(Rov4es1+F5DBkaM;FDgf7xxz|I{Vc zPm+KUCh-Gq{ho-ApCr`@;ml7!OpseV(muYp)Gio(Vg1-M^qey~mafjqjuol{8|w;X+iolQc4EG!Fwy zQ9bo+8H(8+FW`K1y+CIyRwPgFPZ(6JMECQcTAMl|>i@cL=)q%-Ibk$tK%|uMvDpD@ zR)i2uWVZu=Zj@lgnz5P+4qo7`ecK?fl2bt(VIgNq&{dgqqIj!v_hK-BgjW@}ct!zL50M{FIClsssW~s-ZXmT5XXA`-cXN4RE!XRnM%$0ae?nwBtu;Hg~&eXUru*Csb%F<#%>=_{WHa@D7nm;nx#ySdEld z*Z%Sv{mPimJ~XHzzx4eJ+7VPs>Fg$(LSKSa#h^g^tW~teV_S3*gNZxuUSmQ`iI|x9 zLecDnn~Y$-OvXR@S?{*TdwXr}x&5aZm@o~=Oe4CDdaTM6K(WGuPOli-K zwCE-V8J?;&n<-8@podMBG8h>IWHmWlvN|(l1jl<@_AQmDiq5wK_@X@rFaT$)5l2Wc 
zTh>V2bHFqv+#)^Zd7LFa&<3V>mHE54Tt|KOz}rocqKaO>3Us(({-X1qm=~d#f4H=K z2o3ly-LVV=wm@Xo&!u)XX>afR+Mbpw%tHw-Q{_p+*p4N4FpObY_UYb&V}PT zO5j9TX0LvhLxI%cMlN@d(XMlve1GNjITYGGlWoV881fSiNb%Dq*1UW^;%2b>tvw54 z2>m#R`C5*uWU^4BL$30rd69O3bi`vnlKnA&g4of#28X#htbKG{U1}t^H-37~yjM65 z-K{%ZR(9Wc2zbB#W$0^EpIl}R*p#FM*<82q!yUUB;qKL{=lv>8bvC`aR6TAR`f!2! zl_H*O{o$F`Ag4`L;p~Icey)ztN`Xww06WpgLRr;WKwVgVC&5RoQ0LcwJmSygO{MU3 za|CO`MbfE;Im?s?j*&=vVC6yVt8m%J{6<7zabD?e<8sA&8k-_lA~T?SnHkTdVL688y{ zk*IeximS$IUkA^c+#hK%Ly#+(0zcDDmg2If#W`W&U(n$6=Oil!LJQbcI#n@58NFZ3NIbd>Kw>nQEmPriNBaVSi zOJu;*ojc>BHzRAz=*HZ9jmtYp{B0a}$*qwj6gm!uq&K^o)3xjQ1M_OJ(g0H?kDN4{J(m~(n702 z=pd>3!_MYx|J683_J!x)HgWw291&(C`|EcplalhT(y4R-iullDX-np^Ulh+-`<;MBB>`J%JsA) zN0Ia-&9{EHUoYzKmyF_u@)pQYG)}ldOnZ+H2kG7Aa5_5#L+2Hk10d^!M;GDI+@sj; zuUE&m!tEXpyd?!BPN$8Av6Y+TB&$|-ogE{vQ})MIyv+#W>+K;A zuG-&m{Z9KX8Cz?lS4P;HrJzQM>|PrkYxrO@^B2Y@FX7UUFJi#B9K#6lRy?MapY=<# zJ#jJs_Yls8L-U_7+6^OSr2!ya`HZ8K^mx8`P48MJ8Xj_hErtPdD}(BDvIKmhg?+jI zo&yehumDFM zf28orGt!u7OI=8nJc3cF8GB0#29xRMyyL92#+i}5dOfjAx`+BENNCzz01?c-;oa_A zSCVLCk9)RT1D@`AlqmCU^SFh)S78wT86AHLI$%-;>np{IUZ&6S?4drmJ9QooC9i(t zrNWQA=m_RI4|zvLwO`{dvfH*wLKV70Ts&M_usj(ysYL+mVRo$$9-z00K zqC8~@ZKB)~gWVVapBH4FxUp^w(q)-yEn^6CmF0nqD-fPVR)gGg(|XgM7U{4Bf5 zRho?;FOTPT%jx=}a^6D3qkqS}f-I>&Y{#zZaFkR~ZM(d<{o_b{o%sIRT8Fa)TKN~r z4$^77pfzHkaL0b7$)Y$#yq3eJ;=I^ywGkG@gNk7p^(mFH!qRcD7(6X$xk^qPcSwm|}yZY&5Im)S)G-emCE-Y_kS^KTow>_L6LC9|Nzz8zE+ z>Wy~j7~slASCU=NL*UN&}LwW>HIFBDe)ViS@ z+6D~DYjx^-t!_L}dUkysoltf%Lv$|qA~qqu?WvM9zpSZ?EX_{Rpx2{cQc3bOYlUDM3I5%D)kFf6uOjt$bRM-*g#W5#OyI)$@l1UD~T^nI~{ z)8cE2Gp!mEF`hQjnU;dh_Z4Y;zM>i0wz@_n6msz@2`dEAnmc3qk5hJ%^3&(|zGGky ze=39M;$ku^DIkB)Y>qOuw-eaSNb{b&?!Yy1kR6J9Oc#|Vfork}M5c9faNyJkEGqv6SjbP7oz75C4iAkp@vd9Gb(NeNnQu%(T z07CGQ`2TYEvLyxcCn0UsgfUO7KcE(;E0Mo_+a7billOOX#M_ZyT)tK_B$J3YL5}l$ zLg#j^Ch$Y$hF|Bh0HyGlX`j`CE>eHb78o477EOUvX?%qo!0R5iNYN@%&Lp{ZJ>P0x zQG6%-X=9Rh8idzk=hUirueimei%`v5C3W`MuFV4@>hwrC;G~IB1SD9S#)_o5bEAJe zNFm8OfayQaPZt4=ke18KQ(<9|#&y9_y?u|(KzU1Mb~Ys>kJ6ES2?39Wghc@*)j?4= zrzv?;bb`wSO6dd`62i;_naG|H3=`W25S;GNl&cxcZ#zCiRj-vHJk+@II*)w=vliiV zSU=1>p8OFamrUq9y}01ikyb@8X%Y!_kpV(jLu@+_CXnfd75@(kzag`&y(0C4;qsqMeg+>*%&(O;0o>m2l=>B)K~fLE-wIa68g3v4n=E_U4|Lv(-^&)4pir*8OS2(nH(pM@ zBRBp#l|-KV4Vg6p;R3o(%eg77;55K{^ZhWbCH5uOO$TJ6dNOdWR$2@QhTSFplb@A@ z4r(h!^J_Z>ZPMyMDsx}5N*G-c^nT|~1P$=c5;p#HUB-MT8Arch)Rn=H5obzvRi;s} z8MhVmiQGfJy$p=0K=s9@D-NOs^g$`f14h0soT?g^hPFBzGt}NEImNXARa?mnl# zdN2)!f%2YIJINBh#T)IZ<;UuJm3PmaghKJs7iz5*%D7NudUf#e;6N_?YdjTG)C1N0 zyJ}ooWHbeQW_<(P#TI86B)8fwnJOj~ARd+qm2WercL)rRlQkd_av3``bUFHpMr6Eix?0+qK>+>l8&6u?J1 z%+LD|b}U8ka;OPKjnj~uk5K_r*Xg&}i${u_sbTG6yHA;ywO75G{25O?DP@S&^{Itt zqj&z9UgpNR+XcH+P1JH{Gr&|xsEn8$`q>gek&MQ|cTA+QtyKIQD^A9uw+~%mR5+&h zGY!JlOcPeC<;27{eJ9S(43K@%0!95lC1XdG#VZEIQ%JEoGx>XNyMocHkL5Udc#1v2 zz!pJ97YoThGAWr~vwB-TnQ>?ExdjYoZVFd%qr`Nw=b|f)*=}_!JT!WQ($ajDyqI-- zcyDxfgQc*}TJ6H~$sz*XpF4IphY3?QWfjCL9Y z1Eax2xX#sX&iTS@G>qyUv}Sa_y?pAn7NOcV(2eqylQ zy1`(%vX0%o#29R_#Fco@>y01QLg(Evp#2N|ZnZT|iR7K_YZeM@cZ;p|ooRD&-Fd{} zw6IUSUiep!^~ho>52et+*iH8bQdFAvv*j*$G1M)!|FiCwmBo5-_HW6L zlJqUiG*ii}!OSq;&E^nm(Of}^1errAvraR-lDiadlY?eRfvK%T3M^x>l0{f?krDAy z3yiuf^TWJD-W@ZWVrWag5kkitVf4`whPzgJUyFrmY<~|74N%(#0*3Cp+qsAicO5ys zivAp)CVK?8%e_E=F{SwlVp;Iz%qZUio|K=c5quEvLw<|&fBv%T$`D`KhR|Gx68tsy z87V^}^kaNB&~F@W4bs$VkbzQ3k#b4W2K1;nuo;9e*$81Ket++1!1C1U|69`k6h(-@ z&(A;n@uP4do~;@tGch1A8)bN=(to2bKFDamWw|BT9!n2zHeQl%73DdH83++?--LE% zI`1p4qf5*JtJf3^|b_GCB$9AnPO0<(A9B4GXGW`Hsgz3$0K4mKoiwtDY z+fRYVb1i>kGw}TLp8t?~0X+KGPfd^|kWl2%%7L{1Zq;0)3-Kr{92gmF(|X%$2WgrI zECE#pxI5X&nH_CnFVSh`e=tV^M}l>$$;LNUhi=&AI!L!e-o|nLZ(tEPQGJQRw^tvM 
zE+yraD|D9rFQ5JvM)~K1p4NBUj-Ph6|5c23ghWbu^6RnSfEQE-Zj&;r8)q6n0wZycK>U>5$!B<%luvKqAVHOY@0ffPvY5x=fDpDK744uasR(P9jAbiLA0hx!+J zet`n8LK^l(_3SZdDo7Z~Z_y|(8;!FNVlwml%G4CBUOn-T1C8?_aJT2?aNBxB479ia z;LxnkLLfAgKrV+T>p8dMJ08qo=;-Jio618)2nibdh}!4TXn0X8%5zPC*=D{_{4AEo zeX?~|4e|I}#Pfk(_h|ehPXUmC(mN5OJ3WFG?1Y@{ii9{8y8NXMy(=U@ku{Iemi(rhwQ$ z=WN=iE^y9}6*Nwx*pU7kq)1Eab%$WgRhp8tV*1gVj>l0(C1l6?a3Mh7%69vcdzbJQ zEiNt&1jpF5_n9MA`YhA&HWAoufi&Z=m#sk*t;&}K7U*p?@b~jFc{}x1~bzo$A zd5;SW1R_|qeT@P$g#6`F0tK-DR;i}Y!vH-!J+11x;ydGU*}XjcEd&-iJQ7kDFvHdE z_nr)B?I2=AQCoQG7w8`S)eD@s_v^A8>VU>m)@$!Kj%*tlmKHj37xHSvR z5T2KH(f?hS7$lGm?FB{$0ES?L%=WV|`s$NXkQ$}z%OwGnpCCNYWPwjE7S%0diVHG- zCITuk>u+P(q%oP%eA`2F(Y>w`Xirt^E!EGVd&OTqvdRwJR~@L3WQn&gNDDLJzpf(@ z!tgvNLDvEb+;#SZ9R_DDDZJX?z0+T54f@JP1ytGpTmlmcz&N7uKlt#3D!p7eY>T$ zL_b?bRCwgs(mVK4Z+Q6I>UEzi{)Vl0OKF00#>!)$5v9^0J|NX=S*xKa?++#}L22$X_ zWFr6O11Ompe-bG1$f?%oy-pkr?^rvG^Z8O^C%MkbFvpl29RKI~VA={m7Rsh~0#dqm z=8BMDwXgtzz-)xc%;&3|3roH>G6cMgUVY*Qf(7zrUqdK>i=gJWZuRa2^+f}(Dg1%v zjY2!~F9yypJD|Vke>0pj;R>+2En(K9=hMe}BA+)E-d*Rj8lizVdfz5pr z(>@rhBkpkz#smRnU1mM#G}f0*ZzclX(y-B6D84H5A}y0Sf6X{TKdZMp@j(!URDe2& zn+ZO+fdKL~7e&uu8ivE6iB1~)%ZFbMjoY`qT|hdG6T{Vf3g9MXAS8m@pn?#_R6Fgu zwr%v5LI4BPj|o^|{}#iSfx_7IFZm6&zk6Bh`gc)8=$sgCkPxZfO8HM;MUCN*I%(!$ zd}RZhid-$Z8xdUNx9h@zyZ-mcfWO=CF85jVJE1q93iu@1EhXOpzXO#1e&v(+)d?!_ zkHH2#VR+1tvMDc8vK-Y8%-dSq*$GyX!fn%r^9TcON!`nyV*Gu5I=WslKaV=RY!@TE z8Y?0&y$ha+iRs5zEUG{5+RJOv1y=zJ_vZa)bgj1107aBlV1^tDz7ib# z8BFUB1}LFmQ+W%eUXH;=&c|=>aQ;*RGvviHw^IBgnC4>8mK2F;D*u=x=l8<0fj~vQ zeOy~inNojr%swaB6m9Am?Dc@!zx+=w30T2bA8ZgXoit!lEHg_B;A%+W>ms=BfCCnH zK~brH=ko4{JCQbzlMpbf>>?dm?4=Indkt9GJ>Jf3c~qIVavKFF*VnsykIx~{0TvV z1g1JD`O&WR-3DfvM`G_%7p?oz-2<ErTkrT{mgu}t zj|712&SClhqwGj&S)~NY(PeG0J(mKeEH@5Q1P$b76#mE$&6H?Afi~)$`i5s=x1GsG zn`HBgz4Im;3z-BL>hCvr3&Dm3kAR?H+>s^F$O>1r!vJknV0ox>XdVQ#zFH?z(#v z2ENbzJ@@`I$HUCK_u6Z(-k-JKGoE+#k=;4!?**Lg6u$q60~9<&`qqGUt+oWQ^9#bs zxZbA!5sSl~{*j^h@2~&rw1IT~JCn`khnFTUfUU}cnW^*ncO`m_WU~3h1u-m# z(w_N*$hL3*3B8sRuz;NQBRIj&@Y1KdBOx=Qz=uZi|D}Qa75YP(51*|vdD9Q?8@}Iv z+^*%YPt28iZ27H7-Pn{dSx3@;^Pi0bvy&L~7qK)8I=ro?DyLng$DKES5k&$F?^_tF zH)S39B8_aKiJ{F@tH%qb2lAM+8LbDv-{`3+et&Kb@Io`dk!^5ss!y)EW&Yi{st)Mc@u!)2OB7ovC*oF&-NuXS)}AnF2lzn_s?VtJ&^)rzJ@;bf7n@!+) z%dl1D1-|&|wdoG=2u|ZG3ixIe5ZE6k*(XnK?i2CJdacc0Qi3i}c)U3GwrZC;Ty{UK zv$ON^8lN_E1qnp4C*tkS2YIcbKu25F!_hx9vnCA%zf+TK8-Cchq;F&oJ|C=LtCap^ zOOE(sRIZGCF@;Lm)|Y_}TOIb*>+Y9MWPq9y;e9 zJFjRvI=1s%yS=8yyEXFC*U>P?6FdnML7vF~Zzc};N#+l~@0tdK%3VUlNem{yQ0eMD zJ~&X`a}|3l4I)Hwlx;ke7(M`I(A{7MJ(<@y3<6x%M`f&jw34(N?Xtcv6Eef|U^suz z;&bnv(Gaq+zRcVYXNc~x*B94U(gHFWaS8dR5Xu|<<}ehaZO?1u0hC;}QhhZUrKB{e z`Sg_pBF;zlnH4iUSOV#-Ll6qAs2;0J=E3Jon|7bVZJ1bn1>^;=HeW zt}AOn`6dT@B5E)0ki1St=GOntUlls|yyV8H5{qWSJr@a7hab|v-|x&(nN~rbb^SgIP8hm_lB0Ace^zi#VM@TVB%!&LpA2=R8@B!D&!JA1tGnu*1H%DcHqY0;3GP4W)I1=a; zO$5&j806u`lut7`&b~i8*6u4V;ZH4)OuOQB@T(Q2sINSA$SVlmD!iPwi`_904x3q@N54Xb^F0_FO=%y z$I!Xy3!B9Y-*wlLd|JfO`~+z=yKMJvunk#N7WB_8-qERr8}=8OB8?@$eFiulf6n(* zZx7f_Gfejxefpz>W~L)1@o4g&GInd#oNl!K8K{CrGZmg!B|*kfLw0q=yt6+JR&&OY z^V?^k8Rp+?&QNV55AIv7CrU@t0ZY$LU96<{3RM7fRYd zd}z_Yd1rq+HFSynMnOH;@z*ja zSr?napLy*ODpdNaD?u}Ke60mwbaZiFcY7DoHza-WP-O;eOkUf*4@Iphj>KWaw`RV0 zm=#vOsZFeYZ<>3wEtm=5S?l@u~O}CbsqWs5tXLEwt^sCq? 
zvOf7u`7e~&%`-Jca5lKmf$hif2s_tPy}2edg}l~YEc2MO#1v*lmiPOzJ5BbmA3DSB zx*zz>e_)%K6p@M=8m#0Rw;MGtlA9lCIu1l!t3Bz_%Ao!5Par4sLI+!kx2Fq+h4Xbc ztql8h$qcI`&PNd=YxF&G^;?43gj{5XoVMJy-4;^wZGK)X-RLvct=``V3Q^MNoy?S@ z1YUud1tOARP)KjD@YJI_MJCjEA=bo6y0<#=%7)KxwrDidp6L%NgscsOAkOCF*R5VQ z<_#stM&+R9+BqxXBe_8$Mxdj0{-KN2hZ-sTL?8J=Cu7f(JsSP~E=exvX1;c0lWvEp zeL%<)_1+6-O4_cdO0IT22{G4e3jTJUjITdxH0m(QUB0SspU*|>dg^26sB6-#6}CmW z4y2o|*SDu<*`81TxiLYxsb)CWnfb`{+Pn38lM(p2WtzDP&MeLvUotfsd(Hjsl5J4; z-#^&1t-7UIHK5k5Ch2cWx#pmssNB6m&fPW3{r;xZ6SWrJ@16SJGGqE59$Ig3{~9fn zE^ATiugYO)F)<$t9yhV~>#}a-oO9NX)b!oxQ{J15mEKlKc+p+uG;H3QkavFnduT>wu7ji$8y25j~Kj`>@cPv13EbN)zd)sbxGG+{ifGM3L3R#!+ z+G~Km71d#6-&t^eF0%e?LVWk-S)vbClj3`~*3T2mcA3|bz!4)P4%_3QO~h_Pnvfo3 zJsxV~>h6#=U*@rS!hupl2=u@!`fhR$yL<{ylo)pBdFFLXrQ)exO9ab#GM2!bQqkxi z_{tyH)b*MReh1@GNoUDZQ1cpk_byZv#|Ph}i|{l+CQ8n`xh@8$E^-ogTyM<$@kQt$ zKvy!9kALkj@~&>;2MURfkSBNQ8dhF~_Hj2?GjpZ7E-ti^PL5Rjbr(LkbJ(A&`^LK3 zX>lTeBR74+u{b@^e))rb_q^M#hGylgUG+!nJ(IL26g7rFl6j=_lH)&&8kwvs4m0bH zrbzdUPw_5tTG$Nn#1GS@J|!4gyE>>;KKLQI+h8cclf!f-%DCRLuc22jCwnq%B)f?9 zooT>?Nq2sev%q(-L2YQML(NMp4e6;xg?M&--&1a7zpTn7m^)x{7E@M9!G}gad!p9x z%0*iFU35!4V?VEb7iYK45O;QVqC(8}1;atp{YKZB!U9(2oMc@C*T&3GE$Ip!6HUgm ze@r^&3%Lq&zl)!dx7VDX>@iKe%&gfQW%i}U&|ioyR>ocY%KoBz{*t?Un3fU|%~T_| zZ4KmX7zxc2O;=lm!^_|D@c+hvo@nkQa6gS|Z@MeL*S_TM+LSH)9xaK*f!)patw$Vl ze)wtR6W^8LwNGl?XqPwqBnZd+o}r`bK|QPE5DO)qY6rubiB5TH)Fq z0)L!$b}j7A+YM}3rZ)CpjtQy|8gzEf8H`=vvn!_#-HYYYFSnb29-{cfa?`OvN>DIz z)2WKpDNDTtb=Fx8bU>F**XQrr9LfmoKkpMR?zNhCDpL}W4lPtMiy~HIwO_d*voaN| zPiTGk$Z6JWSZkFfD&|>J^hoB%TgqiUy$4FN$-)r?*#iMaV#qY%Ck=K~&v)+nH^f%& zGYz}$yupWtvRLPgUH;fP{l26Y_L9!Dui$-1RDdmbIc5G=LVoz?;D(lSz*e;)AiMUo z&iTzU$qlSjMIV51?`yi@-WC7LN_l8&zXo2yOE))QOGYA@^Ck?px+ zFPqfJ_DytEeD+H_T0XdG6bzc~PNC@6BI-LOrZ+*5Rjf?}g(nfWjxcU_NUg3q6~5v4b&tf*p2?AIoxl2lX`Pw|9(0 z`4_p=H>o3YF6@8q7196k&^hZv^6We)p%RkB4cRT=Z}qL_^maG_iM3*7S; zb7Ea$Aj)kW8C*PFQw{i}0;WlI1-1tPzd%jv(gQ<=QF{6uO~wXJ|;Fj9C%+josbFc}gunx$^QWFK~|K&knf) zUlw$wIq!|$*8TbTI(TeKixrh<QyL2>L;Ks2L+(5hye z&Ay2G7WcVS4Eu#g77U)2uk!~1eC~f*G$olC8C&bfn+zM6sPCVdtd+0XNLU%VCmf>m z()Q#*o^!;q-#&Jg>cd>FAmUc81^+o&FCBfHb!mzUJVQP`kKibq0AVR16o#@i&|M($ zfC3XudNu41))&J>*tDwe&b~8XNNy;x9BpK#4bdK%OISI+|)Cx1vJL z#6KM+j7o96ojh7}!dJSRNF?b;ym32X22ZdG#?I24CuCj4onv>mGpK$%oXnXt^D8^j zAnY3^kvS&26Of(oD`9{LdV~1KnymVZ8H-2Ql|x`q<`Jri7Cf20ObIfooffn6a|E)t zPkrxl5eNo;pbqitPhjdO)mUfof3Z2>=?^i4B2u>)ywvZJNlS)W{n*n}4}M_)M1Oyh z*bIj~t~vPgDJA8m7H1;gFz7-1pAbtx_K>qN_JiJ6 z=7~h35*Gavf?kON8Y$=Rvyi|Gz|3)x-t{MX2a`pqAegWY#YOhUl^FoW$FE}CpU5vz0bT9b+G%$Rb@f=KY&6HXxU!BZ0G z<{KohBfxx}D#5$LSQf&4&;;pW{!9!kA_6CD-@}NBWS|xC>yO}xw-m9eCt|`-g0m_| z8Wnc_L>q#UK!IUXY8O4y01L#j#Y#`vi8y?A8pT$XS34)a9|P2fFZ1c!!_ZM+EfJGBrGj3ixnybP}u1`H<5ZWK7#xL}ok44(NgvU9Q} zZg8+A@ei-TYt_IL6ErwxG+5&oX8;8o!ePXE;sj;2z&1e7DBOcu5(A+TWw!P|8B3TX zZ&gn5U%f;&L+lj0h<`i+qtmkzKEUxn0kKO*^?02k`!{UP-eMw=DeCr(Qv&e>@GP$? 
z*bfQf=dYGhQc|}TLL={EHwGEki6L+1%IW27V{9h#k8L;t0YkaKPd1F0p$Z-Cp8=eu zyt=yj-Z_gCe*%H6fVI@0@mB!ZDVWqp28z{99B49$mS9GEiM1kD$UOD{mshxxU@#pv zAY{A#I5;OfxN$j;mULAvB#)$o13L<`8LXK%$P$eEAMheKe3z)POa{3#e$RH=n$IiL zH|xn$%CZe4Wqh)(7o-GWkCtzr7J>rI%2nKRPWC>a9LV8^HOt9Pse#9=+Wr7LopxR83=_4>Y-=6_`wWgp;5ndrkG4yBPPR$_aBm&wBPhz= z0b8ZJq0NT<4hX}=yy33(9l-pICnIGiv`=GL_cMoPC zsvI)ihYJbq)+g@9!wEb$1JU7z!k=HsBdUO*!lQe9Pbo$^EX+RxwSp~gf`B9l?AJ}I z4O%p^5<}@l%afnuKyQ!%VGZPlke&$3Ltv>Spubr_sSmi-kVrD|6BR?qGI@Xq%zE0# z=UjSL)-6TO64q0zeE@WU2*~W%4e%ft4gKi{IUu`(>TFK|78X>%#mc$Jj!IY<<{~f- zU-=c-+khan>knPRjHn)-tEbrx$FRMplmR;#@*J=$$5l?S)y01vVs{Y&dx5BNK?xh+ z&YT#3B=Va+*!>l-x_V##pc5-k%_J0RQgYc>T~L1@@x7hmKGsv1lbkP znEmX6N;V_N!TQHq1+a#g`DXR&Bavv=JY7Pj;fIXyQ)@_oW3*O>lao`d^d{LUNvr`T zNEu%P*b-r|K?FLZ|D9S1lvYcW$<5-*>xsCF$Kr#;#IrhG0|U392SDVu%_ZI(e+BmT zvI2TL%_qGHQ)WAwJ_`pD5;8&xfsq4^>OycypXNO~bc4V0$?NSjXvZxS?3m!!6+mpp zBEu-|0|&C9{ef2a-0b7&)6>Y;xaD>m6U&pzN93Py^VC#YG|xfST3d%bPXNTh9suH? z9hXl{go)U0MTAG*kFx;F_xzOn{M7P&SinYpyE5r?JTuY-*wR1!8w0%1DvT%k@{soU zE6ngBHEz}2^+zRm#EjUD6DA~pM1r)EYupc`T&`%{*VPI6r&J39J9ks7dUO^SD3Dg; zWvsWa)d2gtK?A`d{vm_5oLOBRH91A!3x=A_Jw(wtTP#Ntfb5fIZ;Y7{GnC;F<%rQw zzBonTWwDxO@oxtBV!tQoj{l6D!)#~w*Z@290Eoi&>?6bDuP{q`6ql9Ni1OY}d(p|% ziHqGIFTggAzW3$GxaWXvHxehmoO*A^gh9fI1w+2*6`Di z(uU9^1Ord?Z;7?4;CDmwErZtsiUAjfUR^#>b2QQbHn-`XZSgPO@lIJlT|VX+67dwU z>a!7%(M#kMZ0-zGb=(E@4ms^p`$_(vRxAVn9RKU;>LOrz8FY$Ch`2v@!90$D5em?Z zimSY*{hll)Buk6%t;c#w4-g}td;Uq55%%ReSYS+1LdvNP+PVSc+U=1%;W4j?HGw_b zDiW~wL$d)QtJvk?$6tX-<1Z~?VqMa*iNr6*qac3*O^_IZp>RAbfEDm8-%k@b^~g57 zXA;7a2(pUU1fJv5E?{c)!05M)EzHOu)wpwK7_DWuStK2g#{?XHB?=@IT&_c(o|8i; zo}7LIJLdo2gy$T*cSgO9w~ed>kDno?!K|u))7}Ru3`FVN7m4`OQ%TfN$lu2B&F_S7 zH^5OL(=`FI2MBUNX`6xg0u;M{I36yzbDumuh&~L-y*Y{XE^TBWvUH=c_J)J9g5aw= z!Q#Tq;uyb}7zJ1m{-ot@vJ`;ClRlRXxJ0+CK3^cZw9n^g!vS0<*!sQ&9|} z6Hps(CdlNEoW|_SFka`e^~F$tz&EH#lnijIwQCXB{J}dJ6?CbCDKm>s)SECQ(T`68 zbfHA8s8RLhm6fRsxo)a&Z;kp7KKrF^uRaal+=+Vf1YA;YHf>hStm(4sX*1u~Rn@e! zjwu?`ih4cty~2_&=zZo`YdqqBuHzQ!rjbEufv%r>Bq}F$O2mEs8;J(l39X_il&OK! 
zpcQfLo^Kd74Pk>>s_`AB zx5zxOsV};y-8v__HQhs6gboMxm=xUi!O3Vxl&^Y~OKD>?ztTiYEE|P+#mB7bgB{+` z&-f=2@S8NR1>fncaF~BRg)F9AWmQLI-}!le)L&>U(O>Y+;UGA3#ovu}+cE5Qn#~?G zwI6mZp8h#zX7Qw`SC8WEOl?x2s zyKMC8-?5%}>^K$gJr-$RWx44OZecFqCOwZ8QavD~?8Vf!;s_k*#^<~Jnh#Afg&&l4lyc7xw>!~qL8FS@}RpEqe zKdY|Y)%$^loM|kiab1&Z(-xjl#UrFN_>yE(mt8#M)Sjcxe^+CFp?ooBf&J2d>W1V#_s~Mu%d!W$Fy;)L6?fh_eV}6#>8XPOv?=DuGA0Xns#$UGn z7`yq*U*HKiAXRLXJc)VZ#-PFx82(Rh)`3kFNNfRnMGFqnN}L-PnW~K{)U$9rjT;If`N0{tQ+>EAw7t^6)|B*>t!hi>dwI!B zM&|Rmo{tk@^f3-!?!ol#(SDUCkl`A%QLw%*>u$OQ4sL(Ods7?DY{ee#)vevDw~*8e z^;0#ck)P#%LX*cFhUSqu{&QTh?*5$A~(*`<}S?@I^6pdWz+s> zU2tj^e@82qXsh>@(B9HZ6L8$sOKIfS)-u{AP7`fW5s?9oDCYH{1+SuU-JS^nn4fD= zlwF@8ZWZ}AtOfdf2`=$4#2A?zxN|)l{Nh*KhtSZ-7_FTL?vL&28B@dNcLtxv8q0f1 zmJRL>u9Z&41Z5ZX)klSbN9d)Zc;r|smPD+p4sc9qFuY z9CjZBrPW?P=;PXlp7cr9vGzTi1JcMP`eJW)_^>E)ykfQa(!8y1D7XzjV&uE@R92-6 zA@3NmVQ+r20Z$T9WnV|r!HU9}Q0K);ZtcJ+%;6i*4VulX*}fZT9h$Z?ON`_XjcU@q z#`TENpyR@{O6mzg2J<#i7|q%WvX7%@`J!jYzjb5R{>>h zJmMO~y8_TN`3-W+nelElEZ||VLAFigxjfK-xxei_C0tn?7jb~0odN_ zJ2kn!gr8E0hKzGcQwHa&oZJmFs<%324aY;2>^4(Xmhn(K%w0C3Yf=#$k624apbE7= z>0@v4WyPw)Pr)0|NJH41KX^?@a)f^dEdwY|6vQl@%TRvHfJBnKwJVcJ~FXVK$Z>N*r(KN|$%EuOvPYReM7vUmcd7gq>BWG4e zy20J#9lP}5Jdv~qHdWVI#GR=PiRhwR7!+Iou&Cw=IB%GANIrC4Gs*jmce6s-b-k<7 zyGyxyRd9bzp^f_SPNsxw-Ypa|nQXDYIJW|v>!wiiB@G*8X6yZ`dwcB}^oDC zfl#RLd{vKR4x|gnFst)?Vd!>Du5DM0WM860D0o|HDP^E=fyw!teO2Z z4kM6WfMaT+CBRR!aq9pUyFMyc=xPUz-$(4w+MrHx&U#kP9tK6o`k*BaWpjNGROG(K z11qXo5bCea5m&N35h>>VJ5MqNEkPsL!Z&+sQz>AB(sjNg)gRz})l>=$eh-@bvK34L zz@GocDP{YfJ=(l+R*_sn&8AIEFYF%wWYEUVKHX6DMm9E6^AxqoxcbhQ63m%IoqYej zW+ivxFl?{QGWP$Ee@!EWRs^_CbIP^E~=hN2iS`ONFzwxs9=}SnTwi;V# z)Wzf&I_09u7mLaef=}JU1vO%`o?zCvc6OAz;qJliV&qDtwZ6c?)+h$Gu^C!a1SO9d zbQpb?5BzMOR0TH1jlY=JLdZw?iLgPf@kZ!hL{!3X%UYe1UE6B8?X2Fg-ug`RR|%O` z8nNCuvCVq@j-DvDXRE5~sO;sS^%v<+^|yhfsN=S1~xdvAS$goKKT zQd4YA*|{Dz-fby?h?b!xCS}@(kbE+M>TBJvV_jC>xEfE$wf~0Ce@$9w^_EiRI-+(1;Iy9F)byuCKnCh8k9wltAbzF_73wd408M{wJ3vQO$x zn+q&qIo%_gfwOw~<i=4gmE!8H z&2kosO(wrmmwlP}I1oaf!L1m!&3AP(7)hRqnjlECNj*qoam2i0uF2YCvsA=?yH4of zH{p7yi*&~3A+YxMom-tnd8n@Jb}Go&63Gyz71ZrXFlUb~bd$QewJydE(Tq;pQI(FY zcWZ_!Dyb_D|2uWVNP0Xe5OUKFIZN0JTG<)2geWeRmGoCqDkCZH{=7@qW&RP!wBbDvtuzADZv`KnamDNcJa6>K&zW5del z5c7wMjWk35okjJM+b#|bu4%L{+V$RIt5|xm-MqLD32GKiP895syHF4{hGvS)U%$FP zBxE*F@`^@I*O|2rTL={fDbYC-nh-w$^3sc!oV>SH z)W1XPzW4SgqWn@kl*$8F$uiol0`HJ!xp9K@3SC22&RX8RrLUK$U)dhqBIvPMmL{JU z;2yR$M}|l#^WCg&7M)NJJi}yqp1`kDMTi0<04^e`Q>A?OlL*_=d=DU6hx?n{)~NZa z-32|>49zy9xFmrwo6_l&$j4zo&I|NWEinRv+vns9z4z4&Unf`gR`_mQ?9gW6eAuC3 zJS1e{;QtKNNdINw@G`W$V1|w-suYIWp^_6PZPoYO@yaXu#aEq0sB7xoRn))Ezm^=T zp`su2OAX4jFPO95Ml(L`pESj*YwLJaaPY{(B+He!=|a#OR~U4W$?kjInshhWhR_e4 zQS%^(XFS71^;LaRt6j|djLC47tCgG{0};P|puPMvf(VW^74ypVi%zo!tw{p89C5AR z@KAi4?Si{=&x)q^L_qMJrM!8NDEwM5YqmWAUBB@sU-w|Sjs{qKFbDGH!!=;>^;KZi{zgBaZI|Om z>3hrfH%rFQGr%y|PZBK==1#_pWEuG1;BWarKwBA0Mxo-^($@0QO9EtscP3|GJzR0^ z8!-UeN+E67ngrhb%C_M!n6nu#+{HJW736Yt5t}l$p6{7_t^`A|_C$o8|32?Q&wai@8?;+$4~(u>vgc8|Xor+!am*Yi2c z`EZ%gKF}B$a&I_}m9| zg0|y3sXZH=MQvOw&LG;GzWL%{e{)zWFJ*MEe;eA_-^?69DeD-muwK*QyaeDM`?fs~ z_s28$zrOTx$W|@R*|e+l2-?(|kJp{VT{Y`QDxP_~n6MdNxyZrZK}$BxJKm*zEVo~n zM)swjyan|*64`0WEERkH;XCe zA~FXO5?%g?X26Cz=D|Di!+q3%78Tt9`1g;gHQ+tv-zGCXl zJfj!d98)ShUPf<av&THajAN@Ga;w?9ylv-oJCh~J?5PH2kBPc()HEk~Up&ubOSOrMzT{3nfGZm>PUFw|3+|*j4}n^R*#@sb z;DD*LOx`$%D`%|qx2ne%K{jTU%H3k6@4#bU-O=$qYV-opq?z@Lv{%MG+(rhAP1A35 zx%FLaC$jHM@(62xAGMhTm|#7>!p&Wfc(Qcw%(_bxKV94|5?FJfU%FezuN|wZPd}Df ze1WrIbLxV((&F;4`(dSXMaN5)(Pg3iwIJuLRmmrtJ)(rXwI(#|TIk-CSbWv9rkN<; z%`#AD;`HVb-7lR?=A-Z8N~Mnl_iJ@`t-QHJU$z*%#L>mLz@`86$GEgi=J?@GclCgE 
z!O+a&+C>}xXV{W6{1Z5&dtTIXnV?j2ano|2FuLW>Tlu_!Yk&Io{ZfW=*UL3neOF)7 zZ@TRJfg;TCxD{U=HdK619Pn@emmAb@Z(ITyf7w)m@4(sHlS*$5XXgomqqLhY`hEKS zg8S7H^~l;|%iMVFK-QrZ6XbZ~rE%)i-w!f$x0Ze{+-{b2n+O1rb?xB9+FXWp#NF7C z@?ks_`B0COaaf3vrIT`L-4>NBI7uwkHI_~G^>QnccHQ0J?7y@B?5h~T!?{0eNb3d* zx7$YCsefa3=Zd?pwE9Np2+d_tn#kl|8*Wjh-%Lmvg3d6R*QBk>6y96!R4`K}zc~Hp z!Xnr{w`G;d0)Xu`E%kB$iAVR!SsIttxnfAjT{aKL#Wlp;T~mQ*O@i|ADgD-K8|{0k zu}X^xrsD)ZOqON@yiQ7JSamW zS0wHsIDr9%mEY+lxjs+%_>TBK%9k5v<@HOFr}n^7(>*DgL-yZTjI{&d-(d$_5J1aistm$u>^P5)T+-g zRSK!5cE|_~X6MW+Chvl`)(p)5WbTvR*aNkvd)mcS#k*^KMfQqcgqtbM7imqM35;h{ z^MMnmGC`-c|E%p}b;kZ6BSzy)FHCj85qE_bg z;g!d0eD<@7l;17Ks7V;&kU7?}yuxqSX1W?0_Y=GNfG*viw3`7@dboc*{+ZfoSGwji zN$G{nrb(;!c`NM(%JYCb!i%nZIBBn`m1JPV_AE`_y525;Kh7PwRC zwT1#2i#`Zs?EPtO>%8KL_iwW*Rx6oAf$ij z4)Mi%^Hdf$mNgHKt=+u4dUd+uF_`5;skyTTpmaJ)l}ydTLtwDNsFY7B@KRRT;aoGY zJBpyR2%Z(N9B_>5RGD2|!wI~?q7t^xcNk2aYt7Z`SXgnzDd1AdPf+Bbm1d){_VfF; z_)08AN=e;TaWvD~u6Mh34WSRe+-udq(zG0Yu-q(MxLCF8bBW;wWlp!v3rb0-A*^9lA@wiq%00pJ}NibE@DVp`WEaYJW;2GNk z;zfeAxovWZJy1zsc$rXT?tlg z-eN<|^%zcrwminjZxph+))ja$%>gd?%ik%=y;IMnx$UnjSvvW@DP{oyMP8Fx$41ya z$9$8*t|w;7JKJw4GeR|3D5f)XhuNH!cu>l0aOwF<{Ce#Ys zcMS(hKH`6y;|vnLYt4R$EqIU+mu&sxhG}|DX3Mn4%C=m=H9&!=Uo5x+TGe~Dc5PA=kasCZPj?%6v>ISij5#)pSX& z9<4Lw{PUT3v|LvuZ3FP6$n$KcT0gI?wa4iF^xgQc16>~;*GHfq=x^AO^6qv=j9lMc zhOUIl!h+P0gB{MUgwH;Gm~)AAyVQXXO*A+h#31CaoGTgiv)Ws(^PT6_J>MxS_4467 zOX}|s^@6%cFC+5jH`)44KD{#(HE-4R-(KsGNp?RxF!N_AAKcLCpnh)m5%kIggOI); zvS}7n+$YRBEFL?Du&9-^x(wZX92Q`a<(jzpx1$V1EQZ|6pds#`WZ##l5!9TL+P)?~ zP2|6}ElRdK*ii9vna0(w2h^0U?qzORm?OdYl;({+i^t-&Bm5dKpYk4`yovz$BJ;0^2pAd8#0@wg4$_XK*5XyUvJxBD?smegOV$8wU|{hD48mjfi$) zBapg$03o^8dr?Oe6#skQm7Ug>#*D$YHxwWzgK zre7CCrX?q3w37o^Cw~)2q{EO~a+yEi;8>k&J74dXqnyQY2RBGr@Kt|Ygu3FL1ax4r@ z0zkkNcevv4;ICBTf+1Z?zrX6%pds-ai7Q9;4MQO!K%+*`9GodTUH{u~`1dPqOzFnB zLCzcorPHFh>bvdxJ=Xm|85~VA8E1X;;sE`Vh>NH*SONekdshJAVh_iQS;w*Egk&%b zQ?`(jIART&49@rczY}0B0+82xvcWu8H+f%2!3@@z`+~S>j_Y4@bAA*FAjstpNpsO?(F19yKoAO0`~(Uk44{$ z2n4YN24|$OsM9P=D#5_??pQ@IggWAbgXU*~85ImCNAU1h;7LS;hXjIH18n3{5f$eN zz7h%2g}+Ps^2@0cdldc<9MDUwjZguQ4?yCET)*m#J!K?5M>{s+V4fjLWktRPDQ z`+^a(X1l~+r`GgJ8G}>!`p&!bMmqnI&sXO17;n(=X)@G zwn`a=Vfjpq;j;lAlLyiPaPUjNUFYd@Z1(ej&ta2F2`EhSIB2VGt^5dO~CUqJwMGNjT; zbiCJFl4$e{+xcI_NYHHpuu90$%g3Z+teV%kvNcf=ntOnon|qls~2G_kp;OCVWrGIGUN*7!dQ6TZr2qbR@%2+Z6Jr zF{1D-<`IH)CMKpNki8F|mnAnCy?T^(lBb!0^lbW4YUhD6AF2qOFOo!4qNbI3=#9)-Q4s zkVk9Go^cG!d=FH>$E3sR8T_(F&F<`1AX;BE1DH?k5}-^F-V=XNkd~3b#K^|B8ISV8 z+EUO)04OA5@dHiwFfbpC@Z>ZMvs15O@N|*Rrzn8$`^#@SjCuO0H=aVwfU&2RQKctM z8W`@ejkv~W1Jsr)BNi~5CIg_;g0vd|X{!QRJVPbAm#nLE_7vaXFcb#3*T;@Je>95@ zEtthuZ{j8i!&@+mwi@H{{C|n?p+`Pcyl0$)MS^4O+22lWq%@E_UQ0{%|Igv+FsQKU ziSY?y1^_8k)OKY&uBZJQ{`!b(k+xvQpD=6EW7Y%kX$x2%$I}|Y+GU(>EfHy~(*@6X zpaCF?BAc*<2V!_RsStbQ&<-Z`gBMa{dG7_dJmK)cJ9RYDaxNmd+ zk11ycnz6-z)iW%*vQ`3Y%9Ez1rc)M=*cr3leQ_L|BWjjF9kyQlv5yGC3~aQevE>3b zbO6%i0OC)CqE_0fgZq_|esRh5lP# z5(du!LhYe90!IrbmH}&WC~_WI=%bM6Mb0d$n$c(T+uXJ_{iJTn7Qj-(#`bhjzk1c` zbG_Na&#@J%dm1_f>H4kBA7ew}x|kJR&uu$yXvhflZ>+9OpEa*1`!r;0KP#Hg`bv=% zAA9Hyh}dERj{0vy+rOFbKLiRW^6uJq)sLTpxr6&=HT5Mv9$3&kxI6O}O#|gBaN7QC zX=b^X&;gc&)kPf0Fl?=~ShL#A%n`&{Ux)`-&mPZ0{0uDRDz{s@`5>WfoIo->Sz?e} zO{f_vBURQjP(_fLQ&B46w8`Lq7k~4lH3!pax3R5sURRJw(~$AP(M(4P{(0lB%F2{_ z=O;hh>TVNBKkW~65|>0eD+<_8hUmeOP6}d~rlk~EmGfvn4^*_uJhVz64RT~kg8`&H zW6hK}liZ;?fJP|aCw@<-YiGdZDXc&|WFw`0=v^o(=I7FNZ9|@!WA6e|0tZYa`3j_g ztW3vN78jHCva^3XyfH#$A89eeM)r#so&b8;o%XluUOW|>IVCgza$V8#O~nzk;Hh7d zKQ;=-R<{0@mOJD3JU2`~3sM$lcGV^B+Cr{=^|?}~%c7Qe$?9n*;Rvz>_g_$7&XRt7 zYIfWvtk0UIN{^HxO#PHWO0o1xFH!lcX48V&gKhdZ6#Ca`0obJ9ty*H3IJBAmN 
[remaining GIT binary patch data for resource/greenfield-peer.png omitted]
diff --git a/tests/block_test.go b/tests/block_test.go
deleted file mode 100644
index 5d6669e9d4..0000000000
--- a/tests/block_test.go
+++ /dev/null
@@ -1,94 +0,0 @@
-// Copyright 2015 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package tests
-
-import (
-	"math/rand"
-	"runtime"
-	"testing"
-
-	"github.com/ethereum/go-ethereum/common"
-	"github.com/ethereum/go-ethereum/core/rawdb"
-)
-
-func TestBlockchain(t *testing.T) {
-	bt := new(testMatcher)
-	// General state tests are 'exported' as blockchain tests, but we can run them natively.
-	// For speedier CI-runs, the line below can be uncommented, so those are skipped.
-	// For now, in hardfork-times (Berlin), we run the tests both as StateTests and
-	// as blockchain tests, since the latter also covers things like receipt root
-	// bt.skipLoad(`^GeneralStateTests/`)
-
-	// Skip random failures due to selfish mining test
-	bt.skipLoad(`.*bcForgedTest/bcForkUncle\.json`)
-
-	// Slow tests
-	bt.slow(`.*bcExploitTest/DelegateCallSpam.json`)
-	bt.slow(`.*bcExploitTest/ShanghaiLove.json`)
-	bt.slow(`.*bcExploitTest/SuicideIssue.json`)
-	bt.slow(`.*/bcForkStressTest/`)
-	bt.slow(`.*/bcGasPricerTest/RPC_API_Test.json`)
-	bt.slow(`.*/bcWalletTest/`)
-
-	// Very slow test
-	bt.skipLoad(`^GeneralStateTests/VMTests/vmPerformance/.*`)
-	bt.skipLoad(`.*/stTimeConsuming/.*`)
-	// test takes a lot for time and goes easily OOM because of sha3 calculation on a huge range,
-	// using 4.6 TGas
-	bt.skipLoad(`.*randomStatetest94.json.*`)
-
-	bt.walk(t, blockTestDir, func(t *testing.T, name string, test *BlockTest) {
-		if runtime.GOARCH == "386" && runtime.GOOS == "windows" && rand.Int63()%2 == 0 {
-			t.Skip("test (randomly) skipped on 32-bit windows")
-		}
-		execBlockTest(t, bt, test)
-	})
-	// There is also a LegacyTests folder, containing blockchain tests generated
-	// prior to Istanbul. However, they are all derived from GeneralStateTests,
-	// which run natively, so there's no reason to run them here.
-}
-
-// TestExecutionSpecBlocktests runs the test fixtures from execution-spec-tests.
-func TestExecutionSpecBlocktests(t *testing.T) {
-	if !common.FileExist(executionSpecBlockchainTestDir) {
-		t.Skipf("directory %s does not exist", executionSpecBlockchainTestDir)
-	}
-	bt := new(testMatcher)
-
-	bt.walk(t, executionSpecBlockchainTestDir, func(t *testing.T, name string, test *BlockTest) {
-		execBlockTest(t, bt, test)
-	})
-}
-
-func execBlockTest(t *testing.T, bt *testMatcher, test *BlockTest) {
-	if err := bt.checkFailure(t, test.Run(false, rawdb.HashScheme, nil, nil)); err != nil {
-		t.Errorf("test in hash mode without snapshotter failed: %v", err)
-		return
-	}
-	if err := bt.checkFailure(t, test.Run(true, rawdb.HashScheme, nil, nil)); err != nil {
-		t.Errorf("test in hash mode with snapshotter failed: %v", err)
-		return
-	}
-	if err := bt.checkFailure(t, test.Run(false, rawdb.PathScheme, nil, nil)); err != nil {
-		t.Errorf("test in path mode without snapshotter failed: %v", err)
-		return
-	}
-	if err := bt.checkFailure(t, test.Run(true, rawdb.PathScheme, nil, nil)); err != nil {
-		t.Errorf("test in path mode with snapshotter failed: %v", err)
-		return
-	}
-}
diff --git a/tests/difficulty_test.go b/tests/difficulty_test.go
deleted file mode 100644
index 03e14df7c4..0000000000
--- a/tests/difficulty_test.go
+++ /dev/null
@@ -1,111 +0,0 @@
-// Copyright 2017 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package tests
-
-import (
-	"math/big"
-	"testing"
-
-	"github.com/ethereum/go-ethereum/params"
-)
-
-var (
-	mainnetChainConfig = params.ChainConfig{
-		ChainID: big.NewInt(1),
-		HomesteadBlock: big.NewInt(1150000),
-		DAOForkBlock: big.NewInt(1920000),
-		DAOForkSupport: true,
-		EIP150Block: big.NewInt(2463000),
-		EIP155Block: big.NewInt(2675000),
-		EIP158Block: big.NewInt(2675000),
-		ByzantiumBlock: big.NewInt(4370000),
-	}
-
-	ropstenChainConfig = params.ChainConfig{
-		ChainID: big.NewInt(3),
-		HomesteadBlock: big.NewInt(0),
-		DAOForkBlock: nil,
-		DAOForkSupport: true,
-		EIP150Block: big.NewInt(0),
-		EIP155Block: big.NewInt(10),
-		EIP158Block: big.NewInt(10),
-		ByzantiumBlock: big.NewInt(1_700_000),
-		ConstantinopleBlock: big.NewInt(4_230_000),
-		PetersburgBlock: big.NewInt(4_939_394),
-		IstanbulBlock: big.NewInt(6_485_846),
-		MuirGlacierBlock: big.NewInt(7_117_117),
-		BerlinBlock: big.NewInt(9_812_189),
-		LondonBlock: big.NewInt(10_499_401),
-		TerminalTotalDifficulty: new(big.Int).SetUint64(50_000_000_000_000_000),
-		TerminalTotalDifficultyPassed: true,
-	}
-)
-
-func TestDifficulty(t *testing.T) {
-	t.Parallel()
-
-	dt := new(testMatcher)
-	// Not difficulty-tests
-	dt.skipLoad("hexencodetest.*")
-	dt.skipLoad("crypto.*")
-	dt.skipLoad("blockgenesistest\\.json")
-	dt.skipLoad("genesishashestest\\.json")
-	dt.skipLoad("keyaddrtest\\.json")
-	dt.skipLoad("txtest\\.json")
-
-	// files are 2 years old, contains strange values
-	dt.skipLoad("difficultyCustomHomestead\\.json")
-
-	dt.config("Ropsten", ropstenChainConfig)
-	dt.config("Frontier", params.ChainConfig{})
-
-	dt.config("Homestead", params.ChainConfig{
-		HomesteadBlock: big.NewInt(0),
-	})
-
-	dt.config("Byzantium", params.ChainConfig{
-		ByzantiumBlock: big.NewInt(0),
-	})
-
-	dt.config("Frontier", ropstenChainConfig)
-	dt.config("MainNetwork", mainnetChainConfig)
-	dt.config("CustomMainNetwork", mainnetChainConfig)
-	dt.config("Constantinople", params.ChainConfig{
-		ConstantinopleBlock: big.NewInt(0),
-	})
-	dt.config("EIP2384", params.ChainConfig{
-		MuirGlacierBlock: big.NewInt(0),
-	})
-	dt.config("EIP4345", params.ChainConfig{
-		ArrowGlacierBlock: big.NewInt(0),
-	})
-	dt.config("EIP5133", params.ChainConfig{
-		GrayGlacierBlock: big.NewInt(0),
-	})
-	dt.config("difficulty.json", mainnetChainConfig)
-
-	dt.walk(t, difficultyTestDir, func(t *testing.T, name string, test *DifficultyTest) {
-		cfg := dt.findConfig(t)
-		if test.ParentDifficulty.Cmp(params.MinimumDifficulty) < 0 {
-			t.Skip("difficulty below minimum")
-			return
-		}
-		if err := dt.checkFailure(t, test.Run(cfg)); err != nil {
-			t.Error(err)
-		}
-	})
-}