From ee322769c92bd9b422266679be90cde0f3680791 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 29 Jun 2023 17:01:47 +0200 Subject: [PATCH 01/15] feat: add default max readers 32000 (#3465) --- crates/storage/db/src/implementation/mdbx/mod.rs | 14 +++++++++----- crates/storage/db/src/lib.rs | 2 +- crates/storage/libmdbx-rs/src/environment.rs | 14 +++++++++++--- 3 files changed, 21 insertions(+), 9 deletions(-) diff --git a/crates/storage/db/src/implementation/mdbx/mod.rs b/crates/storage/db/src/implementation/mdbx/mod.rs index 616029acd5745..219a4336a6c56 100644 --- a/crates/storage/db/src/implementation/mdbx/mod.rs +++ b/crates/storage/db/src/implementation/mdbx/mod.rs @@ -11,11 +11,16 @@ use reth_libmdbx::{ SyncMode, RO, RW, }; use std::{ops::Deref, path::Path}; +use tx::Tx; pub mod cursor; - pub mod tx; -use tx::Tx; + +const GIGABYTE: usize = 1024 * 1024 * 1024; +const TERABYTE: usize = GIGABYTE * 1024; + +/// MDBX allows up to 32767 readers (`MDBX_READERS_LIMIT`), but we limit it to slightly below that +const DEFAULT_MAX_READERS: u64 = 32_000; /// Environment used when opening a MDBX environment. RO/RW. #[derive(Debug)] @@ -52,9 +57,6 @@ impl Database for Env { } } -const GIGABYTE: usize = 1024 * 1024 * 1024; -const TERABYTE: usize = GIGABYTE * 1024; - impl Env { /// Opens the database at the specified path with the given `EnvKind`. /// @@ -85,6 +87,8 @@ impl Env { coalesce: true, ..Default::default() }) + // configure more readers + .set_max_readers(DEFAULT_MAX_READERS) .open(path) .map_err(|e| DatabaseError::FailedToOpen(e.into()))?, }; diff --git a/crates/storage/db/src/lib.rs b/crates/storage/db/src/lib.rs index c2b5d539dcb79..8ce4973f4ab83 100644 --- a/crates/storage/db/src/lib.rs +++ b/crates/storage/db/src/lib.rs @@ -114,7 +114,7 @@ pub fn init_db>(path: P) -> eyre::Result } #[cfg(feature = "mdbx")] { - let db = Env::::open(rpath, EnvKind::RW)?; + let db = DatabaseEnv::open(rpath, EnvKind::RW)?; db.create_tables()?; Ok(db) } diff --git a/crates/storage/libmdbx-rs/src/environment.rs b/crates/storage/libmdbx-rs/src/environment.rs index abb3457c0bd14..b37285a4d622d 100644 --- a/crates/storage/libmdbx-rs/src/environment.rs +++ b/crates/storage/libmdbx-rs/src/environment.rs @@ -6,7 +6,6 @@ use crate::{ Mode, Transaction, TransactionKind, }; use byteorder::{ByteOrder, NativeEndian}; -use libc::c_uint; use mem::size_of; use std::{ ffi::CString, @@ -376,7 +375,7 @@ where E: EnvironmentKind, { flags: EnvironmentFlags, - max_readers: Option, + max_readers: Option, max_dbs: Option, rp_augment_limit: Option, loose_limit: Option, @@ -453,6 +452,15 @@ where } } + // set max readers if specified + if let Some(max_readers) = self.max_readers { + mdbx_result(ffi::mdbx_env_set_option( + env, + ffi::MDBX_opt_max_readers, + max_readers, + ))?; + } + #[cfg(unix)] fn path_to_bytes>(path: P) -> Vec { use std::os::unix::ffi::OsStrExt; @@ -544,7 +552,7 @@ where /// This defines the number of slots in the lock table that is used to track readers in the /// the environment. The default is 126. Starting a read-only transaction normally ties a lock /// table slot to the [Transaction] object until it or the [Environment] object is destroyed. 
- pub fn set_max_readers(&mut self, max_readers: c_uint) -> &mut Self { + pub fn set_max_readers(&mut self, max_readers: u64) -> &mut Self { self.max_readers = Some(max_readers); self } From d3465e2e32a2332bc843281d7704eff398ebe6da Mon Sep 17 00:00:00 2001 From: Alexey Shekhirin Date: Thu, 29 Jun 2023 16:04:43 +0100 Subject: [PATCH 02/15] feat(stages, storage): pruning configuration (#3341) Co-authored-by: Roman Krasiuk --- Cargo.lock | 70 ++++++++++++------- crates/config/Cargo.toml | 12 ++-- crates/config/src/config.rs | 41 +++++++++++ crates/primitives/Cargo.toml | 3 +- crates/primitives/src/lib.rs | 2 + crates/primitives/src/prune/checkpoint.rs | 13 ++++ crates/primitives/src/prune/mod.rs | 5 ++ crates/primitives/src/prune/mode.rs | 56 +++++++++++++++ crates/stages/Cargo.toml | 6 ++ crates/stages/src/stage.rs | 30 +++++++- .../storage/db/src/tables/codecs/compact.rs | 3 +- crates/storage/db/src/tables/models/mod.rs | 1 + 12 files changed, 210 insertions(+), 32 deletions(-) create mode 100644 crates/primitives/src/prune/checkpoint.rs create mode 100644 crates/primitives/src/prune/mod.rs create mode 100644 crates/primitives/src/prune/mode.rs diff --git a/Cargo.lock b/Cargo.lock index 06036c4a16af3..ce0c9737001f9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -513,7 +513,7 @@ dependencies = [ "bitflags 2.3.2", "boa_interner", "boa_macros", - "indexmap", + "indexmap 1.9.3", "num-bigint", "rustc-hash", ] @@ -535,7 +535,7 @@ dependencies = [ "dashmap", "fast-float", "icu_normalizer", - "indexmap", + "indexmap 1.9.3", "itertools", "num-bigint", "num-integer", @@ -587,7 +587,7 @@ dependencies = [ "boa_gc", "boa_macros", "hashbrown 0.14.0", - "indexmap", + "indexmap 1.9.3", "once_cell", "phf", "rustc-hash", @@ -860,7 +860,7 @@ checksum = "71655c45cb9845d3270c9d6df84ebe72b4dad3c2ba3f7023ad47c144e4e473a5" dependencies = [ "bitflags 1.3.2", "clap_lex 0.2.4", - "indexmap", + "indexmap 1.9.3", "textwrap", ] @@ -1874,6 +1874,12 @@ dependencies = [ "syn 2.0.18", ] +[[package]] +name = "equivalent" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "88bffebc5d80432c9b140ee17875ff173a8ab62faad5b257da912bd2f6c1c0a1" + [[package]] name = "errno" version = "0.2.8" @@ -2013,7 +2019,7 @@ dependencies = [ "serde", "serde_json", "syn 2.0.18", - "toml 0.7.3", + "toml 0.7.5", "walkdir", ] @@ -2549,7 +2555,7 @@ dependencies = [ "futures-sink", "futures-util", "http", - "indexmap", + "indexmap 1.9.3", "slab", "tokio", "tokio-util", @@ -3074,6 +3080,16 @@ dependencies = [ "serde", ] +[[package]] +name = "indexmap" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d5477fe2230a79769d8dc68e0eabf5437907c0457a5614a9e8dddb67f65eb65d" +dependencies = [ + "equivalent", + "hashbrown 0.14.0", +] + [[package]] name = "inferno" version = "0.11.15" @@ -3081,7 +3097,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2fb7c1b80a1dfa604bb4a649a5c5aeef3d913f7c520cb42b40e534e8a61bcdfc" dependencies = [ "ahash 0.8.3", - "indexmap", + "indexmap 1.9.3", "is-terminal", "itoa", "log", @@ -3651,7 +3667,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8603921e1f54ef386189335f288441af761e0fc61bcb552168d9cedfe63ebc70" dependencies = [ "hyper", - "indexmap", + "indexmap 1.9.3", "ipnet", "metrics", "metrics-util", @@ -3699,7 +3715,7 @@ dependencies = [ "crossbeam-epoch", "crossbeam-utils", "hashbrown 0.12.3", - "indexmap", + "indexmap 1.9.3", "metrics", "num_cpus", "ordered-float", @@ -4972,7 
+4988,7 @@ dependencies = [ "tempfile", "thiserror", "tokio", - "toml 0.7.3", + "toml 0.7.5", "tracing", "tui", "vergen", @@ -5081,6 +5097,8 @@ dependencies = [ "reth-downloaders", "reth-net-nat", "reth-network", + "reth-primitives", + "reth-stages", "secp256k1", "serde", "serde_json", @@ -5330,7 +5348,7 @@ dependencies = [ "byteorder", "criterion", "derive_more", - "indexmap", + "indexmap 1.9.3", "libc", "lifetimed-bytes", "parking_lot 0.12.1", @@ -5485,6 +5503,7 @@ name = "reth-primitives" version = "0.1.0-alpha.1" dependencies = [ "arbitrary", + "assert_matches", "bytes", "crc", "criterion", @@ -5519,6 +5538,7 @@ dependencies = [ "tiny-keccak", "tokio", "tokio-stream", + "toml 0.7.5", "tracing", "triehash", "url", @@ -5814,6 +5834,8 @@ dependencies = [ "reth-revm", "reth-rlp", "reth-trie", + "serde", + "serde_json", "thiserror", "tokio", "tokio-stream", @@ -6389,9 +6411,9 @@ dependencies = [ [[package]] name = "serde_spanned" -version = "0.6.1" +version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0efd8caf556a6cebd3b285caf480045fcc1ac04f6bd786b09a6f11af30c4fcf4" +checksum = "96426c9936fd7a0124915f9185ea1d20aa9445cc9821142f0a73bc9207a2e186" dependencies = [ "serde", ] @@ -6417,7 +6439,7 @@ dependencies = [ "base64 0.13.1", "chrono", "hex", - "indexmap", + "indexmap 1.9.3", "serde", "serde_json", "serde_with_macros", @@ -7190,9 +7212,9 @@ dependencies = [ [[package]] name = "toml" -version = "0.7.3" +version = "0.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b403acf6f2bb0859c93c7f0d967cb4a75a7ac552100f9322faf64dc047669b21" +checksum = "1ebafdf5ad1220cb59e7d17cf4d2c72015297b75b19a10472f99b89225089240" dependencies = [ "serde", "serde_spanned", @@ -7202,20 +7224,20 @@ dependencies = [ [[package]] name = "toml_datetime" -version = "0.6.1" +version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ab8ed2edee10b50132aed5f331333428b011c99402b5a534154ed15746f9622" +checksum = "7cda73e2f1397b1262d6dfdcef8aafae14d1de7748d66822d3bfeeb6d03e5e4b" dependencies = [ "serde", ] [[package]] name = "toml_edit" -version = "0.19.8" +version = "0.19.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "239410c8609e8125456927e6707163a3b1fdb40561e4b803bc041f466ccfdc13" +checksum = "266f016b7f039eec8a1a80dfe6156b633d208b9fccca5e4db1d6775b0c4e34a7" dependencies = [ - "indexmap", + "indexmap 2.0.0", "serde", "serde_spanned", "toml_datetime", @@ -7244,7 +7266,7 @@ dependencies = [ "futures-core", "futures-util", "hdrhistogram", - "indexmap", + "indexmap 1.9.3", "pin-project", "pin-project-lite", "rand 0.8.5", @@ -8069,9 +8091,9 @@ checksum = "1a515f5799fe4961cb532f983ce2b23082366b898e52ffbce459c86f67c8378a" [[package]] name = "winnow" -version = "0.4.1" +version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae8970b36c66498d8ff1d66685dc86b91b29db0c7739899012f63a63814b4b28" +checksum = "ca0ace3845f0d96209f0375e6d367e3eb87eb65d27d445bdc9f1843a26f39448" dependencies = [ "memchr", ] diff --git a/crates/config/Cargo.toml b/crates/config/Cargo.toml index ac8084ae10dda..c278440cff75d 100644 --- a/crates/config/Cargo.toml +++ b/crates/config/Cargo.toml @@ -10,17 +10,19 @@ repository.workspace = true [dependencies] # reth reth-network = { path = "../net/network" } -reth-net-nat = { path = "../../crates/net/nat" } -reth-discv4 = { path = "../../crates/net/discv4" } -reth-downloaders = { path = "../../crates/net/downloaders" } +reth-net-nat 
= { path = "../net/nat" } +reth-discv4 = { path = "../net/discv4" } +reth-downloaders = { path = "../net/downloaders" } +reth-stages = { path = "../../crates/stages" } +reth-primitives = { path = "../primitives" } # io serde = { workspace = true } serde_json = { workspace = true } -#crypto +# crypto secp256k1 = { workspace = true, features = ["global-context", "rand-std", "recovery"] } confy = "0.5" -tempfile = "3.4" +tempfile = "3.4" \ No newline at end of file diff --git a/crates/config/src/config.rs b/crates/config/src/config.rs index 95d41e022eea3..4593dedd75df5 100644 --- a/crates/config/src/config.rs +++ b/crates/config/src/config.rs @@ -5,6 +5,7 @@ use reth_downloaders::{ headers::reverse_headers::ReverseHeadersDownloaderBuilder, }; use reth_network::{NetworkConfigBuilder, PeersConfig, SessionsConfig}; +use reth_primitives::PruneMode; use secp256k1::SecretKey; use serde::{Deserialize, Serialize}; use std::path::PathBuf; @@ -16,6 +17,9 @@ pub struct Config { /// Configuration for each stage in the pipeline. // TODO(onbjerg): Can we make this easier to maintain when we add/remove stages? pub stages: StageConfig, + /// Configuration for pruning. + #[serde(skip_serializing_if = "Option::is_none")] + pub prune: Option, /// Configuration for the discovery service. pub peers: PeersConfig, /// Configuration for peer sessions. @@ -276,6 +280,43 @@ impl Default for IndexHistoryConfig { } } +/// Pruning configuration. +#[derive(Debug, Clone, Copy, Deserialize, PartialEq, Serialize)] +#[serde(default)] +pub struct PruneConfig { + /// Minimum pruning interval measured in blocks. + pub block_interval: u64, + /// Pruning configuration for every part of the data that can be pruned. + pub parts: PruneParts, +} + +impl Default for PruneConfig { + fn default() -> Self { + Self { block_interval: 10, parts: PruneParts::default() } + } +} + +/// Pruning configuration for every part of the data that can be pruned. +#[derive(Debug, Clone, Default, Copy, Deserialize, PartialEq, Serialize)] +#[serde(default)] +pub struct PruneParts { + /// Sender Recovery pruning configuration. + #[serde(skip_serializing_if = "Option::is_none")] + pub sender_recovery: Option, + /// Transaction Lookup pruning configuration. + #[serde(skip_serializing_if = "Option::is_none")] + pub transaction_lookup: Option, + /// Receipts pruning configuration. + #[serde(skip_serializing_if = "Option::is_none")] + pub receipts: Option, + /// Account History pruning configuration. + #[serde(skip_serializing_if = "Option::is_none")] + pub account_history: Option, + /// Storage History pruning configuration. + #[serde(skip_serializing_if = "Option::is_none")] + pub storage_history: Option, +} + #[cfg(test)] mod tests { use super::Config; diff --git a/crates/primitives/Cargo.toml b/crates/primitives/Cargo.toml index a34e21094d04f..12822404dde03 100644 --- a/crates/primitives/Cargo.toml +++ b/crates/primitives/Cargo.toml @@ -13,7 +13,6 @@ description = "Commonly used types in reth." 
reth-rlp = { workspace = true, features = ["std", "derive", "ethereum-types"] } reth-rlp-derive = { path = "../rlp/rlp-derive" } reth-codecs = { path = "../storage/codecs" } - revm-primitives = { workspace = true, features = ["serde"] } # ethereum @@ -79,6 +78,8 @@ revm-primitives = { workspace = true, features = ["arbitrary"] } arbitrary = { version = "1.1.7", features = ["derive"] } proptest = { version = "1.0" } proptest-derive = "0.3" +assert_matches = "1.5.0" +toml = "0.7.4" # necessary so we don't hit a "undeclared 'std'": # https://github.com/paradigmxyz/reth/pull/177#discussion_r1021172198 diff --git a/crates/primitives/src/lib.rs b/crates/primitives/src/lib.rs index 73845c2fd2264..ada92bd096b5f 100644 --- a/crates/primitives/src/lib.rs +++ b/crates/primitives/src/lib.rs @@ -39,6 +39,7 @@ pub mod listener; mod log; mod net; mod peer; +mod prune; mod receipt; pub mod stage; mod storage; @@ -76,6 +77,7 @@ pub use net::{ SEPOLIA_BOOTNODES, }; pub use peer::{PeerId, WithPeerId}; +pub use prune::{PruneCheckpoint, PruneMode}; pub use receipt::{Receipt, ReceiptWithBloom, ReceiptWithBloomRef}; pub use revm_primitives::JumpMap; pub use serde_helper::JsonU256; diff --git a/crates/primitives/src/prune/checkpoint.rs b/crates/primitives/src/prune/checkpoint.rs new file mode 100644 index 0000000000000..a0c445fdfb505 --- /dev/null +++ b/crates/primitives/src/prune/checkpoint.rs @@ -0,0 +1,13 @@ +use crate::{prune::PruneMode, BlockNumber}; +use reth_codecs::{main_codec, Compact}; + +/// Saves the pruning progress of a stage. +#[main_codec] +#[derive(Debug, PartialEq, Eq, Clone, Copy)] +#[cfg_attr(test, derive(Default))] +pub struct PruneCheckpoint { + /// Highest pruned block number. + block_number: BlockNumber, + /// Prune mode. + prune_mode: PruneMode, +} diff --git a/crates/primitives/src/prune/mod.rs b/crates/primitives/src/prune/mod.rs new file mode 100644 index 0000000000000..2814f8bc4b718 --- /dev/null +++ b/crates/primitives/src/prune/mod.rs @@ -0,0 +1,5 @@ +mod checkpoint; +mod mode; + +pub use checkpoint::PruneCheckpoint; +pub use mode::PruneMode; diff --git a/crates/primitives/src/prune/mode.rs b/crates/primitives/src/prune/mode.rs new file mode 100644 index 0000000000000..47e6778e80180 --- /dev/null +++ b/crates/primitives/src/prune/mode.rs @@ -0,0 +1,56 @@ +use crate::BlockNumber; +use reth_codecs::{main_codec, Compact}; + +/// Prune mode. +#[main_codec] +#[derive(Debug, PartialEq, Eq, Clone, Copy)] +#[serde(rename_all = "lowercase")] +pub enum PruneMode { + /// Prune all blocks. + Full, + /// Prune blocks before the `head-N` block number. In other words, keep last N blocks. + Distance(u64), + /// Prune blocks before the specified block number. The specified block number is not pruned. 
+ Before(BlockNumber), +} + +#[cfg(test)] +impl Default for PruneMode { + fn default() -> Self { + Self::Distance(0) + } +} + +#[cfg(test)] +mod tests { + use crate::prune::PruneMode; + use assert_matches::assert_matches; + use serde::Deserialize; + + #[test] + fn prune_mode_deserialize() { + #[derive(Debug, Deserialize)] + struct Config { + a: Option, + b: Option, + c: Option, + d: Option, + } + + let toml_str = r#" + a = "full" + b = { distance = 10 } + c = { before = 20 } + "#; + + assert_matches!( + toml::from_str(toml_str), + Ok(Config { + a: Some(PruneMode::Full), + b: Some(PruneMode::Distance(10)), + c: Some(PruneMode::Before(20)), + d: None + }) + ); + } +} diff --git a/crates/stages/Cargo.toml b/crates/stages/Cargo.toml index d60bc3dc22949..bfe2b62884e18 100644 --- a/crates/stages/Cargo.toml +++ b/crates/stages/Cargo.toml @@ -34,6 +34,9 @@ pin-project = { workspace = true } # observability tracing = { workspace = true } +# io +serde = { workspace = true } + # misc thiserror = { workspace = true } aquamarine = "0.3.0" @@ -63,6 +66,9 @@ paste = "1.0" pprof = { version = "0.11", features = ["flamegraph", "frame-pointer", "criterion"] } criterion = { version = "0.4.0", features = ["async_futures"] } +# io +serde_json = { workspace = true } + [features] test-utils = [] diff --git a/crates/stages/src/stage.rs b/crates/stages/src/stage.rs index 7580c6bab4228..71c5f1ad8c26d 100644 --- a/crates/stages/src/stage.rs +++ b/crates/stages/src/stage.rs @@ -3,7 +3,7 @@ use async_trait::async_trait; use reth_db::{cursor::DbCursorRO, database::Database, tables, transaction::DbTx}; use reth_primitives::{ stage::{StageCheckpoint, StageId}, - BlockNumber, TxNumber, + BlockNumber, PruneMode, TxNumber, }; use reth_provider::{BlockReader, DatabaseProviderRW, ProviderError}; use std::{ @@ -194,3 +194,31 @@ pub trait Stage: Send + Sync { input: UnwindInput, ) -> Result; } + +/// Prune target. +#[derive(Debug, Clone, Copy)] +pub enum PruneTarget { + /// Prune all blocks, i.e. not save any data. + All, + /// Prune blocks up to the specified block number, inclusive. + Block(BlockNumber), +} + +impl PruneTarget { + /// Returns new target to prune towards, according to stage prune mode [PruneMode] + /// and current head [BlockNumber]. + pub fn new(prune_mode: PruneMode, head: BlockNumber) -> Self { + match prune_mode { + PruneMode::Full => PruneTarget::All, + PruneMode::Distance(distance) => { + Self::Block(head.saturating_sub(distance).saturating_sub(1)) + } + PruneMode::Before(before_block) => Self::Block(before_block.saturating_sub(1)), + } + } + + /// Returns true if the target is [PruneTarget::All], i.e. prune all blocks. + pub fn is_all(&self) -> bool { + matches!(self, Self::All) + } +} diff --git a/crates/storage/db/src/tables/codecs/compact.rs b/crates/storage/db/src/tables/codecs/compact.rs index 1f12b7ddcf15d..882fe6b317b65 100644 --- a/crates/storage/db/src/tables/codecs/compact.rs +++ b/crates/storage/db/src/tables/codecs/compact.rs @@ -48,7 +48,8 @@ impl_compression_for_compact!( AccountBeforeTx, TransactionSignedNoHash, CompactU256, - StageCheckpoint + StageCheckpoint, + PruneCheckpoint ); macro_rules! 
impl_compression_fixed_compact { diff --git a/crates/storage/db/src/tables/models/mod.rs b/crates/storage/db/src/tables/models/mod.rs index ddd2c6b1cf947..da746efda686f 100644 --- a/crates/storage/db/src/tables/models/mod.rs +++ b/crates/storage/db/src/tables/models/mod.rs @@ -73,6 +73,7 @@ impl Decode for Address { Ok(Address::from_slice(value.as_ref())) } } + impl Encode for H256 { type Encoded = [u8; 32]; fn encode(self) -> Self::Encoded { From f49feff5a735ec791ea4dcc641f0baa3a73b2516 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 29 Jun 2023 17:28:28 +0200 Subject: [PATCH 03/15] chore: phase out some ethers usage (#3467) --- crates/net/network-api/src/test_utils.rs | 4 +- crates/primitives/src/block.rs | 16 +--- crates/primitives/src/chain/mod.rs | 3 +- crates/primitives/src/header.rs | 77 ++++++++++--------- crates/primitives/src/lib.rs | 1 - .../rpc/rpc-types/src/eth/transaction/mod.rs | 6 +- crates/stages/src/stages/tx_lookup.rs | 4 +- 7 files changed, 50 insertions(+), 61 deletions(-) diff --git a/crates/net/network-api/src/test_utils.rs b/crates/net/network-api/src/test_utils.rs index 6b2a1c3768928..f914138b925fb 100644 --- a/crates/net/network-api/src/test_utils.rs +++ b/crates/net/network-api/src/test_utils.rs @@ -3,7 +3,7 @@ use crate::{ }; use async_trait::async_trait; use reth_eth_wire::{DisconnectReason, ProtocolVersion}; -use reth_primitives::{rpc::Chain::Mainnet, NodeRecord, PeerId}; +use reth_primitives::{Chain, NodeRecord, PeerId}; use reth_rpc_types::{EthProtocolInfo, NetworkStatus}; use std::net::{IpAddr, SocketAddr}; @@ -33,7 +33,7 @@ impl NetworkInfo for NoopNetwork { } fn chain_id(&self) -> u64 { - Mainnet.into() + Chain::mainnet().into() } fn is_syncing(&self) -> bool { diff --git a/crates/primitives/src/block.rs b/crates/primitives/src/block.rs index 07741609c770e..f5a2b10b20ec6 100644 --- a/crates/primitives/src/block.rs +++ b/crates/primitives/src/block.rs @@ -1,7 +1,6 @@ use crate::{ - Address, BlockHash, BlockNumber, Header, SealedHeader, TransactionSigned, Withdrawal, H256, + Address, BlockHash, BlockNumber, Header, SealedHeader, TransactionSigned, Withdrawal, H256, U64, }; -use ethers_core::types::{BlockNumber as EthersBlockNumber, U64}; use fixed_hash::rustc_hex::FromHexError; use reth_codecs::derive_arbitrary; use reth_rlp::{Decodable, DecodeError, Encodable, RlpDecodable, RlpEncodable}; @@ -564,19 +563,6 @@ impl From for BlockNumberOrTag { } } -impl From for BlockNumberOrTag { - fn from(value: EthersBlockNumber) -> Self { - match value { - EthersBlockNumber::Latest => BlockNumberOrTag::Latest, - EthersBlockNumber::Finalized => BlockNumberOrTag::Finalized, - EthersBlockNumber::Safe => BlockNumberOrTag::Safe, - EthersBlockNumber::Earliest => BlockNumberOrTag::Earliest, - EthersBlockNumber::Pending => BlockNumberOrTag::Pending, - EthersBlockNumber::Number(num) => BlockNumberOrTag::Number(num.as_u64()), - } - } -} - impl From for BlockNumberOrTag { fn from(num: U64) -> Self { num.as_u64().into() diff --git a/crates/primitives/src/chain/mod.rs b/crates/primitives/src/chain/mod.rs index 8a5a5c7d338b5..43f4647a8c990 100644 --- a/crates/primitives/src/chain/mod.rs +++ b/crates/primitives/src/chain/mod.rs @@ -1,8 +1,7 @@ use crate::{ net::{goerli_nodes, mainnet_nodes, sepolia_nodes}, - NodeRecord, U256, + NodeRecord, U256, U64, }; -use ethers_core::types::U64; use reth_codecs::add_arbitrary_tests; use reth_rlp::{Decodable, Encodable}; use serde::{Deserialize, Serialize}; diff --git a/crates/primitives/src/header.rs b/crates/primitives/src/header.rs index 
d8778de70ca85..5a87c6411b615 100644 --- a/crates/primitives/src/header.rs +++ b/crates/primitives/src/header.rs @@ -2,10 +2,10 @@ use crate::{ basefee::calculate_next_block_base_fee, keccak256, proofs::{EMPTY_LIST_HASH, EMPTY_ROOT}, - BlockHash, BlockNumHash, BlockNumber, Bloom, Bytes, H160, H256, U256, + BlockHash, BlockNumHash, BlockNumber, Bloom, Bytes, H160, H256, H64, U256, }; use bytes::{Buf, BufMut, BytesMut}; -use ethers_core::types::{Block, H256 as EthersH256, H64}; + use reth_codecs::{add_arbitrary_tests, derive_arbitrary, main_codec, Compact}; use reth_rlp::{length_of_length, Decodable, Encodable, EMPTY_STRING_CODE}; use serde::{Deserialize, Serialize}; @@ -343,40 +343,6 @@ impl<'a> arbitrary::Arbitrary<'a> for SealedHeader { } } -impl From<&Block> for Header { - fn from(block: &Block) -> Self { - Header { - parent_hash: block.parent_hash.0.into(), - number: block.number.unwrap().as_u64(), - gas_limit: block.gas_limit.as_u64(), - difficulty: block.difficulty.into(), - nonce: block.nonce.unwrap().to_low_u64_be(), - extra_data: block.extra_data.0.clone().into(), - state_root: block.state_root.0.into(), - transactions_root: block.transactions_root.0.into(), - receipts_root: block.receipts_root.0.into(), - timestamp: block.timestamp.as_u64(), - mix_hash: block.mix_hash.unwrap().0.into(), - beneficiary: block.author.unwrap().0.into(), - base_fee_per_gas: block.base_fee_per_gas.map(|fee| fee.as_u64()), - ommers_hash: block.uncles_hash.0.into(), - gas_used: block.gas_used.as_u64(), - withdrawals_root: None, - logs_bloom: block.logs_bloom.unwrap_or_default().0.into(), - } - } -} - -impl From<&Block> for SealedHeader { - fn from(block: &Block) -> Self { - let header = Header::from(block); - match block.hash { - Some(hash) => header.seal(hash.0.into()), - None => header.seal_slow(), - } - } -} - impl Default for SealedHeader { fn default() -> Self { Header::default().seal_slow() @@ -506,6 +472,45 @@ impl From for bool { } } +mod ethers_compat { + use super::*; + use ethers_core::types::{Block, H256 as EthersH256}; + + impl From<&Block> for Header { + fn from(block: &Block) -> Self { + Header { + parent_hash: block.parent_hash.0.into(), + number: block.number.unwrap().as_u64(), + gas_limit: block.gas_limit.as_u64(), + difficulty: block.difficulty.into(), + nonce: block.nonce.unwrap().to_low_u64_be(), + extra_data: block.extra_data.0.clone().into(), + state_root: block.state_root.0.into(), + transactions_root: block.transactions_root.0.into(), + receipts_root: block.receipts_root.0.into(), + timestamp: block.timestamp.as_u64(), + mix_hash: block.mix_hash.unwrap().0.into(), + beneficiary: block.author.unwrap().0.into(), + base_fee_per_gas: block.base_fee_per_gas.map(|fee| fee.as_u64()), + ommers_hash: block.uncles_hash.0.into(), + gas_used: block.gas_used.as_u64(), + withdrawals_root: None, + logs_bloom: block.logs_bloom.unwrap_or_default().0.into(), + } + } + } + + impl From<&Block> for SealedHeader { + fn from(block: &Block) -> Self { + let header = Header::from(block); + match block.hash { + Some(hash) => header.seal(hash.0.into()), + None => header.seal_slow(), + } + } + } +} + #[cfg(test)] mod tests { use super::{Bytes, Decodable, Encodable, Header, H256}; diff --git a/crates/primitives/src/lib.rs b/crates/primitives/src/lib.rs index ada92bd096b5f..59c12624794a1 100644 --- a/crates/primitives/src/lib.rs +++ b/crates/primitives/src/lib.rs @@ -115,7 +115,6 @@ pub type StorageValue = U256; pub type Selector = [u8; 4]; pub use ethers_core::{ - types as rpc, types::{BigEndianHash, H128, H64, 
U64}, utils as rpc_utils, }; diff --git a/crates/rpc/rpc-types/src/eth/transaction/mod.rs b/crates/rpc/rpc-types/src/eth/transaction/mod.rs index 1fc2e3ca8d768..c61d6080b9b93 100644 --- a/crates/rpc/rpc-types/src/eth/transaction/mod.rs +++ b/crates/rpc/rpc-types/src/eth/transaction/mod.rs @@ -11,9 +11,9 @@ pub use signature::Signature; pub use typed::*; use reth_primitives::{ - rpc::transaction::eip2930::AccessListItem, Address, BlockNumber, Bytes, - Transaction as PrimitiveTransaction, TransactionKind as PrimitiveTransactionKind, - TransactionSignedEcRecovered, TxType, H256, U128, U256, U64, + AccessListItem, Address, BlockNumber, Bytes, Transaction as PrimitiveTransaction, + TransactionKind as PrimitiveTransactionKind, TransactionSignedEcRecovered, TxType, H256, U128, + U256, U64, }; use serde::{Deserialize, Serialize}; diff --git a/crates/stages/src/stages/tx_lookup.rs b/crates/stages/src/stages/tx_lookup.rs index f572a7f27b6f3..09e0e6d674fa5 100644 --- a/crates/stages/src/stages/tx_lookup.rs +++ b/crates/stages/src/stages/tx_lookup.rs @@ -9,7 +9,7 @@ use reth_db::{ DatabaseError, }; use reth_primitives::{ - rpc_utils::keccak256, + keccak256, stage::{EntitiesCheckpoint, StageCheckpoint, StageId}, TransactionSignedNoHash, TxNumber, H256, }; @@ -178,7 +178,7 @@ fn calculate_hash( ) -> Result<(H256, TxNumber), Box> { let (tx_id, tx) = entry.map_err(|e| Box::new(e.into()))?; tx.transaction.encode_with_signature(&tx.signature, rlp_buf, false); - Ok((H256(keccak256(rlp_buf)), tx_id)) + Ok((keccak256(rlp_buf), tx_id)) } fn stage_checkpoint( From b0df0262e34da72a6ed8f23c5103cc4c4c5d0323 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 29 Jun 2023 18:09:13 +0200 Subject: [PATCH 04/15] test: run geth blacklist test also serial (#3469) --- Cargo.lock | 31 +++++++++++++++++++++++--- crates/net/network/Cargo.toml | 2 +- crates/net/network/tests/it/connect.rs | 1 + 3 files changed, 30 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ce0c9737001f9..9d3d294f7747b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5388,7 +5388,7 @@ dependencies = [ "proc-macro2 1.0.60", "quote 1.0.28", "regex", - "serial_test", + "serial_test 0.10.0", "syn 2.0.18", "trybuild", ] @@ -5457,7 +5457,7 @@ dependencies = [ "secp256k1", "serde", "serde_json", - "serial_test", + "serial_test 2.0.0", "tempfile", "thiserror", "tokio", @@ -6469,7 +6469,21 @@ dependencies = [ "lazy_static", "log", "parking_lot 0.12.1", - "serial_test_derive", + "serial_test_derive 0.10.0", +] + +[[package]] +name = "serial_test" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0e56dd856803e253c8f298af3f4d7eb0ae5e23a737252cd90bb4f3b435033b2d" +dependencies = [ + "dashmap", + "futures", + "lazy_static", + "log", + "parking_lot 0.12.1", + "serial_test_derive 2.0.0", ] [[package]] @@ -6483,6 +6497,17 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "serial_test_derive" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "91d129178576168c589c9ec973feedf7d3126c01ac2bf08795109aa35b69fb8f" +dependencies = [ + "proc-macro2 1.0.60", + "quote 1.0.28", + "syn 2.0.18", +] + [[package]] name = "sha-1" version = "0.9.8" diff --git a/crates/net/network/Cargo.toml b/crates/net/network/Cargo.toml index e1f29f39ba36c..8a62fec063a0e 100644 --- a/crates/net/network/Cargo.toml +++ b/crates/net/network/Cargo.toml @@ -86,7 +86,7 @@ enr = { version = "0.8.1", features = ["serde", "rust-secp256k1"] } # misc hex = "0.4" tempfile = "3.3" 
-serial_test = "0.10" +serial_test = "2.0" [features] default = ["serde"] diff --git a/crates/net/network/tests/it/connect.rs b/crates/net/network/tests/it/connect.rs index 7f831d1890a27..e495980535226 100644 --- a/crates/net/network/tests/it/connect.rs +++ b/crates/net/network/tests/it/connect.rs @@ -309,6 +309,7 @@ async fn test_connect_to_trusted_peer() { } #[tokio::test(flavor = "multi_thread")] +#[serial_test::serial] #[cfg_attr(not(feature = "geth-tests"), ignore)] async fn test_incoming_node_id_blacklist() { reth_tracing::init_test_tracing(); From f9d9387c9a2ae43e5e52a2eac918dae70125328c Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 29 Jun 2023 19:26:35 +0200 Subject: [PATCH 05/15] docs: update call fees docs on fallback (#3471) Co-authored-by: Alexey Shekhirin --- crates/rpc/rpc/src/eth/revm_utils.rs | 26 +++++++++++++++++--------- 1 file changed, 17 insertions(+), 9 deletions(-) diff --git a/crates/rpc/rpc/src/eth/revm_utils.rs b/crates/rpc/rpc/src/eth/revm_utils.rs index 54e996870af43..cd552b398a549 100644 --- a/crates/rpc/rpc/src/eth/revm_utils.rs +++ b/crates/rpc/rpc/src/eth/revm_utils.rs @@ -374,8 +374,8 @@ pub(crate) struct CallFees { impl CallFees { /// Ensures the fields of a [CallRequest] are not conflicting. /// - /// If no `gasPrice` or `maxFeePerGas` is set, then the `gas_price` in the response will - /// fallback to the given `basefee`. + /// If no `gasPrice` or `maxFeePerGas` is set, then the `gas_price` in the returned `gas_price` + /// will be `0`. See: fn ensure_fees( call_gas_price: Option, call_max_fee: Option, @@ -383,14 +383,10 @@ impl CallFees { base_fee: U256, ) -> EthResult { match (call_gas_price, call_max_fee, call_priority_fee) { - (None, None, None) => { - // when none are specified, they are all set to zero - Ok(CallFees { gas_price: U256::ZERO, max_priority_fee_per_gas: None }) - } (gas_price, None, None) => { - // request for a legacy transaction - // set everything to zero - let gas_price = gas_price.unwrap_or(base_fee); + // either legacy transaction or no fee fields are specified + // when no fields are specified, set gas price to zero + let gas_price = gas_price.unwrap_or(U256::ZERO); Ok(CallFees { gas_price, max_priority_fee_per_gas: None }) } (None, max_fee_per_gas, max_priority_fee_per_gas) => { @@ -525,3 +521,15 @@ where db: Default::default(), } } + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_ensure_0_fallback() { + let CallFees { gas_price, .. } = + CallFees::ensure_fees(None, None, None, U256::from(99)).unwrap(); + assert_eq!(gas_price, U256::ZERO); + } +} From dcae84a2b1ee9e09f8df2e8942057ec51c773ba6 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 29 Jun 2023 21:20:22 +0200 Subject: [PATCH 06/15] chore: mirror geth's encoding order for callframe (#3473) --- crates/rpc/rpc-types/src/eth/trace/geth/call.rs | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/crates/rpc/rpc-types/src/eth/trace/geth/call.rs b/crates/rpc/rpc-types/src/eth/trace/geth/call.rs index 8af626d115413..fa873b6fc7b89 100644 --- a/crates/rpc/rpc-types/src/eth/trace/geth/call.rs +++ b/crates/rpc/rpc-types/src/eth/trace/geth/call.rs @@ -4,17 +4,13 @@ use serde::{Deserialize, Serialize}; /// #[derive(Debug, Clone, Default, PartialEq, Eq, Serialize, Deserialize)] pub struct CallFrame { - #[serde(rename = "type")] - pub typ: String, pub from: Address, - #[serde(default, skip_serializing_if = "Option::is_none")] - pub to: Option
, - #[serde(default, skip_serializing_if = "Option::is_none")] - pub value: Option, #[serde(default, deserialize_with = "from_int_or_hex")] pub gas: U256, #[serde(default, deserialize_with = "from_int_or_hex", rename = "gasUsed")] pub gas_used: U256, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub to: Option
, pub input: Bytes, #[serde(default, skip_serializing_if = "Option::is_none")] pub output: Option, @@ -26,6 +22,10 @@ pub struct CallFrame { pub calls: Option>, #[serde(default, skip_serializing_if = "Option::is_none")] pub logs: Option>, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub value: Option, + #[serde(rename = "type")] + pub typ: String, } #[derive(Debug, Clone, Default, PartialEq, Eq, Serialize, Deserialize)] From 0ec32255e90e2b420db8992210b14c06315647e4 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 29 Jun 2023 21:30:15 +0200 Subject: [PATCH 07/15] test: set --authrpc.port to 0 for Geth instance (#3476) --- crates/net/network/tests/it/connect.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/crates/net/network/tests/it/connect.rs b/crates/net/network/tests/it/connect.rs index e495980535226..11ceb5f8a8261 100644 --- a/crates/net/network/tests/it/connect.rs +++ b/crates/net/network/tests/it/connect.rs @@ -318,7 +318,7 @@ async fn test_incoming_node_id_blacklist() { // instantiate geth and add ourselves as a peer let temp_dir = tempfile::tempdir().unwrap().into_path(); - let geth = Geth::new().data_dir(temp_dir).disable_discovery().spawn(); + let geth = Geth::new().data_dir(temp_dir).disable_discovery().authrpc_port(0).spawn(); let geth_endpoint = SocketAddr::new([127, 0, 0, 1].into(), geth.port()); let provider = Provider::::try_from(format!("http://{geth_endpoint}")).unwrap(); @@ -371,7 +371,7 @@ async fn test_incoming_connect_with_single_geth() { // instantiate geth and add ourselves as a peer let temp_dir = tempfile::tempdir().unwrap().into_path(); - let geth = Geth::new().data_dir(temp_dir).disable_discovery().spawn(); + let geth = Geth::new().data_dir(temp_dir).disable_discovery().authrpc_port(0).spawn(); let geth_endpoint = SocketAddr::new([127, 0, 0, 1].into(), geth.port()); let provider = Provider::::try_from(format!("http://{geth_endpoint}")).unwrap(); @@ -429,7 +429,7 @@ async fn test_outgoing_connect_with_single_geth() { // instantiate geth and add ourselves as a peer let temp_dir = tempfile::tempdir().unwrap().into_path(); - let geth = Geth::new().disable_discovery().data_dir(temp_dir).spawn(); + let geth = Geth::new().disable_discovery().data_dir(temp_dir).authrpc_port(0).spawn(); let geth_p2p_port = geth.p2p_port().unwrap(); let geth_socket = SocketAddr::new([127, 0, 0, 1].into(), geth_p2p_port); @@ -474,7 +474,7 @@ async fn test_geth_disconnect() { // instantiate geth and add ourselves as a peer let temp_dir = tempfile::tempdir().unwrap().into_path(); - let geth = Geth::new().disable_discovery().data_dir(temp_dir).spawn(); + let geth = Geth::new().disable_discovery().data_dir(temp_dir).authrpc_port(0).spawn(); let geth_p2p_port = geth.p2p_port().unwrap(); let geth_socket = SocketAddr::new([127, 0, 0, 1].into(), geth_p2p_port); From 8dedf0f2ea89f123e63027f294173efb1609c955 Mon Sep 17 00:00:00 2001 From: Bjerg Date: Thu, 29 Jun 2023 21:42:25 +0200 Subject: [PATCH 08/15] chore: fix update prios link (#3478) --- .github/workflows/release.yml | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index c40eaa34d9751..b39f04fa145dc 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -199,9 +199,7 @@ jobs: | Payload Builders | | | Non-Payload Builders | | - *See [Update - Priorities](https://paradigmxyz.github.io/reth/installation-priorities.html) - more information about this table.* + *See [Update 
Priorities](https://paradigmxyz.github.io/reth/installation/priorities.html) for more information about this table.* ## All Changes From 26b80f4f9878684f69f382d9d6f83f985f06aa20 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 29 Jun 2023 22:54:14 +0200 Subject: [PATCH 09/15] fix: set missing record logs (#3472) --- crates/rpc/rpc/src/debug.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/crates/rpc/rpc/src/debug.rs b/crates/rpc/rpc/src/debug.rs index 6f997172c14db..528da093e45db 100644 --- a/crates/rpc/rpc/src/debug.rs +++ b/crates/rpc/rpc/src/debug.rs @@ -380,7 +380,8 @@ where .map_err(|_| EthApiError::InvalidTracerConfig)?; let mut inspector = TracingInspector::new( - TracingInspectorConfig::from_geth_config(&config), + TracingInspectorConfig::from_geth_config(&config) + .set_record_logs(call_config.with_log.unwrap_or_default()), ); let (res, _) = inspect(db, env, &mut inspector)?; From 40f2a510087905ea5c93780ac6e67dac3ca81efe Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Thu, 29 Jun 2023 23:22:08 +0200 Subject: [PATCH 10/15] chore: replace Option with just vec (#3474) --- .../src/tracing/builder/geth.rs | 2 +- .../revm/revm-inspectors/src/tracing/types.rs | 25 +++++++++---------- .../rpc/rpc-types/src/eth/trace/geth/call.rs | 11 ++++---- 3 files changed, 18 insertions(+), 20 deletions(-) diff --git a/crates/revm/revm-inspectors/src/tracing/builder/geth.rs b/crates/revm/revm-inspectors/src/tracing/builder/geth.rs index 95f7a79534aff..9786eb6a9c04c 100644 --- a/crates/revm/revm-inspectors/src/tracing/builder/geth.rs +++ b/crates/revm/revm-inspectors/src/tracing/builder/geth.rs @@ -137,7 +137,7 @@ impl GethTraceBuilder { // we need to ensure that calls are in order they are called: the last child node is // the last call, but since we walk up the tree, we need to always // insert at position 0 - parent_frame.1.calls.get_or_insert_with(Vec::new).insert(0, call); + parent_frame.1.calls.insert(0, call); } else { debug_assert!(call_frames.is_empty(), "only one root node has no parent"); return call diff --git a/crates/revm/revm-inspectors/src/tracing/types.rs b/crates/revm/revm-inspectors/src/tracing/types.rs index 43681a5b8e51e..feb3524e4135a 100644 --- a/crates/revm/revm-inspectors/src/tracing/types.rs +++ b/crates/revm/revm-inspectors/src/tracing/types.rs @@ -378,8 +378,8 @@ impl CallTraceNode { output: Some(self.trace.output.clone().into()), error: None, revert_reason: None, - calls: None, - logs: None, + calls: Default::default(), + logs: Default::default(), }; // we need to populate error and revert reason @@ -388,17 +388,16 @@ impl CallTraceNode { call_frame.error = self.trace.as_error(); } - if include_logs { - call_frame.logs = Some( - self.logs - .iter() - .map(|log| CallLogFrame { - address: Some(self.trace.address), - topics: Some(log.topics.clone()), - data: Some(log.data.clone().into()), - }) - .collect(), - ); + if include_logs && !self.logs.is_empty() { + call_frame.logs = self + .logs + .iter() + .map(|log| CallLogFrame { + address: Some(self.trace.address), + topics: Some(log.topics.clone()), + data: Some(log.data.clone().into()), + }) + .collect(); } call_frame diff --git a/crates/rpc/rpc-types/src/eth/trace/geth/call.rs b/crates/rpc/rpc-types/src/eth/trace/geth/call.rs index fa873b6fc7b89..99085208f1448 100644 --- a/crates/rpc/rpc-types/src/eth/trace/geth/call.rs +++ b/crates/rpc/rpc-types/src/eth/trace/geth/call.rs @@ -18,10 +18,10 @@ pub struct CallFrame { pub error: Option, #[serde(default, skip_serializing_if = "Option::is_none")] 
pub revert_reason: Option, - #[serde(default, skip_serializing_if = "Option::is_none")] - pub calls: Option>, - #[serde(default, skip_serializing_if = "Option::is_none")] - pub logs: Option>, + #[serde(default, skip_serializing_if = "Vec::is_empty")] + pub calls: Vec, + #[serde(default, skip_serializing_if = "Vec::is_empty")] + pub logs: Vec, #[serde(default, skip_serializing_if = "Option::is_none")] pub value: Option, #[serde(rename = "type")] @@ -83,7 +83,6 @@ mod tests { let _trace: CallFrame = serde_json::from_str(DEFAULT).unwrap(); let _trace: CallFrame = serde_json::from_str(LEGACY).unwrap(); let _trace: CallFrame = serde_json::from_str(ONLY_TOP_CALL).unwrap(); - let trace: CallFrame = serde_json::from_str(WITH_LOG).unwrap(); - let _logs = trace.logs.unwrap(); + let _trace: CallFrame = serde_json::from_str(WITH_LOG).unwrap(); } } From 793838975b528d6151aed5a12d5e0d95c0dd905b Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 30 Jun 2023 00:07:31 +0200 Subject: [PATCH 11/15] chore: put EthPubSubInner in Arc (#3463) --- crates/rpc/rpc/src/eth/pubsub.rs | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/crates/rpc/rpc/src/eth/pubsub.rs b/crates/rpc/rpc/src/eth/pubsub.rs index ac76d4fabbe1a..a3b5e0a03d031 100644 --- a/crates/rpc/rpc/src/eth/pubsub.rs +++ b/crates/rpc/rpc/src/eth/pubsub.rs @@ -7,6 +7,7 @@ use reth_primitives::TxHash; use reth_provider::{BlockReader, CanonStateSubscriptions, EvmEnvProvider}; use reth_rpc_api::EthPubSubApiServer; use reth_rpc_types::FilteredParams; +use std::sync::Arc; use reth_rpc_types::{ pubsub::{ @@ -29,7 +30,7 @@ use tokio_stream::{ #[derive(Clone)] pub struct EthPubSub { /// All nested fields bundled together. - inner: EthPubSubInner, + inner: Arc>, /// The type that's used to spawn subscription tasks. subscription_task_spawner: Box, } @@ -59,7 +60,7 @@ impl EthPubSub subscription_task_spawner: Box, ) -> Self { let inner = EthPubSubInner { provider, pool, chain_events, network }; - Self { inner, subscription_task_spawner } + Self { inner: Arc::new(inner), subscription_task_spawner } } } @@ -91,7 +92,7 @@ where /// The actual handler for and accepted [`EthPubSub::subscribe`] call. async fn handle_accepted( - pubsub: EthPubSubInner, + pubsub: Arc>, accepted_sink: SubscriptionSink, kind: SubscriptionKind, params: Option, @@ -105,7 +106,7 @@ where match kind { SubscriptionKind::NewHeads => { let stream = pubsub - .into_new_headers_stream() + .new_headers_stream() .map(|block| EthSubscriptionResult::Header(Box::new(block.into()))); pipe_from_stream(accepted_sink, stream).await } @@ -116,13 +117,12 @@ where _ => FilteredParams::default(), }; let stream = - pubsub.into_log_stream(filter).map(|log| EthSubscriptionResult::Log(Box::new(log))); + pubsub.log_stream(filter).map(|log| EthSubscriptionResult::Log(Box::new(log))); pipe_from_stream(accepted_sink, stream).await } SubscriptionKind::NewPendingTransactions => { - let stream = pubsub - .into_pending_transaction_stream() - .map(EthSubscriptionResult::TransactionHash); + let stream = + pubsub.pending_transaction_stream().map(EthSubscriptionResult::TransactionHash); pipe_from_stream(accepted_sink, stream).await } SubscriptionKind::Syncing => { @@ -241,7 +241,7 @@ where Pool: TransactionPool + 'static, { /// Returns a stream that yields all transactions emitted by the txpool. 
- fn into_pending_transaction_stream(self) -> impl Stream { + fn pending_transaction_stream(&self) -> impl Stream { ReceiverStream::new(self.pool.pending_transactions_listener()) } } @@ -254,7 +254,7 @@ where Pool: 'static, { /// Returns a stream that yields all new RPC blocks. - fn into_new_headers_stream(self) -> impl Stream { + fn new_headers_stream(&self) -> impl Stream { BroadcastStream::new(self.chain_events.subscribe_to_canonical_state()) .map(|new_block| { let new_chain = new_block.expect("new block subscription never ends; qed"); @@ -274,7 +274,7 @@ where } /// Returns a stream that yields all logs that match the given filter. - fn into_log_stream(self, filter: FilteredParams) -> impl Stream { + fn log_stream(&self, filter: FilteredParams) -> impl Stream { BroadcastStream::new(self.chain_events.subscribe_to_canonical_state()) .map(move |canon_state| { canon_state.expect("new block subscription never ends; qed").block_receipts() From 02059387b03c78215d5a8c6c476c85fddb32a874 Mon Sep 17 00:00:00 2001 From: Matthias Seitz Date: Fri, 30 Jun 2023 00:12:58 +0200 Subject: [PATCH 12/15] chore: rename invalid header metrics scope (#3468) --- crates/consensus/beacon/src/engine/invalid_headers.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/crates/consensus/beacon/src/engine/invalid_headers.rs b/crates/consensus/beacon/src/engine/invalid_headers.rs index a07908cddeb63..6006c047adaed 100644 --- a/crates/consensus/beacon/src/engine/invalid_headers.rs +++ b/crates/consensus/beacon/src/engine/invalid_headers.rs @@ -60,7 +60,7 @@ impl InvalidHeaderCache { // update metrics self.metrics.known_ancestor_inserts.increment(1); - self.metrics.invalid_headers.set(self.headers.len() as f64); + self.metrics.count.set(self.headers.len() as f64); } } @@ -74,7 +74,7 @@ impl InvalidHeaderCache { // update metrics self.metrics.unique_inserts.increment(1); - self.metrics.invalid_headers.set(self.headers.len() as f64); + self.metrics.count.set(self.headers.len() as f64); } } } @@ -88,10 +88,10 @@ struct HeaderEntry { /// Metrics for the invalid headers cache. #[derive(Metrics)] -#[metrics(scope = "invalid_header_cache")] +#[metrics(scope = "consensus.engine.beacon.invalid_headers")] struct InvalidHeaderCacheMetrics { /// The total number of invalid headers in the cache. - invalid_headers: Gauge, + count: Gauge, /// The number of inserts with a known ancestor. known_ancestor_inserts: Counter, /// The number of unique invalid header inserts (i.e. without a known ancestor). 
From 7850cc0b8d0a0cdc40c715933e1c66fb2f94b9fa Mon Sep 17 00:00:00 2001 From: Bjerg Date: Fri, 30 Jun 2023 00:58:15 +0200 Subject: [PATCH 13/15] test: disable flakey test (#3486) --- crates/net/network/tests/it/connect.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/crates/net/network/tests/it/connect.rs b/crates/net/network/tests/it/connect.rs index 11ceb5f8a8261..f8ffd584db84e 100644 --- a/crates/net/network/tests/it/connect.rs +++ b/crates/net/network/tests/it/connect.rs @@ -363,7 +363,8 @@ async fn test_incoming_node_id_blacklist() { #[tokio::test(flavor = "multi_thread")] #[serial_test::serial] -#[cfg_attr(not(feature = "geth-tests"), ignore)] +// #[cfg_attr(not(feature = "geth-tests"), ignore)] +#[ignore] // TODO: Re-enable once we figure out why this test is flakey async fn test_incoming_connect_with_single_geth() { reth_tracing::init_test_tracing(); tokio::time::timeout(GETH_TIMEOUT, async move { From c3ea430e4a75afe8954addc36191710fcb1a8695 Mon Sep 17 00:00:00 2001 From: joshieDo <93316087+joshieDo@users.noreply.github.com> Date: Fri, 30 Jun 2023 00:52:26 +0100 Subject: [PATCH 14/15] refactor: move `mdbx::test-utils` to `reth_db::test-utils` and add `DatabaseEnvRO` (#3466) --- bin/reth/src/db/list.rs | 11 +- bin/reth/src/db/mod.rs | 24 ++--- bin/reth/src/p2p/mod.rs | 4 +- bin/reth/src/stage/drop.rs | 10 +- bin/reth/src/stage/dump/execution.rs | 5 +- bin/reth/src/stage/dump/hashing_account.rs | 4 +- bin/reth/src/stage/dump/hashing_storage.rs | 4 +- bin/reth/src/stage/dump/merkle.rs | 4 +- bin/reth/src/stage/dump/mod.rs | 4 +- bin/reth/src/stage/unwind.rs | 10 +- crates/blockchain-tree/src/blockchain_tree.rs | 2 +- crates/consensus/beacon/src/engine/mod.rs | 2 +- crates/net/downloaders/src/bodies/bodies.rs | 10 +- crates/net/downloaders/src/bodies/task.rs | 6 +- .../net/downloaders/src/bodies/test_utils.rs | 8 +- .../downloaders/src/test_utils/file_client.rs | 8 +- crates/staged-sync/src/utils/init.rs | 2 +- crates/stages/src/lib.rs | 2 +- crates/stages/src/pipeline/mod.rs | 14 +-- crates/stages/src/sets.rs | 2 +- crates/stages/src/stages/execution.rs | 17 ++- crates/stages/src/stages/hashing_storage.rs | 5 +- crates/stages/src/test_utils/runner.rs | 5 +- crates/stages/src/test_utils/test_db.rs | 24 ++--- crates/storage/db/benches/criterion.rs | 14 ++- crates/storage/db/benches/hash_keys.rs | 3 +- crates/storage/db/benches/utils.rs | 8 +- .../storage/db/src/implementation/mdbx/mod.rs | 102 ++++++++---------- crates/storage/db/src/lib.rs | 82 +++++++++++++- crates/storage/provider/src/post_state/mod.rs | 19 ++-- .../provider/src/providers/database/mod.rs | 11 +- .../src/providers/state/historical.rs | 2 +- crates/storage/provider/src/transaction.rs | 2 +- crates/trie/src/hashed_cursor/post_state.rs | 2 +- crates/trie/src/trie.rs | 2 +- crates/trie/src/trie_cursor/account_cursor.rs | 2 +- crates/trie/src/trie_cursor/storage_cursor.rs | 2 +- crates/trie/src/walker.rs | 2 +- testing/ef-tests/src/cases/blockchain_test.rs | 2 +- 39 files changed, 228 insertions(+), 214 deletions(-) diff --git a/bin/reth/src/db/list.rs b/bin/reth/src/db/list.rs index ee9de7eeb448f..b7fe572ccc42b 100644 --- a/bin/reth/src/db/list.rs +++ b/bin/reth/src/db/list.rs @@ -3,12 +3,7 @@ use clap::Parser; use super::tui::DbListTUI; use eyre::WrapErr; -use reth_db::{ - database::Database, - mdbx::{Env, NoWriteMap}, - table::Table, - TableType, TableViewer, Tables, -}; +use reth_db::{database::Database, table::Table, DatabaseEnvRO, TableType, TableViewer, Tables}; use tracing::error; const 
DEFAULT_NUM_ITEMS: &str = "5"; @@ -34,7 +29,7 @@ pub struct Command { impl Command { /// Execute `db list` command - pub fn execute(self, tool: &DbTool<'_, Env>) -> eyre::Result<()> { + pub fn execute(self, tool: &DbTool<'_, DatabaseEnvRO>) -> eyre::Result<()> { if self.table.table_type() == TableType::DupSort { error!(target: "reth::cli", "Unsupported table."); } @@ -46,7 +41,7 @@ impl Command { } struct ListTableViewer<'a> { - tool: &'a DbTool<'a, Env>, + tool: &'a DbTool<'a, DatabaseEnvRO>, args: &'a Command, } diff --git a/bin/reth/src/db/mod.rs b/bin/reth/src/db/mod.rs index d44a977aba8d3..1ce1bc3262de2 100644 --- a/bin/reth/src/db/mod.rs +++ b/bin/reth/src/db/mod.rs @@ -10,12 +10,12 @@ use eyre::WrapErr; use human_bytes::human_bytes; use reth_db::{ database::Database, - mdbx::{Env, NoWriteMap, WriteMap}, + open_db, open_db_read_only, version::{get_db_version, DatabaseVersionError, DB_VERSION}, - DatabaseEnv, Tables, + Tables, }; use reth_primitives::ChainSpec; -use std::{path::Path, sync::Arc}; +use std::sync::Arc; mod get; mod list; @@ -84,7 +84,7 @@ impl Command { match self.command { // TODO: We'll need to add this on the DB trait. Subcommands::Stats { .. } => { - let db = read_only_db(&db_path)?; + let db = open_db_read_only(&db_path)?; let tool = DbTool::new(&db, self.chain.clone())?; let mut stats_table = ComfyTable::new(); stats_table.load_preset(comfy_table::presets::ASCII_MARKDOWN); @@ -135,17 +135,17 @@ impl Command { println!("{stats_table}"); } Subcommands::List(command) => { - let db = read_only_db(&db_path)?; + let db = open_db_read_only(&db_path)?; let tool = DbTool::new(&db, self.chain.clone())?; command.execute(&tool)?; } Subcommands::Get(command) => { - let db = read_only_db(&db_path)?; + let db = open_db_read_only(&db_path)?; let tool = DbTool::new(&db, self.chain.clone())?; command.execute(&tool)?; } Subcommands::Drop => { - let db = read_write_db(&db_path)?; + let db = open_db(&db_path)?; let mut tool = DbTool::new(&db, self.chain.clone())?; tool.drop(db_path)?; } @@ -173,16 +173,6 @@ impl Command { } } -fn read_only_db(path: &Path) -> eyre::Result> { - Env::::open(path, reth_db::mdbx::EnvKind::RO) - .with_context(|| format!("Could not open database at path: {}", path.display())) -} - -fn read_write_db(path: &Path) -> eyre::Result { - Env::::open(path, reth_db::mdbx::EnvKind::RW) - .with_context(|| format!("Could not open database at path: {}", path.display())) -} - #[cfg(test)] mod tests { use super::*; diff --git a/bin/reth/src/p2p/mod.rs b/bin/reth/src/p2p/mod.rs index 48c6d2f0e232f..c853bedafa922 100644 --- a/bin/reth/src/p2p/mod.rs +++ b/bin/reth/src/p2p/mod.rs @@ -11,7 +11,7 @@ use crate::{ use backon::{ConstantBuilder, Retryable}; use clap::{Parser, Subcommand}; use reth_config::Config; -use reth_db::mdbx::{Env, EnvKind, WriteMap}; +use reth_db::open_db; use reth_discv4::NatResolver; use reth_interfaces::p2p::bodies::client::BodiesClient; use reth_primitives::{BlockHashOrNumber, ChainSpec, NodeRecord}; @@ -101,7 +101,7 @@ impl Command { /// Execute `p2p` command pub async fn execute(&self) -> eyre::Result<()> { let tempdir = tempfile::TempDir::new()?; - let noop_db = Arc::new(Env::::open(&tempdir.into_path(), EnvKind::RW)?); + let noop_db = Arc::new(open_db(&tempdir.into_path())?); // add network name to data dir let data_dir = self.datadir.unwrap_or_chain_default(self.chain.chain); diff --git a/bin/reth/src/stage/drop.rs b/bin/reth/src/stage/drop.rs index 4c2ed15857b66..cd6e94436c6ca 100644 --- a/bin/reth/src/stage/drop.rs +++ b/bin/reth/src/stage/drop.rs 
@@ -5,13 +5,7 @@ use crate::{ utils::DbTool, }; use clap::Parser; -use reth_db::{ - database::Database, - mdbx::{Env, WriteMap}, - tables, - transaction::DbTxMut, - DatabaseEnv, -}; +use reth_db::{database::Database, open_db, tables, transaction::DbTxMut, DatabaseEnv}; use reth_primitives::{stage::StageId, ChainSpec}; use reth_staged_sync::utils::init::{insert_genesis_header, insert_genesis_state}; use std::sync::Arc; @@ -58,7 +52,7 @@ impl Command { let db_path = data_dir.db_path(); std::fs::create_dir_all(&db_path)?; - let db = Env::::open(db_path.as_ref(), reth_db::mdbx::EnvKind::RW)?; + let db = open_db(db_path.as_ref())?; let tool = DbTool::new(&db, self.chain.clone())?; diff --git a/bin/reth/src/stage/dump/execution.rs b/bin/reth/src/stage/dump/execution.rs index de0499c04993f..9200065d5517f 100644 --- a/bin/reth/src/stage/dump/execution.rs +++ b/bin/reth/src/stage/dump/execution.rs @@ -3,6 +3,7 @@ use crate::utils::DbTool; use eyre::Result; use reth_db::{ cursor::DbCursorRO, database::Database, table::TableImporter, tables, transaction::DbTx, + DatabaseEnv, }; use reth_primitives::{stage::StageCheckpoint, ChainSpec}; use reth_provider::ProviderFactory; @@ -33,7 +34,7 @@ pub(crate) async fn dump_execution_stage( /// Imports all the tables that can be copied over a range. fn import_tables_with_range( - output_db: &reth_db::mdbx::Env, + output_db: &DatabaseEnv, db_tool: &mut DbTool<'_, DB>, from: u64, to: u64, @@ -92,7 +93,7 @@ async fn unwind_and_copy( db_tool: &mut DbTool<'_, DB>, from: u64, tip_block_number: u64, - output_db: &reth_db::mdbx::Env, + output_db: &DatabaseEnv, ) -> eyre::Result<()> { let factory = ProviderFactory::new(db_tool.db, db_tool.chain.clone()); let provider = factory.provider_rw()?; diff --git a/bin/reth/src/stage/dump/hashing_account.rs b/bin/reth/src/stage/dump/hashing_account.rs index 83b1ae39f3ae5..cc0e72536d312 100644 --- a/bin/reth/src/stage/dump/hashing_account.rs +++ b/bin/reth/src/stage/dump/hashing_account.rs @@ -1,7 +1,7 @@ use super::setup; use crate::utils::DbTool; use eyre::Result; -use reth_db::{database::Database, table::TableImporter, tables}; +use reth_db::{database::Database, table::TableImporter, tables, DatabaseEnv}; use reth_primitives::{stage::StageCheckpoint, BlockNumber, ChainSpec}; use reth_provider::ProviderFactory; use reth_stages::{stages::AccountHashingStage, Stage, UnwindInput}; @@ -36,7 +36,7 @@ async fn unwind_and_copy( db_tool: &mut DbTool<'_, DB>, from: u64, tip_block_number: u64, - output_db: &reth_db::mdbx::Env, + output_db: &DatabaseEnv, ) -> eyre::Result<()> { let factory = ProviderFactory::new(db_tool.db, db_tool.chain.clone()); let provider = factory.provider_rw()?; diff --git a/bin/reth/src/stage/dump/hashing_storage.rs b/bin/reth/src/stage/dump/hashing_storage.rs index c8e0252195aca..1af985281d76d 100644 --- a/bin/reth/src/stage/dump/hashing_storage.rs +++ b/bin/reth/src/stage/dump/hashing_storage.rs @@ -1,7 +1,7 @@ use super::setup; use crate::utils::DbTool; use eyre::Result; -use reth_db::{database::Database, table::TableImporter, tables}; +use reth_db::{database::Database, table::TableImporter, tables, DatabaseEnv}; use reth_primitives::{stage::StageCheckpoint, ChainSpec}; use reth_provider::ProviderFactory; use reth_stages::{stages::StorageHashingStage, Stage, UnwindInput}; @@ -31,7 +31,7 @@ async fn unwind_and_copy( db_tool: &mut DbTool<'_, DB>, from: u64, tip_block_number: u64, - output_db: &reth_db::mdbx::Env, + output_db: &DatabaseEnv, ) -> eyre::Result<()> { let factory = ProviderFactory::new(db_tool.db, 
db_tool.chain.clone()); let provider = factory.provider_rw()?; diff --git a/bin/reth/src/stage/dump/merkle.rs b/bin/reth/src/stage/dump/merkle.rs index eb73259ac23a8..601abd569cf31 100644 --- a/bin/reth/src/stage/dump/merkle.rs +++ b/bin/reth/src/stage/dump/merkle.rs @@ -1,7 +1,7 @@ use super::setup; use crate::utils::DbTool; use eyre::Result; -use reth_db::{database::Database, table::TableImporter, tables}; +use reth_db::{database::Database, table::TableImporter, tables, DatabaseEnv}; use reth_primitives::{stage::StageCheckpoint, BlockNumber, ChainSpec}; use reth_provider::ProviderFactory; use reth_stages::{ @@ -45,7 +45,7 @@ async fn unwind_and_copy( db_tool: &mut DbTool<'_, DB>, range: (u64, u64), tip_block_number: u64, - output_db: &reth_db::mdbx::Env, + output_db: &DatabaseEnv, ) -> eyre::Result<()> { let (from, to) = range; let factory = ProviderFactory::new(db_tool.db, db_tool.chain.clone()); diff --git a/bin/reth/src/stage/dump/mod.rs b/bin/reth/src/stage/dump/mod.rs index 38add81dabae3..d9a33ccc61386 100644 --- a/bin/reth/src/stage/dump/mod.rs +++ b/bin/reth/src/stage/dump/mod.rs @@ -6,7 +6,7 @@ use crate::{ use clap::Parser; use reth_db::{ cursor::DbCursorRO, database::Database, init_db, table::TableImporter, tables, - transaction::DbTx, + transaction::DbTx, DatabaseEnv, }; use reth_primitives::ChainSpec; use std::{path::PathBuf, sync::Arc}; @@ -129,7 +129,7 @@ pub(crate) fn setup( to: u64, output_db: &PathBuf, db_tool: &mut DbTool<'_, DB>, -) -> eyre::Result<(reth_db::mdbx::Env, u64)> { +) -> eyre::Result<(DatabaseEnv, u64)> { assert!(from < to, "FROM block should be bigger than TO block."); info!(target: "reth::cli", ?output_db, "Creating separate db"); diff --git a/bin/reth/src/stage/unwind.rs b/bin/reth/src/stage/unwind.rs index 143fac13a9103..9300f598af51c 100644 --- a/bin/reth/src/stage/unwind.rs +++ b/bin/reth/src/stage/unwind.rs @@ -5,13 +5,7 @@ use crate::{ dirs::{DataDirPath, MaybePlatformPath}, }; use clap::{Parser, Subcommand}; -use reth_db::{ - cursor::DbCursorRO, - database::Database, - mdbx::{Env, WriteMap}, - tables, - transaction::DbTx, -}; +use reth_db::{cursor::DbCursorRO, database::Database, open_db, tables, transaction::DbTx}; use reth_primitives::{BlockHashOrNumber, ChainSpec}; use reth_provider::{BlockExecutionWriter, ProviderFactory}; use std::{ops::RangeInclusive, sync::Arc}; @@ -61,7 +55,7 @@ impl Command { eyre::bail!("Database {db_path:?} does not exist.") } - let db = Env::::open(db_path.as_ref(), reth_db::mdbx::EnvKind::RW)?; + let db = open_db(db_path.as_ref())?; let range = self.command.unwind_range(&db)?; diff --git a/crates/blockchain-tree/src/blockchain_tree.rs b/crates/blockchain-tree/src/blockchain_tree.rs index d951939f4986b..31c1e4c568983 100644 --- a/crates/blockchain-tree/src/blockchain_tree.rs +++ b/crates/blockchain-tree/src/blockchain_tree.rs @@ -1085,7 +1085,7 @@ mod tests { use crate::block_buffer::BufferedBlocks; use assert_matches::assert_matches; use linked_hash_set::LinkedHashSet; - use reth_db::{mdbx::test_utils::create_test_rw_db, transaction::DbTxMut, DatabaseEnv}; + use reth_db::{test_utils::create_test_rw_db, transaction::DbTxMut, DatabaseEnv}; use reth_interfaces::test_utils::TestConsensus; use reth_primitives::{ proofs::EMPTY_ROOT, stage::StageCheckpoint, ChainSpecBuilder, H256, MAINNET, diff --git a/crates/consensus/beacon/src/engine/mod.rs b/crates/consensus/beacon/src/engine/mod.rs index 7dcbba6f68987..7f6c23bcf99e6 100644 --- a/crates/consensus/beacon/src/engine/mod.rs +++ b/crates/consensus/beacon/src/engine/mod.rs @@ 
-1381,7 +1381,7 @@ mod tests { config::BlockchainTreeConfig, externals::TreeExternals, post_state::PostState, BlockchainTree, ShareableBlockchainTree, }; - use reth_db::{mdbx::test_utils::create_test_rw_db, DatabaseEnv}; + use reth_db::{test_utils::create_test_rw_db, DatabaseEnv}; use reth_interfaces::{ sync::NoopSyncStateUpdater, test_utils::{NoopFullBlockClient, TestConsensus}, diff --git a/crates/net/downloaders/src/bodies/bodies.rs b/crates/net/downloaders/src/bodies/bodies.rs index ddc00a48185e5..e2a108ba9f96c 100644 --- a/crates/net/downloaders/src/bodies/bodies.rs +++ b/crates/net/downloaders/src/bodies/bodies.rs @@ -595,7 +595,7 @@ mod tests { }; use assert_matches::assert_matches; use futures_util::stream::StreamExt; - use reth_db::mdbx::{test_utils::create_test_db, EnvKind, WriteMap}; + use reth_db::test_utils::create_test_rw_db; use reth_interfaces::test_utils::{generators, generators::random_block_range, TestConsensus}; use reth_primitives::{BlockBody, H256}; use std::{collections::HashMap, sync::Arc}; @@ -605,7 +605,7 @@ mod tests { #[tokio::test] async fn streams_bodies_in_order() { // Generate some random blocks - let db = create_test_db::(EnvKind::RW); + let db = create_test_rw_db(); let (headers, mut bodies) = generate_bodies(0..=19); insert_headers(&db, &headers); @@ -632,7 +632,7 @@ mod tests { #[tokio::test] async fn requests_correct_number_of_times() { // Generate some random blocks - let db = create_test_db::(EnvKind::RW); + let db = create_test_rw_db(); let mut rng = generators::rng(); let blocks = random_block_range(&mut rng, 0..=199, H256::zero(), 1..2); @@ -665,7 +665,7 @@ mod tests { #[tokio::test] async fn streams_bodies_in_order_after_range_reset() { // Generate some random blocks - let db = create_test_db::(EnvKind::RW); + let db = create_test_rw_db(); let (headers, mut bodies) = generate_bodies(0..=99); insert_headers(&db, &headers); @@ -698,7 +698,7 @@ mod tests { #[tokio::test] async fn can_download_new_range_after_termination() { // Generate some random blocks - let db = create_test_db::(EnvKind::RW); + let db = create_test_rw_db(); let (headers, mut bodies) = generate_bodies(0..=199); insert_headers(&db, &headers); diff --git a/crates/net/downloaders/src/bodies/task.rs b/crates/net/downloaders/src/bodies/task.rs index 617312b0ada28..30aabe3a6ab14 100644 --- a/crates/net/downloaders/src/bodies/task.rs +++ b/crates/net/downloaders/src/bodies/task.rs @@ -179,7 +179,7 @@ mod tests { test_utils::{generate_bodies, TestBodiesClient}, }; use assert_matches::assert_matches; - use reth_db::mdbx::{test_utils::create_test_db, EnvKind, WriteMap}; + use reth_db::test_utils::create_test_rw_db; use reth_interfaces::{p2p::error::DownloadError, test_utils::TestConsensus}; use std::sync::Arc; @@ -187,7 +187,7 @@ mod tests { async fn download_one_by_one_on_task() { reth_tracing::init_test_tracing(); - let db = create_test_db::(EnvKind::RW); + let db = create_test_rw_db(); let (headers, mut bodies) = generate_bodies(0..=19); insert_headers(&db, &headers); @@ -216,7 +216,7 @@ mod tests { async fn set_download_range_error_returned() { reth_tracing::init_test_tracing(); - let db = create_test_db::(EnvKind::RW); + let db = create_test_rw_db(); let downloader = BodiesDownloaderBuilder::default().build( Arc::new(TestBodiesClient::default()), Arc::new(TestConsensus::default()), diff --git a/crates/net/downloaders/src/bodies/test_utils.rs b/crates/net/downloaders/src/bodies/test_utils.rs index 6aefe2e066ba8..45e5db1e3c178 100644 --- a/crates/net/downloaders/src/bodies/test_utils.rs 
+++ b/crates/net/downloaders/src/bodies/test_utils.rs @@ -1,12 +1,6 @@ #![allow(unused)] //! Test helper impls for generating bodies -use reth_db::{ - database::Database, - mdbx::{Env, WriteMap}, - tables, - transaction::DbTxMut, - DatabaseEnv, -}; +use reth_db::{database::Database, tables, transaction::DbTxMut, DatabaseEnv}; use reth_interfaces::{db, p2p::bodies::response::BlockResponse}; use reth_primitives::{Block, BlockBody, SealedBlock, SealedHeader, H256}; use std::collections::HashMap; diff --git a/crates/net/downloaders/src/test_utils/file_client.rs b/crates/net/downloaders/src/test_utils/file_client.rs index c1f8197767217..64aee95cd9bf3 100644 --- a/crates/net/downloaders/src/test_utils/file_client.rs +++ b/crates/net/downloaders/src/test_utils/file_client.rs @@ -258,7 +258,7 @@ mod tests { use assert_matches::assert_matches; use futures::SinkExt; use futures_util::stream::StreamExt; - use reth_db::mdbx::{test_utils::create_test_db, EnvKind, WriteMap}; + use reth_db::test_utils::create_test_rw_db; use reth_interfaces::{ p2p::{ bodies::downloader::BodyDownloader, @@ -278,7 +278,7 @@ mod tests { #[tokio::test] async fn streams_bodies_from_buffer() { // Generate some random blocks - let db = create_test_db::(EnvKind::RW); + let db = create_test_rw_db(); let (headers, mut bodies) = generate_bodies(0..=19); insert_headers(&db, &headers); @@ -336,7 +336,7 @@ mod tests { #[tokio::test] async fn test_download_headers_from_file() { // Generate some random blocks - let db = create_test_db::(EnvKind::RW); + let db = create_test_rw_db(); let (file, headers, mut bodies) = generate_bodies_file(0..=19).await; // now try to read them back @@ -361,7 +361,7 @@ mod tests { #[tokio::test] async fn test_download_bodies_from_file() { // Generate some random blocks - let db = create_test_db::(EnvKind::RW); + let db = create_test_rw_db(); let (file, headers, mut bodies) = generate_bodies_file(0..=19).await; // now try to read them back diff --git a/crates/staged-sync/src/utils/init.rs b/crates/staged-sync/src/utils/init.rs index b80554e142e57..e3c1d70b2acdc 100644 --- a/crates/staged-sync/src/utils/init.rs +++ b/crates/staged-sync/src/utils/init.rs @@ -178,9 +178,9 @@ mod tests { use super::*; use reth_db::{ - mdbx::test_utils::create_test_rw_db, models::{storage_sharded_key::StorageShardedKey, ShardedKey}, table::Table, + test_utils::create_test_rw_db, DatabaseEnv, }; use reth_primitives::{ diff --git a/crates/stages/src/lib.rs b/crates/stages/src/lib.rs index ef0a673ac5a88..a1694a6b7426a 100644 --- a/crates/stages/src/lib.rs +++ b/crates/stages/src/lib.rs @@ -26,7 +26,7 @@ //! //! ``` //! # use std::sync::Arc; -//! # use reth_db::mdbx::test_utils::create_test_rw_db; +//! # use reth_db::test_utils::create_test_rw_db; //! # use reth_downloaders::bodies::bodies::BodiesDownloaderBuilder; //! # use reth_downloaders::headers::reverse_headers::ReverseHeadersDownloaderBuilder; //! # use reth_interfaces::consensus::Consensus; diff --git a/crates/stages/src/pipeline/mod.rs b/crates/stages/src/pipeline/mod.rs index f5b089bfc0183..454fa377b22a7 100644 --- a/crates/stages/src/pipeline/mod.rs +++ b/crates/stages/src/pipeline/mod.rs @@ -480,7 +480,7 @@ mod tests { use super::*; use crate::{test_utils::TestStage, UnwindOutput}; use assert_matches::assert_matches; - use reth_db::mdbx::{self, test_utils, EnvKind}; + use reth_db::test_utils::create_test_rw_db; use reth_interfaces::{ consensus, provider::ProviderError, @@ -519,7 +519,7 @@ mod tests { /// Runs a simple pipeline. 
#[tokio::test] async fn run_pipeline() { - let db = test_utils::create_test_db::(EnvKind::RW); + let db = create_test_rw_db(); let mut pipeline = Pipeline::builder() .add_stage( @@ -574,7 +574,7 @@ mod tests { /// Unwinds a simple pipeline. #[tokio::test] async fn unwind_pipeline() { - let db = test_utils::create_test_db::(EnvKind::RW); + let db = create_test_rw_db(); let mut pipeline = Pipeline::builder() .add_stage( @@ -690,7 +690,7 @@ mod tests { /// Unwinds a pipeline with intermediate progress. #[tokio::test] async fn unwind_pipeline_with_intermediate_progress() { - let db = test_utils::create_test_db::(EnvKind::RW); + let db = create_test_rw_db(); let mut pipeline = Pipeline::builder() .add_stage( @@ -777,7 +777,7 @@ mod tests { /// - The pipeline finishes #[tokio::test] async fn run_pipeline_with_unwind() { - let db = test_utils::create_test_db::(EnvKind::RW); + let db = create_test_rw_db(); let mut pipeline = Pipeline::builder() .add_stage( @@ -871,7 +871,7 @@ mod tests { #[tokio::test] async fn pipeline_error_handling() { // Non-fatal - let db = test_utils::create_test_db::(EnvKind::RW); + let db = create_test_rw_db(); let mut pipeline = Pipeline::builder() .add_stage( TestStage::new(StageId::Other("NonFatal")) @@ -884,7 +884,7 @@ mod tests { assert_matches!(result, Ok(())); // Fatal - let db = test_utils::create_test_db::(EnvKind::RW); + let db = create_test_rw_db(); let mut pipeline = Pipeline::builder() .add_stage(TestStage::new(StageId::Other("Fatal")).add_exec(Err( StageError::DatabaseIntegrity(ProviderError::BlockBodyIndicesNotFound(5)), diff --git a/crates/stages/src/sets.rs b/crates/stages/src/sets.rs index ee4858eaa2d1b..f49714e0133e0 100644 --- a/crates/stages/src/sets.rs +++ b/crates/stages/src/sets.rs @@ -14,7 +14,7 @@ //! # use reth_stages::sets::{OfflineStages}; //! # use reth_revm::Factory; //! # use reth_primitives::MAINNET; -//! use reth_db::mdbx::test_utils::create_test_rw_db; +//! use reth_db::test_utils::create_test_rw_db; //! //! # let factory = Factory::new(MAINNET.clone()); //! 
# let db = create_test_rw_db(); diff --git a/crates/stages/src/stages/execution.rs b/crates/stages/src/stages/execution.rs index 06440410c3dd3..e54e7a446d215 100644 --- a/crates/stages/src/stages/execution.rs +++ b/crates/stages/src/stages/execution.rs @@ -417,10 +417,7 @@ mod tests { use super::*; use crate::test_utils::TestTransaction; use assert_matches::assert_matches; - use reth_db::{ - mdbx::{test_utils::create_test_db, EnvKind, WriteMap}, - models::AccountBeforeTx, - }; + use reth_db::{models::AccountBeforeTx, test_utils::create_test_rw_db}; use reth_primitives::{ hex_literal::hex, keccak256, stage::StageUnitCheckpoint, Account, Bytecode, ChainSpecBuilder, SealedBlock, StorageEntry, H160, H256, MAINNET, U256, @@ -441,7 +438,7 @@ mod tests { #[test] fn execution_checkpoint_matches() { - let state_db = create_test_db::(EnvKind::RW); + let state_db = create_test_rw_db(); let factory = ProviderFactory::new(state_db.as_ref(), MAINNET.clone()); let tx = factory.provider_rw().unwrap(); @@ -466,7 +463,7 @@ mod tests { #[test] fn execution_checkpoint_precedes() { - let state_db = create_test_db::(EnvKind::RW); + let state_db = create_test_rw_db(); let factory = ProviderFactory::new(state_db.as_ref(), MAINNET.clone()); let provider = factory.provider_rw().unwrap(); @@ -502,7 +499,7 @@ mod tests { #[test] fn execution_checkpoint_recalculate_full_previous_some() { - let state_db = create_test_db::(EnvKind::RW); + let state_db = create_test_rw_db(); let factory = ProviderFactory::new(state_db.as_ref(), MAINNET.clone()); let provider = factory.provider_rw().unwrap(); @@ -538,7 +535,7 @@ mod tests { #[test] fn execution_checkpoint_recalculate_full_previous_none() { - let state_db = create_test_db::(EnvKind::RW); + let state_db = create_test_rw_db(); let factory = ProviderFactory::new(state_db.as_ref(), MAINNET.clone()); let provider = factory.provider_rw().unwrap(); @@ -568,7 +565,7 @@ mod tests { async fn sanity_execution_of_block() { // TODO cleanup the setup after https://github.com/paradigmxyz/reth/issues/332 // is merged as it has similar framework - let state_db = create_test_db::(EnvKind::RW); + let state_db = create_test_rw_db(); let factory = ProviderFactory::new(state_db.as_ref(), MAINNET.clone()); let provider = factory.provider_rw().unwrap(); let input = ExecInput { @@ -678,7 +675,7 @@ mod tests { // TODO cleanup the setup after https://github.com/paradigmxyz/reth/issues/332 // is merged as it has similar framework - let state_db = create_test_db::(EnvKind::RW); + let state_db = create_test_rw_db(); let factory = ProviderFactory::new(state_db.as_ref(), MAINNET.clone()); let provider = factory.provider_rw().unwrap(); let input = ExecInput { diff --git a/crates/stages/src/stages/hashing_storage.rs b/crates/stages/src/stages/hashing_storage.rs index f7ee40661d281..88a0043dccd2f 100644 --- a/crates/stages/src/stages/hashing_storage.rs +++ b/crates/stages/src/stages/hashing_storage.rs @@ -233,7 +233,6 @@ mod tests { use rand::Rng; use reth_db::{ cursor::{DbCursorRO, DbCursorRW}, - mdbx::{tx::Tx, WriteMap, RW}, models::{BlockNumberAddress, StoredBlockBodyIndices}, }; use reth_interfaces::test_utils::{ @@ -621,9 +620,9 @@ mod tests { .map_err(|e| e.into()) } - fn insert_storage_entry( + fn insert_storage_entry<'a, TX: DbTxMut<'a>>( &self, - tx: &Tx<'_, RW, WriteMap>, + tx: &'a TX, tid_address: BlockNumberAddress, entry: StorageEntry, hash: bool, diff --git a/crates/stages/src/test_utils/runner.rs b/crates/stages/src/test_utils/runner.rs index e726948e1cc6d..0626ca084863c 100644 --- 
a/crates/stages/src/test_utils/runner.rs +++ b/crates/stages/src/test_utils/runner.rs @@ -1,9 +1,6 @@ use super::TestTransaction; use crate::{ExecInput, ExecOutput, Stage, StageError, UnwindInput, UnwindOutput}; -use reth_db::{ - mdbx::{Env, WriteMap}, - DatabaseEnv, -}; +use reth_db::DatabaseEnv; use reth_primitives::MAINNET; use reth_provider::ProviderFactory; use std::{borrow::Borrow, sync::Arc}; diff --git a/crates/stages/src/test_utils/test_db.rs b/crates/stages/src/test_utils/test_db.rs index ef2c04c5ae7fe..d5358c3027131 100644 --- a/crates/stages/src/test_utils/test_db.rs +++ b/crates/stages/src/test_utils/test_db.rs @@ -1,15 +1,12 @@ use reth_db::{ common::KeyValue, cursor::{DbCursorRO, DbCursorRW, DbDupCursorRO}, - mdbx::{ - test_utils::{create_test_db, create_test_db_with_path}, - tx::Tx, - Env, EnvKind, WriteMap, RO, RW, - }, + database::DatabaseGAT, models::{AccountBeforeTx, StoredBlockBodyIndices}, table::Table, tables, - transaction::{DbTx, DbTxMut}, + test_utils::create_test_rw_db, + transaction::{DbTx, DbTxGAT, DbTxMut, DbTxMutGAT}, DatabaseEnv, DatabaseError as DbError, }; use reth_primitives::{ @@ -34,7 +31,7 @@ use std::{ /// ``` #[derive(Debug)] pub struct TestTransaction { - /// WriteMap DB + /// DB pub tx: Arc, pub path: Option, pub factory: ProviderFactory>, @@ -43,14 +40,14 @@ pub struct TestTransaction { impl Default for TestTransaction { /// Create a new instance of [TestTransaction] fn default() -> Self { - let tx = create_test_db::(EnvKind::RW); + let tx = create_test_rw_db(); Self { tx: tx.clone(), path: None, factory: ProviderFactory::new(tx, MAINNET.clone()) } } } impl TestTransaction { pub fn new(path: &Path) -> Self { - let tx = create_test_db::(EnvKind::RW); + let tx = create_test_rw_db(); Self { tx: tx.clone(), path: Some(path.to_path_buf()), @@ -76,7 +73,7 @@ impl TestTransaction { /// Invoke a callback with transaction committing it afterwards pub fn commit(&self, f: F) -> Result<(), DbError> where - F: FnOnce(&Tx<'_, RW, WriteMap>) -> Result<(), DbError>, + F: FnOnce(&>::TXMut) -> Result<(), DbError>, { let mut tx = self.inner_rw(); f(tx.tx_ref())?; @@ -87,7 +84,7 @@ impl TestTransaction { /// Invoke a callback with a read transaction pub fn query(&self, f: F) -> Result where - F: FnOnce(&Tx<'_, RO, WriteMap>) -> Result, + F: FnOnce(&>::TX) -> Result, { f(self.inner().tx_ref()) } @@ -200,7 +197,10 @@ impl TestTransaction { } /// Inserts a single [SealedHeader] into the corresponding tables of the headers stage. 
- fn insert_header(tx: &Tx<'_, RW, WriteMap>, header: &SealedHeader) -> Result<(), DbError> { + fn insert_header<'a, TX: DbTxMut<'a> + DbTx<'a>>( + tx: &'a TX, + header: &SealedHeader, + ) -> Result<(), DbError> { tx.put::(header.number, header.hash())?; tx.put::(header.hash(), header.number)?; tx.put::(header.number, header.clone().unseal()) diff --git a/crates/storage/db/benches/criterion.rs b/crates/storage/db/benches/criterion.rs index 3780917087655..c1a078f2ceb46 100644 --- a/crates/storage/db/benches/criterion.rs +++ b/crates/storage/db/benches/criterion.rs @@ -131,7 +131,10 @@ where || { // Reset DB let _ = std::fs::remove_dir_all(bench_db_path); - (input.clone(), create_test_db_with_path::(EnvKind::RW, bench_db_path)) + ( + input.clone(), + Arc::try_unwrap(create_test_rw_db_with_path(bench_db_path)).unwrap(), + ) }, |(input, db)| { // Create TX @@ -154,7 +157,7 @@ where || { // Reset DB let _ = std::fs::remove_dir_all(bench_db_path); - (input, create_test_db_with_path::(EnvKind::RW, bench_db_path)) + (input, Arc::try_unwrap(create_test_rw_db_with_path(bench_db_path)).unwrap()) }, |(input, db)| { // Create TX @@ -225,7 +228,10 @@ where || { // Reset DB let _ = std::fs::remove_dir_all(bench_db_path); - (input.clone(), create_test_db_with_path::(EnvKind::RW, bench_db_path)) + ( + input.clone(), + Arc::try_unwrap(create_test_rw_db_with_path(bench_db_path)).unwrap(), + ) }, |(input, db)| { // Create TX @@ -249,7 +255,7 @@ where // Reset DB let _ = std::fs::remove_dir_all(bench_db_path); - (input, create_test_db_with_path::(EnvKind::RW, bench_db_path)) + (input, Arc::try_unwrap(create_test_rw_db_with_path(bench_db_path)).unwrap()) }, |(input, db)| { // Create TX diff --git a/crates/storage/db/benches/hash_keys.rs b/crates/storage/db/benches/hash_keys.rs index 7b8fe3dc1ebfc..a08547f94a63a 100644 --- a/crates/storage/db/benches/hash_keys.rs +++ b/crates/storage/db/benches/hash_keys.rs @@ -12,7 +12,6 @@ use proptest::{ }; use reth_db::{ cursor::{DbCursorRW, DbDupCursorRO, DbDupCursorRW}, - mdbx::Env, TxHashNumber, }; use std::{collections::HashSet, time::Instant}; @@ -86,7 +85,7 @@ where let setup = || { // Reset DB let _ = std::fs::remove_dir_all(bench_db_path); - let db = create_test_db_with_path::(EnvKind::RW, bench_db_path); + let db = Arc::try_unwrap(create_test_rw_db_with_path(bench_db_path)).unwrap(); let mut unsorted_input = unsorted_input.clone(); if scenario_str == "append_all" { diff --git a/crates/storage/db/benches/utils.rs b/crates/storage/db/benches/utils.rs index 30f8f6ac49853..362e48eda8d20 100644 --- a/crates/storage/db/benches/utils.rs +++ b/crates/storage/db/benches/utils.rs @@ -1,12 +1,12 @@ -use reth_db::DatabaseEnv; #[allow(unused_imports)] use reth_db::{ database::Database, - mdbx::{test_utils::create_test_db_with_path, EnvKind, WriteMap}, table::*, + test_utils::create_test_rw_db_with_path, transaction::{DbTx, DbTxMut}, + DatabaseEnv, }; -use std::path::Path; +use std::{path::Path, sync::Arc}; /// Path where the DB is initialized for benchmarks. 
#[allow(unused)] @@ -60,7 +60,7 @@ where { // Reset DB let _ = std::fs::remove_dir_all(bench_db_path); - let db = create_test_db_with_path::(EnvKind::RW, bench_db_path); + let db = Arc::try_unwrap(create_test_rw_db_with_path(bench_db_path)).unwrap(); { // Prepare data to be read diff --git a/crates/storage/db/src/implementation/mdbx/mod.rs b/crates/storage/db/src/implementation/mdbx/mod.rs index 219a4336a6c56..4bf66c8b42095 100644 --- a/crates/storage/db/src/implementation/mdbx/mod.rs +++ b/crates/storage/db/src/implementation/mdbx/mod.rs @@ -124,56 +124,38 @@ impl Deref for Env { } } -/// Collection of database test utilities -#[cfg(any(test, feature = "test-utils"))] -pub mod test_utils { - use super::*; - use reth_libmdbx::WriteMap; - use std::sync::Arc; - - /// Error during database creation - pub const ERROR_DB_CREATION: &str = "Not able to create the mdbx file."; - /// Error during table creation - pub const ERROR_TABLE_CREATION: &str = "Not able to create tables in the database."; - /// Error during tempdir creation - pub const ERROR_TEMPDIR: &str = "Not able to create a temporary directory."; - - /// Create rw database for testing - pub fn create_test_rw_db() -> Arc> { - create_test_db(EnvKind::RW) - } - /// Create database for testing - pub fn create_test_db(kind: EnvKind) -> Arc> { - Arc::new(create_test_db_with_path( - kind, - &tempfile::TempDir::new().expect(ERROR_TEMPDIR).into_path(), - )) - } - - /// Create database for testing with specified path - pub fn create_test_db_with_path(kind: EnvKind, path: &Path) -> Env { - let env = Env::::open(path, kind).expect(ERROR_DB_CREATION); - env.create_tables().expect(ERROR_TABLE_CREATION); - env - } -} - #[cfg(test)] mod tests { - use super::{test_utils, Env, EnvKind}; + use super::*; use crate::{ cursor::{DbCursorRO, DbCursorRW, DbDupCursorRO, DbDupCursorRW, ReverseWalker, Walker}, database::Database, models::{AccountBeforeTx, ShardedKey}, tables::{AccountHistory, CanonicalHeaders, Headers, PlainAccountState, PlainStorageState}, + test_utils::*, transaction::{DbTx, DbTxMut}, AccountChangeSet, DatabaseError, }; use reth_libmdbx::{NoWriteMap, WriteMap}; use reth_primitives::{Account, Address, Header, IntegerList, StorageEntry, H160, H256, U256}; - use std::{str::FromStr, sync::Arc}; + use std::{path::Path, str::FromStr, sync::Arc}; use tempfile::TempDir; + /// Create database for testing + fn create_test_db(kind: EnvKind) -> Arc> { + Arc::new(create_test_db_with_path( + kind, + &tempfile::TempDir::new().expect(ERROR_TEMPDIR).into_path(), + )) + } + + /// Create database for testing with specified path + fn create_test_db_with_path(kind: EnvKind, path: &Path) -> Env { + let env = Env::::open(path, kind).expect(ERROR_DB_CREATION); + env.create_tables().expect(ERROR_TABLE_CREATION); + env + } + const ERROR_DB_CREATION: &str = "Not able to create the mdbx file."; const ERROR_PUT: &str = "Not able to insert value into table."; const ERROR_APPEND: &str = "Not able to append the value to the table."; @@ -186,12 +168,12 @@ mod tests { #[test] fn db_creation() { - test_utils::create_test_db::(EnvKind::RW); + create_test_db::(EnvKind::RW); } #[test] fn db_manual_put_get() { - let env = test_utils::create_test_db::(EnvKind::RW); + let env = create_test_db::(EnvKind::RW); let value = Header::default(); let key = 1u64; @@ -210,7 +192,7 @@ mod tests { #[test] fn db_cursor_walk() { - let env = test_utils::create_test_db::(EnvKind::RW); + let env = create_test_db::(EnvKind::RW); let value = Header::default(); let key = 1u64; @@ -235,7 +217,7 @@ mod 
tests { #[test] fn db_cursor_walk_range() { - let db: Arc> = test_utils::create_test_db(EnvKind::RW); + let db: Arc> = create_test_db(EnvKind::RW); // PUT (0, 0), (1, 0), (2, 0), (3, 0) let tx = db.tx_mut().expect(ERROR_INIT_TX); @@ -299,7 +281,7 @@ mod tests { #[test] fn db_cursor_walk_range_on_dup_table() { - let db: Arc> = test_utils::create_test_db(EnvKind::RW); + let db: Arc> = create_test_db(EnvKind::RW); let address0 = Address::zero(); let address1 = Address::from_low_u64_be(1); @@ -341,7 +323,7 @@ mod tests { #[allow(clippy::reversed_empty_ranges)] #[test] fn db_cursor_walk_range_invalid() { - let db: Arc> = test_utils::create_test_db(EnvKind::RW); + let db: Arc> = create_test_db(EnvKind::RW); // PUT (0, 0), (1, 0), (2, 0), (3, 0) let tx = db.tx_mut().expect(ERROR_INIT_TX); @@ -369,7 +351,7 @@ mod tests { #[test] fn db_walker() { - let db: Arc> = test_utils::create_test_db(EnvKind::RW); + let db: Arc> = create_test_db(EnvKind::RW); // PUT (0, 0), (1, 0), (3, 0) let tx = db.tx_mut().expect(ERROR_INIT_TX); @@ -399,7 +381,7 @@ mod tests { #[test] fn db_reverse_walker() { - let db: Arc> = test_utils::create_test_db(EnvKind::RW); + let db: Arc> = create_test_db(EnvKind::RW); // PUT (0, 0), (1, 0), (3, 0) let tx = db.tx_mut().expect(ERROR_INIT_TX); @@ -429,7 +411,7 @@ mod tests { #[test] fn db_walk_back() { - let db: Arc> = test_utils::create_test_db(EnvKind::RW); + let db: Arc> = create_test_db(EnvKind::RW); // PUT (0, 0), (1, 0), (3, 0) let tx = db.tx_mut().expect(ERROR_INIT_TX); @@ -468,7 +450,7 @@ mod tests { #[test] fn db_cursor_seek_exact_or_previous_key() { - let db: Arc> = test_utils::create_test_db(EnvKind::RW); + let db: Arc> = create_test_db(EnvKind::RW); // PUT let tx = db.tx_mut().expect(ERROR_INIT_TX); @@ -494,7 +476,7 @@ mod tests { #[test] fn db_cursor_insert() { - let db: Arc> = test_utils::create_test_db(EnvKind::RW); + let db: Arc> = create_test_db(EnvKind::RW); // PUT let tx = db.tx_mut().expect(ERROR_INIT_TX); @@ -528,7 +510,7 @@ mod tests { #[test] fn db_cursor_insert_dup() { - let db: Arc> = test_utils::create_test_db(EnvKind::RW); + let db: Arc> = create_test_db(EnvKind::RW); let tx = db.tx_mut().expect(ERROR_INIT_TX); let mut dup_cursor = tx.cursor_dup_write::().unwrap(); @@ -546,7 +528,7 @@ mod tests { #[test] fn db_cursor_delete_current_non_existent() { - let db: Arc> = test_utils::create_test_db(EnvKind::RW); + let db: Arc> = create_test_db(EnvKind::RW); let tx = db.tx_mut().expect(ERROR_INIT_TX); let key1 = Address::from_low_u64_be(1); @@ -574,7 +556,7 @@ mod tests { #[test] fn db_cursor_insert_wherever_cursor_is() { - let db: Arc> = test_utils::create_test_db(EnvKind::RW); + let db: Arc> = create_test_db(EnvKind::RW); let tx = db.tx_mut().expect(ERROR_INIT_TX); // PUT @@ -607,7 +589,7 @@ mod tests { #[test] fn db_cursor_append() { - let db: Arc> = test_utils::create_test_db(EnvKind::RW); + let db: Arc> = create_test_db(EnvKind::RW); // PUT let tx = db.tx_mut().expect(ERROR_INIT_TX); @@ -634,7 +616,7 @@ mod tests { #[test] fn db_cursor_append_failure() { - let db: Arc> = test_utils::create_test_db(EnvKind::RW); + let db: Arc> = create_test_db(EnvKind::RW); // PUT let tx = db.tx_mut().expect(ERROR_INIT_TX); @@ -662,7 +644,7 @@ mod tests { #[test] fn db_cursor_upsert() { - let db: Arc> = test_utils::create_test_db(EnvKind::RW); + let db: Arc> = create_test_db(EnvKind::RW); let tx = db.tx_mut().expect(ERROR_INIT_TX); let mut cursor = tx.cursor_write::().unwrap(); @@ -697,7 +679,7 @@ mod tests { #[test] fn db_cursor_dupsort_append() { - let db: Arc> = 
test_utils::create_test_db(EnvKind::RW); + let db: Arc> = create_test_db(EnvKind::RW); let transition_id = 2; @@ -743,7 +725,7 @@ mod tests { #[test] fn db_closure_put_get() { - let path = TempDir::new().expect(test_utils::ERROR_TEMPDIR).into_path(); + let path = TempDir::new().expect(ERROR_TEMPDIR).into_path(); let value = Account { nonce: 18446744073709551615, @@ -754,7 +736,7 @@ mod tests { .expect(ERROR_ETH_ADDRESS); { - let env = test_utils::create_test_db_with_path::(EnvKind::RW, &path); + let env = create_test_db_with_path::(EnvKind::RW, &path); // PUT let result = env.update(|tx| { @@ -775,7 +757,7 @@ mod tests { #[test] fn db_dup_sort() { - let env = test_utils::create_test_db::(EnvKind::RW); + let env = create_test_db::(EnvKind::RW); let key = Address::from_str("0xa2c122be93b0074270ebee7f6b7292c7deb45047") .expect(ERROR_ETH_ADDRESS); @@ -819,7 +801,7 @@ mod tests { #[test] fn db_iterate_over_all_dup_values() { - let env = test_utils::create_test_db::(EnvKind::RW); + let env = create_test_db::(EnvKind::RW); let key1 = Address::from_str("0x1111111111111111111111111111111111111111") .expect(ERROR_ETH_ADDRESS); let key2 = Address::from_str("0x2222222222222222222222222222222222222222") @@ -865,7 +847,7 @@ mod tests { #[test] fn dup_value_with_same_subkey() { - let env = test_utils::create_test_db::(EnvKind::RW); + let env = create_test_db::(EnvKind::RW); let key1 = H160([0x11; 20]); let key2 = H160([0x22; 20]); @@ -908,7 +890,7 @@ mod tests { #[test] fn db_sharded_key() { - let db: Arc> = test_utils::create_test_db(EnvKind::RW); + let db: Arc> = create_test_db(EnvKind::RW); let real_key = Address::from_str("0xa2c122be93b0074270ebee7f6b7292c7deb45047").unwrap(); for i in 1..5 { diff --git a/crates/storage/db/src/lib.rs b/crates/storage/db/src/lib.rs index 8ce4973f4ab83..511d5d2ac1c0b 100644 --- a/crates/storage/db/src/lib.rs +++ b/crates/storage/db/src/lib.rs @@ -89,16 +89,23 @@ pub use tables::*; pub use utils::is_database_empty; #[cfg(feature = "mdbx")] -use mdbx::{Env, EnvKind, WriteMap}; +use mdbx::{Env, EnvKind, NoWriteMap, WriteMap}; #[cfg(feature = "mdbx")] -/// Alias type for the database engine in use. +/// Alias type for the database environment in use. Read/Write mode. pub type DatabaseEnv = Env; -/// Opens up an existing database or creates a new one at the specified path. -pub fn init_db>(path: P) -> eyre::Result { +#[cfg(feature = "mdbx")] +/// Alias type for the database engine in use. Read only mode. +pub type DatabaseEnvRO = Env; + +use eyre::WrapErr; +use std::path::Path; + +/// Opens up an existing database or creates a new one at the specified path. Creates tables if +/// necessary. Read/Write mode. +pub fn init_db>(path: P) -> eyre::Result { use crate::version::{check_db_version_file, create_db_version_file, DatabaseVersionError}; - use eyre::WrapErr; let rpath = path.as_ref(); if is_database_empty(rpath) { @@ -124,6 +131,71 @@ pub fn init_db>(path: P) -> eyre::Result } } +/// Opens up an existing database. Read only mode. It doesn't create it or create tables if missing. +pub fn open_db_read_only(path: &Path) -> eyre::Result { + #[cfg(feature = "mdbx")] + { + Env::::open(path, mdbx::EnvKind::RO) + .with_context(|| format!("Could not open database at path: {}", path.display())) + } + #[cfg(not(feature = "mdbx"))] + { + unimplemented!(); + } +} + +/// Opens up an existing database. Read/Write mode. It doesn't create it or create tables if +/// missing. 
+pub fn open_db(path: &Path) -> eyre::Result { + #[cfg(feature = "mdbx")] + { + Env::::open(path, mdbx::EnvKind::RW) + .with_context(|| format!("Could not open database at path: {}", path.display())) + } + #[cfg(not(feature = "mdbx"))] + { + unimplemented!(); + } +} + +/// Collection of database test utilities +#[cfg(any(test, feature = "test-utils"))] +pub mod test_utils { + use super::*; + use std::sync::Arc; + + /// Error during database open + pub const ERROR_DB_OPEN: &str = "Not able to open the database file."; + /// Error during database creation + pub const ERROR_DB_CREATION: &str = "Not able to create the database file."; + /// Error during table creation + pub const ERROR_TABLE_CREATION: &str = "Not able to create tables in the database."; + /// Error during tempdir creation + pub const ERROR_TEMPDIR: &str = "Not able to create a temporary directory."; + + /// Create read/write database for testing + pub fn create_test_rw_db() -> Arc { + Arc::new( + init_db(tempfile::TempDir::new().expect(ERROR_TEMPDIR).into_path()) + .expect(ERROR_DB_CREATION), + ) + } + + /// Create read/write database for testing + pub fn create_test_rw_db_with_path>(path: P) -> Arc { + Arc::new(init_db(path.as_ref()).expect(ERROR_DB_CREATION)) + } + + /// Create read only database for testing + pub fn create_test_ro_db() -> Arc { + let path = tempfile::TempDir::new().expect(ERROR_TEMPDIR).into_path(); + { + init_db(path.as_path()).expect(ERROR_DB_CREATION); + } + Arc::new(open_db_read_only(path.as_path()).expect(ERROR_DB_OPEN)) + } +} + #[cfg(test)] mod tests { use crate::{ diff --git a/crates/storage/provider/src/post_state/mod.rs b/crates/storage/provider/src/post_state/mod.rs index 1ab52b1b93473..d16d2db07a27f 100644 --- a/crates/storage/provider/src/post_state/mod.rs +++ b/crates/storage/provider/src/post_state/mod.rs @@ -223,10 +223,10 @@ impl PostState { /// ``` /// use reth_primitives::{Address, Account}; /// use reth_provider::PostState; - /// use reth_db::{mdbx::{EnvKind, WriteMap, test_utils::create_test_db}, database::Database}; + /// use reth_db::{test_utils::create_test_rw_db, database::Database}; /// /// // Initialize the database - /// let db = create_test_db::(EnvKind::RW); + /// let db = create_test_rw_db(); /// /// // Initialize the post state /// let mut post_state = PostState::new(); @@ -642,10 +642,7 @@ mod tests { use super::*; use crate::{AccountReader, ProviderFactory}; use reth_db::{ - database::Database, - mdbx::{test_utils, EnvKind}, - transaction::DbTx, - DatabaseEnv, + database::Database, test_utils::create_test_rw_db, transaction::DbTx, DatabaseEnv, }; use reth_primitives::{proofs::EMPTY_ROOT, MAINNET}; use reth_trie::test_utils::state_root; @@ -1067,7 +1064,7 @@ mod tests { #[test] fn write_to_db_account_info() { - let db: Arc = test_utils::create_test_db(EnvKind::RW); + let db: Arc = create_test_rw_db(); let factory = ProviderFactory::new(db, MAINNET.clone()); let provider = factory.provider_rw().unwrap(); @@ -1136,7 +1133,7 @@ mod tests { #[test] fn write_to_db_storage() { - let db: Arc = test_utils::create_test_db(EnvKind::RW); + let db: Arc = create_test_rw_db(); let tx = db.tx_mut().expect("Could not get database tx"); let mut post_state = PostState::new(); @@ -1272,7 +1269,7 @@ mod tests { #[test] fn write_to_db_multiple_selfdestructs() { - let db: Arc = test_utils::create_test_db(EnvKind::RW); + let db: Arc = create_test_rw_db(); let tx = db.tx_mut().expect("Could not get database tx"); let address1 = Address::random(); @@ -1821,7 +1818,7 @@ mod tests { #[test] fn 
empty_post_state_state_root() { - let db: Arc = test_utils::create_test_db(EnvKind::RW); + let db: Arc = create_test_rw_db(); let tx = db.tx().unwrap(); let post_state = PostState::new(); @@ -1840,7 +1837,7 @@ mod tests { }) .collect(); - let db: Arc = test_utils::create_test_db(EnvKind::RW); + let db: Arc = create_test_rw_db(); // insert initial state to the database db.update(|tx| { diff --git a/crates/storage/provider/src/providers/database/mod.rs b/crates/storage/provider/src/providers/database/mod.rs index acfdff010c37f..5170f4b21dc5d 100644 --- a/crates/storage/provider/src/providers/database/mod.rs +++ b/crates/storage/provider/src/providers/database/mod.rs @@ -348,10 +348,7 @@ mod tests { use super::ProviderFactory; use crate::{BlockHashReader, BlockNumReader}; use reth_db::{ - mdbx::{ - test_utils::{create_test_db, ERROR_TEMPDIR}, - EnvKind, WriteMap, - }, + test_utils::{create_test_rw_db, ERROR_TEMPDIR}, DatabaseEnv, }; use reth_primitives::{ChainSpecBuilder, H256}; @@ -360,7 +357,7 @@ mod tests { #[test] fn common_history_provider() { let chain_spec = ChainSpecBuilder::mainnet().build(); - let db = create_test_db::(EnvKind::RW); + let db = create_test_rw_db(); let provider = ProviderFactory::new(db, Arc::new(chain_spec)); let _ = provider.latest(); } @@ -368,7 +365,7 @@ mod tests { #[test] fn default_chain_info() { let chain_spec = ChainSpecBuilder::mainnet().build(); - let db = create_test_db::(EnvKind::RW); + let db = create_test_rw_db(); let factory = ProviderFactory::new(db, Arc::new(chain_spec)); let provider = factory.provider().unwrap(); @@ -380,7 +377,7 @@ mod tests { #[test] fn provider_flow() { let chain_spec = ChainSpecBuilder::mainnet().build(); - let db = create_test_db::(EnvKind::RW); + let db = create_test_rw_db(); let factory = ProviderFactory::new(db, Arc::new(chain_spec)); let provider = factory.provider().unwrap(); provider.block_hash(0).unwrap(); diff --git a/crates/storage/provider/src/providers/state/historical.rs b/crates/storage/provider/src/providers/state/historical.rs index 048d523297ce6..f62ab7b98c17b 100644 --- a/crates/storage/provider/src/providers/state/historical.rs +++ b/crates/storage/provider/src/providers/state/historical.rs @@ -223,9 +223,9 @@ mod tests { }; use reth_db::{ database::Database, - mdbx::test_utils::create_test_rw_db, models::{storage_sharded_key::StorageShardedKey, AccountBeforeTx, ShardedKey}, tables, + test_utils::create_test_rw_db, transaction::{DbTx, DbTxMut}, BlockNumberList, }; diff --git a/crates/storage/provider/src/transaction.rs b/crates/storage/provider/src/transaction.rs index 0d3ef4453d253..2ce2643c3b24d 100644 --- a/crates/storage/provider/src/transaction.rs +++ b/crates/storage/provider/src/transaction.rs @@ -7,9 +7,9 @@ use std::fmt::Debug; mod test { use crate::{test_utils::blocks::*, ProviderFactory, TransactionsProvider}; use reth_db::{ - mdbx::test_utils::create_test_rw_db, models::{storage_sharded_key::StorageShardedKey, ShardedKey}, tables, + test_utils::create_test_rw_db, }; use reth_primitives::{ChainSpecBuilder, IntegerList, H160, MAINNET, U256}; use std::sync::Arc; diff --git a/crates/trie/src/hashed_cursor/post_state.rs b/crates/trie/src/hashed_cursor/post_state.rs index 9dfe933de7539..ed04570df87c1 100644 --- a/crates/trie/src/hashed_cursor/post_state.rs +++ b/crates/trie/src/hashed_cursor/post_state.rs @@ -372,7 +372,7 @@ where mod tests { use super::*; use proptest::prelude::*; - use reth_db::{database::Database, mdbx::test_utils::create_test_rw_db, transaction::DbTxMut}; + use 
reth_db::{database::Database, test_utils::create_test_rw_db, transaction::DbTxMut};

     fn assert_account_cursor_order<'a, 'b>(
         factory: &'a impl HashedCursorFactory<'b>,
diff --git a/crates/trie/src/trie.rs b/crates/trie/src/trie.rs
index 679d98a764b7a..9ec5ff4cfde73 100644
--- a/crates/trie/src/trie.rs
+++ b/crates/trie/src/trie.rs
@@ -514,8 +514,8 @@ mod tests {
     use proptest::{prelude::ProptestConfig, proptest};
     use reth_db::{
         cursor::{DbCursorRO, DbCursorRW, DbDupCursorRO},
-        mdbx::test_utils::create_test_rw_db,
         tables,
+        test_utils::create_test_rw_db,
         transaction::DbTxMut,
         DatabaseEnv,
     };
diff --git a/crates/trie/src/trie_cursor/account_cursor.rs b/crates/trie/src/trie_cursor/account_cursor.rs
index e34b074ef645e..f7f3bd759f322 100644
--- a/crates/trie/src/trie_cursor/account_cursor.rs
+++ b/crates/trie/src/trie_cursor/account_cursor.rs
@@ -42,8 +42,8 @@ mod tests {
     use super::*;
     use reth_db::{
         cursor::{DbCursorRO, DbCursorRW},
-        mdbx::test_utils::create_test_rw_db,
         tables,
+        test_utils::create_test_rw_db,
         transaction::DbTxMut,
     };
     use reth_primitives::{hex_literal::hex, MAINNET};
diff --git a/crates/trie/src/trie_cursor/storage_cursor.rs b/crates/trie/src/trie_cursor/storage_cursor.rs
index 800691d1c6457..8929e09ae286e 100644
--- a/crates/trie/src/trie_cursor/storage_cursor.rs
+++ b/crates/trie/src/trie_cursor/storage_cursor.rs
@@ -58,7 +58,7 @@ mod tests {
     use super::*;

     use reth_db::{
-        cursor::DbCursorRW, mdbx::test_utils::create_test_rw_db, tables, transaction::DbTxMut,
+        cursor::DbCursorRW, tables, test_utils::create_test_rw_db, transaction::DbTxMut,
     };
     use reth_primitives::{
         trie::{BranchNodeCompact, StorageTrieEntry},
diff --git a/crates/trie/src/walker.rs b/crates/trie/src/walker.rs
index f10c1cee22755..b8290109c444f 100644
--- a/crates/trie/src/walker.rs
+++ b/crates/trie/src/walker.rs
@@ -260,7 +260,7 @@ mod tests {
     use super::*;
     use crate::trie_cursor::{AccountTrieCursor, StorageTrieCursor};
     use reth_db::{
-        cursor::DbCursorRW, mdbx::test_utils::create_test_rw_db, tables, transaction::DbTxMut,
+        cursor::DbCursorRW, tables, test_utils::create_test_rw_db, transaction::DbTxMut,
     };
     use reth_primitives::{trie::StorageTrieEntry, MAINNET};
     use reth_provider::ProviderFactory;
diff --git a/testing/ef-tests/src/cases/blockchain_test.rs b/testing/ef-tests/src/cases/blockchain_test.rs
index 16d609ea440e6..1bb4b37a06f67 100644
--- a/testing/ef-tests/src/cases/blockchain_test.rs
+++ b/testing/ef-tests/src/cases/blockchain_test.rs
@@ -4,7 +4,7 @@ use crate::{
     models::{BlockchainTest, ForkSpec, RootOrState},
     Case, Error, Suite,
 };
-use reth_db::mdbx::test_utils::create_test_rw_db;
+use reth_db::test_utils::create_test_rw_db;
 use reth_primitives::{BlockBody, SealedBlock};
 use reth_provider::{BlockWriter, ProviderFactory};
 use reth_rlp::Decodable;

From 3dcc76e2b0aba962dd130934f698231250ef5ae6 Mon Sep 17 00:00:00 2001
From: Matthias Seitz
Date: Fri, 30 Jun 2023 02:44:11 +0200
Subject: [PATCH 15/15] fix: use correct address for log (#3481)

---
 .../revm/revm-inspectors/src/tracing/types.rs | 18 +++++++++++++++++-
 1 file changed, 17 insertions(+), 1 deletion(-)

diff --git a/crates/revm/revm-inspectors/src/tracing/types.rs b/crates/revm/revm-inspectors/src/tracing/types.rs
index feb3524e4135a..8809de608550e 100644
--- a/crates/revm/revm-inspectors/src/tracing/types.rs
+++ b/crates/revm/revm-inspectors/src/tracing/types.rs
@@ -35,6 +35,11 @@ impl CallKind {
     pub fn is_any_create(&self) -> bool {
         matches!(self, CallKind::Create | CallKind::Create2)
     }
+
+    /// Returns true if the call is a delegate of some sorts
+    pub fn is_delegate(&self) -> bool {
+        matches!(self, CallKind::DelegateCall | CallKind::CallCode)
+    }
 }

 impl std::fmt::Display for CallKind {
@@ -204,6 +209,17 @@ pub(crate) struct CallTraceNode {
 }

 impl CallTraceNode {
+    /// Returns the call context's execution address
+    ///
+    /// See `Inspector::call` impl of [TracingInspector](crate::tracing::TracingInspector)
+    pub(crate) fn execution_address(&self) -> Address {
+        if self.trace.kind.is_delegate() {
+            self.trace.caller
+        } else {
+            self.trace.address
+        }
+    }
+
     /// Pushes all steps onto the stack in reverse order
     /// so that the first step is on top of the stack
     pub(crate) fn push_steps_on_stack<'a>(
@@ -393,7 +409,7 @@ impl CallTraceNode {
                 .logs
                 .iter()
                 .map(|log| CallLogFrame {
-                    address: Some(self.trace.address),
+                    address: Some(self.execution_address()),
                     topics: Some(log.topics.clone()),
                     data: Some(log.data.clone().into()),
                 })
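
The address selection introduced in PATCH 15/15 encodes an EVM rule: under DELEGATECALL or CALLCODE the callee's bytecode runs in the caller's storage context, so any LOG opcodes it executes are attributed to the caller's address, not to the contract whose code is being run. The following standalone Rust sketch restates that decision with simplified stand-in types; the names mirror the patch (`CallKind`, `is_delegate`, `caller`, `address`), but the snippet is illustrative only and not part of the diff.

// Illustrative sketch, not part of the patch: simplified stand-ins for the traced call data.
type Address = [u8; 20];

enum CallKind {
    Call,
    CallCode,
    DelegateCall,
    Create,
}

impl CallKind {
    // Same check the patch adds as `CallKind::is_delegate`.
    fn is_delegate(&self) -> bool {
        matches!(self, CallKind::DelegateCall | CallKind::CallCode)
    }
}

struct CallTrace {
    kind: CallKind,
    caller: Address,  // context whose storage and balance are used
    address: Address, // contract whose code is executing
}

// Logs emitted during a delegate-style call belong to the caller's context,
// so that is the address a trace consumer should report for them.
fn log_address(trace: &CallTrace) -> Address {
    if trace.kind.is_delegate() {
        trace.caller
    } else {
        trace.address
    }
}

fn main() {
    let proxy = [0x11; 20];
    let implementation = [0x22; 20];
    let trace = CallTrace { kind: CallKind::DelegateCall, caller: proxy, address: implementation };
    // The log is attributed to the proxy, not to the implementation contract.
    assert_eq!(log_address(&trace), proxy);
}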
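Earlier in this series, `reth_db` gains the `open_db`, `open_db_read_only` and `test_utils::create_test_rw_db` helpers that the CLI and test call sites above switch to. As a hedged sketch of how a caller outside the patch might use the read-only variant against an existing datadir (assumes the `mdbx` feature is enabled; `print_genesis_hash` is a hypothetical helper, not code from the diff):

use reth_db::{database::Database, open_db_read_only, tables, transaction::DbTx};
use std::path::Path;

fn print_genesis_hash(datadir: &Path) -> eyre::Result<()> {
    // Opens an existing database read-only; unlike `init_db` it creates neither files nor tables.
    let db = open_db_read_only(datadir)?;
    // Start a read-only transaction and look up the canonical hash of block 0.
    let tx = db.tx()?;
    let genesis_hash = tx.get::<tables::CanonicalHeaders>(0)?;
    println!("genesis hash: {genesis_hash:?}");
    Ok(())
}

The read/write `open_db` helper follows the same pattern, while `init_db` remains the entry point that also creates the database and its tables when they are missing.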