diff --git a/README.md b/README.md index 744e1523..ac68bb7f 100644 --- a/README.md +++ b/README.md @@ -4,7 +4,7 @@ [![CI](https://github.com/HerodotusDev/hdp/actions/workflows/ci.yml/badge.svg)](https://github.com/HerodotusDev/hdp/actions/workflows/ci.yml) -HDP stands for Herodotus Data Processor, which able to process range of block data and retrieve valid value from proving ZK-STARK proof. CLI is mainly used for process human readable request to Cairo-Program acceptable format file. Additionally some useful features supported for develop. +HDP stands for Herodotus Data Processor, which is able to process a range of block data and retrieve valid values by proving a ZK-STARK proof. The CLI is mainly used to process human-readable requests into a Cairo-Program-acceptable format file. Additionally, some useful features are supported for development. ## Supported Features @@ -20,20 +20,11 @@ HDP stands for Herodotus Data Processor, which able to process range of block da - [x] Compile datalake 1: Fetch relevant header data and proofs from Herodotus Indexer - [x] Compile datalake 2: Fetch relevant account and storage data and proofs from RPC provider - [x] Compute aggregated function (ex. `SUM`, `AVG`) over compiled datalake result -- [x] Return general ( human readable ) and cairo formatted ( all chunked with felt size ) file - -## HDP Support - -Note : `SUM` and `AVG` expect to get number as input. - -| | SUM | AVG | -| ---------------------------- | --- | --- | -| account.nonce | ✅ | ✅ | -| account.balance | ✅ | ✅ | -| account.storage_root | - | - | -| account.code_hash | - | - | -| storage.key ( value is num ) | ✅ | ✅ | -| storage.key (value is hash ) | - | - | +- [x] Return general (human-readable) and Cairo-formatted (all values chunked to felt size) output files +- [x] Support multi-task processing with [Standard Merkle Tree](https://github.com/rkdud007/alloy-merkle-tree/blob/main/src/standard_binary_tree.rs) aggregation +- [ ] Support more datalake types: DynamicLayoutDatalake, TransactionsBySenderDatalake, etc. +- [ ] Multichain support +- [ ] Support more ZKVMs as backend options ([CAIRO](https://eprint.iacr.org/2021/1063), [RISC0](https://github.com/risc0/risc0), [SP1](https://github.com/succinctlabs/sp1), etc.) ## Install HDP @@ -54,7 +45,7 @@ Note : `SUM` and `AVG` expect to get number as input. ❯ git clone https://github.com/HerodotusDev/hdp.git # install hdp -❯ cargo install --path cli +❯ cargo install --path cli -f # Run the HDP ❯ hdp run --help @@ -88,7 +79,7 @@ Support passing argument as env variable or as arguments.
hdp run # run herodotus data processing -hdp run 0x0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000018000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000060617667000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000006073756d00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000606d696e00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000606d6178000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000000 0x00000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000001800000000000000000000000000000000000000000000000000000000000000280000000000000000000000000000000000000000000000000000000000000038000000000000000000000000000000000000000000000000000000000000000e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000009eb0f600000000000000000000000000000000000000000000000000000000009eb100000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000002010f00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000009eb0f600000000000000000000000000000000000000000000000000000000009eb100000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000002010f00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000009eb0f600000000000000000000000000000000000000000000000000000000009eb100000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000002010f00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000e00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000009eb0f600000000000000000000000000000000000000000000000000000000009eb100000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000002010f000000000000000000000000000000000000000000000000000000000000 https://eth-goerli.g.alchemy.com/v2/wTjM2yJBF9bitPNwk5ZGvSkwIKWtuuqm +hdp run 0x000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000006073756d000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000000 0x00000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004b902400000000000000000000000000000000000000000000000000000000004b9027000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000016027f2c6f930306d3aa736b3a6c6a98f512f74036d40000000000000000000000 ${Input your RPC Provider -- this example is Etherum Sepolia} ``` @@ -119,12 +110,33 @@ Options: -V, --version Print version ``` -Generate encoded task and datalake for testing purpose. The format is same as what smart contract emits (consider as batched tasks and datalakes). +Generate encoded tasks and datalakes for testing purposes. The format is the same as what smart contract emits (considered as batched tasks and datalakes). ### Encode -```bash -# e.g. 
hdp encode "avg" -b 10399900 10400000 "header.base_fee_per_gas" 1 +Some examples: + +Header value with `AVG` + +``` +hdp encode "avg" -b 4952100 4952110 "header.base_fee_per_gas" 1 +``` + +Account value with `SUM` + +``` +hdp encode "sum" -b 4952100 4952110 "account.0x7f2c6f930306d3aa736b3a6c6a98f512f74036d4.nonce" 2 +``` + +Storage value with `AVG` + +``` +hdp encode "avg" -b 5382810 5382820 "storage.0x75CeC1db9dCeb703200EAa6595f66885C962B920.0x0000000000000000000000000000000000000000000000000000000000000002" 1 +``` + +Check out the encode command for how to generate the encoded value of the targeted task and its corresponding datalake: + +```console ❯ hdp help encode Encode the task and data lake in batched format test purposes @@ -144,7 +156,7 @@ Options: ### Decode -```bash +```console ❯ hdp help decode Decode batch tasks and data lakes @@ -166,7 +178,7 @@ Options: ### Decode non-batched format -```bash +```console ❯ hdp help decode-one Decode one task and one data lake (not batched format) diff --git a/cli/src/main.rs b/cli/src/main.rs index 33ce18ce..2635ef14 100644 --- a/cli/src/main.rs +++ b/cli/src/main.rs @@ -25,19 +25,32 @@ struct Cli { #[derive(Debug, Subcommand)] enum Commands { - /// Encode the task and data lake in batched format test purposes + /// Encode the task and datalake in batched format for test purposes #[command(arg_required_else_help = true)] Encode { + /// Decide whether to run the evaluator as a follow-up step (default: false) + #[arg(short, long, action = clap::ArgAction::SetTrue)] + allow_run: bool, + /// The aggregate function id e.g. "sum", "min", "avg" aggregate_fn_id: String, /// The aggregate function context. It depends on the aggregate function aggregate_fn_ctx: Option<String>, #[command(subcommand)] command: DataLakeCommands, + + /// The RPC URL to fetch the data + rpc_url: Option<String>, + /// Path to the file to save the output result + #[arg(short, long)] + output_file: Option<String>, + /// Path to the file to save the input.json in cairo format + #[arg(short, long)] + cairo_input: Option<String>, }, - /// Decode batch tasks and data lakes + /// Decode batch tasks and datalakes /// - /// Note: Batch tasks and data lakes should be encoded in bytes[] format + /// Note: Batch tasks and datalakes should be encoded in bytes[] format #[command(arg_required_else_help = true)] Decode { /// Batched tasks bytes @@ -46,7 +59,7 @@ enum Commands { datalakes: String, }, - /// Decode one task and one data lake (not batched format) + /// Decode one task and one datalake (not batched format) #[command(arg_required_else_help = true)] DecodeOne { task: String, datalake: String }, /// Run the evaluator @@ -82,13 +95,55 @@ enum DataLakeCommands { }, } +async fn handle_run( + tasks: Option<String>, + datalakes: Option<String>, + rpc_url: Option<String>, + output_file: Option<String>, + cairo_input: Option<String>, +) { + let start_run = std::time::Instant::now(); + let config = Config::init(rpc_url, datalakes, tasks).await; + let abstract_fetcher = AbstractFetcher::new(config.rpc_url.clone()); + let tasks = tasks_decoder(config.tasks.clone()).unwrap(); + let datalakes = datalakes_decoder(config.datalakes.clone()).unwrap(); + + println!("tasks: \n{:?}\n", tasks); + println!("datalakes: \n{:?}\n", datalakes); + + if tasks.len() != datalakes.len() { + panic!("Tasks and datalakes must have the same length"); + } + + let res = evaluator( + tasks, + Some(datalakes), + Arc::new(RwLock::new(abstract_fetcher)), + ) + .await + .unwrap(); + + let duration_run = start_run.elapsed(); + println!("Time elapsed in run evaluator is: {:?}", duration_run); + + if
let Some(output_file) = output_file { + res.save_to_file(&output_file, false).unwrap(); + } + if let Some(cairo_input) = cairo_input { + res.save_to_file(&cairo_input, true).unwrap(); + } +} + #[tokio::main] async fn main() { - let start = std::time::Instant::now(); let cli = Cli::parse(); dotenv::dotenv().ok(); match cli.command { Commands::Encode { + allow_run, + rpc_url, + output_file, + cairo_input, aggregate_fn_id, aggregate_fn_ctx, command, @@ -118,6 +173,18 @@ async fn main() { println!("Original task: \n{:?}\n", tasks); let encoded_task = tasks_encoder(vec![tasks]).unwrap(); println!("Encoded task: \n{}\n", encoded_task); + + // if allow_run is true, then run the evaluator + if allow_run { + handle_run( + Some(encoded_task), + Some(encoded_datalake), + rpc_url, + output_file, + cairo_input, + ) + .await; + } } Commands::Decode { tasks, datalakes } => { let datalakes = datalakes_decoder(datalakes.clone()).unwrap(); @@ -144,35 +211,7 @@ async fn main() { output_file, cairo_input, } => { - let config = Config::init(rpc_url, datalakes, tasks).await; - let abstract_fetcher = AbstractFetcher::new(config.rpc_url.clone()); - let tasks = tasks_decoder(config.tasks.clone()).unwrap(); - let datalakes = datalakes_decoder(config.datalakes.clone()).unwrap(); - - println!("tasks: \n{:?}\n", tasks); - println!("datalakes: \n{:?}\n", datalakes); - - if tasks.len() != datalakes.len() { - panic!("Tasks and datalakes must have the same length"); - } - - let res = evaluator( - tasks, - Some(datalakes), - Arc::new(RwLock::new(abstract_fetcher)), - ) - .await - .unwrap(); - - let duration = start.elapsed(); - println!("Time elapsed in main() is: {:?}", duration); - - if let Some(output_file) = output_file { - res.save_to_file(&output_file, false).unwrap(); - } - if let Some(cairo_input) = cairo_input { - res.save_to_file(&cairo_input, true).unwrap(); - } + handle_run(tasks, datalakes, rpc_url, output_file, cairo_input).await; } } } diff --git a/crates/common/src/codec.rs b/crates/common/src/codec.rs index 550cb744..6ca3c937 100644 --- a/crates/common/src/codec.rs +++ b/crates/common/src/codec.rs @@ -5,7 +5,7 @@ use crate::{ task::ComputationalTask, }; use alloy_dyn_abi::{DynSolType, DynSolValue}; -use alloy_primitives::hex::{self, FromHex}; +use alloy_primitives::hex::FromHex; use anyhow::{bail, Ok, Result}; /// Decode a batch of tasks @@ -100,8 +100,7 @@ pub fn datalakes_encoder(datalakes: Vec) -> Result { let array_encoded_datalakes = DynSolValue::Array(encoded_datalakes); let encoded_datalakes = array_encoded_datalakes.abi_encode(); - let hex_string = hex::encode(encoded_datalakes); - Ok(format!("0x{}", hex_string)) + Ok(bytes_to_hex_string(&encoded_datalakes)) } /// Encode batch of tasks @@ -116,6 +115,5 @@ pub fn tasks_encoder(tasks: Vec) -> Result { let array_encoded_tasks = DynSolValue::Array(encoded_tasks); let encoded_tasks = array_encoded_tasks.abi_encode(); - let hex_string = hex::encode(encoded_tasks); - Ok(format!("0x{}", hex_string)) + Ok(bytes_to_hex_string(&encoded_tasks)) } diff --git a/crates/common/src/compiler/block_sampled.rs b/crates/common/src/compiler/block_sampled.rs index 10a1aa07..80917f06 100644 --- a/crates/common/src/compiler/block_sampled.rs +++ b/crates/common/src/compiler/block_sampled.rs @@ -19,7 +19,7 @@ pub async fn compile_block_sampled_datalake( block_range_end: u64, sampled_property: &str, increment: u64, - fetcher: Arc>, + fetcher: &Arc>, ) -> Result { let mut abstract_fetcher = fetcher.write().await; let property_parts: Vec<&str> = 
sampled_property.split('.').collect(); diff --git a/crates/common/src/datalake/base.rs b/crates/common/src/datalake/base.rs index 9104df92..ff735d65 100644 --- a/crates/common/src/datalake/base.rs +++ b/crates/common/src/datalake/base.rs @@ -10,68 +10,82 @@ use crate::{ use super::Datalake; -//============================================================================== -// format for input.json -// 1 task = batched blocks +/// Datalake result from the compilation process +/// +/// It contains compiled_results, headers, accounts, storages, and mmr_meta +/// +/// All of this data is required to execute the datalake #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct DatalakeResult { + /// Targeted datalake's compiled results pub compiled_results: Vec<String>, + /// Headers required for datalake pub headers: Vec<Header>, + /// Accounts required for datalake pub accounts: Vec<Account>, + /// Storages required for datalake pub storages: Vec<Storage>, + /// MMR metadata of the MMR that stores the headers pub mmr_meta: MMRMeta, } -//============================================================================== - -/// DatalakeBase is a type that can be used to store data +/// DatalakeBase is a unified datalake structure that contains the commitment, datalake type, and result +/// +/// It is used to identify the datalake and store the result from the compilation process pub struct DatalakeBase { /// Datalake commitment. It is used to identify the datalake pub commitment: String, - pub datalakes_pipeline: Vec, - pub datapoints: Vec, + /// Datalake type + pub datalake_type: Option<Datalake>, + /// Datalake result from the compilation process + pub result: Option<DatalakeResult>, } impl fmt::Debug for DatalakeBase { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("DatalakeBase") .field("commitment", &self.commitment) - .field("datalakes_pipeline", &"datalakes_pipeline") - .field("datapoints", &"datapoints") + .field("datalakes_pipeline", &self.datalake_type) + .field("result", &self.result) .finish() } } impl DatalakeBase { + /// Initialize DatalakeBase with a commitment and datalake type pub fn new(commitment: &str, datalake_type: Datalake) -> Self { Self { commitment: commitment.to_string(), - datalakes_pipeline: vec![datalake_type], - datapoints: Vec::new(), + datalake_type: Some(datalake_type), + result: None, } } - // TODO: decide if we want to merge datalakes - // fn merge(&mut self, other: DatalakeBase) { - // self.compilation_pipeline.extend(other.compilation_pipeline); - // self.identifier = format!("{}{}", self.identifier, other.identifier); - // } - - // returns the result of the compilation of the datalake + /// Compile the datalake, meaning fetch the relevant headers, accounts, storages, and mmr_meta data. + /// + /// It also combines the target datalake's datapoints into compiled_results.
pub async fn compile( &mut self, - fetcher: Arc>, + fetcher: &Arc>, ) -> Result { - let datalake_type = self.datalakes_pipeline.first().unwrap(); - let result_datapoints = match datalake_type { - Datalake::BlockSampled(datalake) => datalake.compile(fetcher.clone()).await?, - Datalake::DynamicLayout(_) => bail!("dynamic datalake type doesn't support"), - Datalake::Unknown => { - bail!("Unknown datalake type"); - } - }; + let datalake_type = &self.datalake_type; + match datalake_type { + Some(datalake) => { + let result_datapoints = match datalake { + Datalake::BlockSampled(datalake) => datalake.compile(fetcher).await?, + Datalake::DynamicLayout(_) => { + bail!("dynamic datalake type doesn't support yet") + } + Datalake::Unknown => { + bail!("Unknown datalake type"); + } + }; - Ok(result_datapoints) + self.result = Some(result_datapoints.clone()); + Ok(result_datapoints) + } + None => bail!("Datalake type is not defined"), + } } } diff --git a/crates/common/src/datalake/block_sampled.rs b/crates/common/src/datalake/block_sampled.rs index 565b9f42..078e23a7 100644 --- a/crates/common/src/datalake/block_sampled.rs +++ b/crates/common/src/datalake/block_sampled.rs @@ -67,7 +67,7 @@ impl BlockSampledDatalake { ]); let encoded_datalake = tuple_value.abi_encode_sequence().unwrap(); - Ok(format!("0x{}", hex::encode(encoded_datalake))) + Ok(bytes_to_hex_string(&encoded_datalake)) } /// Get the commitment hash of the block sampled datalake @@ -104,7 +104,7 @@ impl BlockSampledDatalake { }) } - pub async fn compile(&self, fetcher: Arc>) -> Result { + pub async fn compile(&self, fetcher: &Arc>) -> Result { compile_block_sampled_datalake( self.block_range_start, self.block_range_end, diff --git a/crates/common/src/datalake/dynamic_layout.rs b/crates/common/src/datalake/dynamic_layout.rs index 950a9319..c3bac7cb 100644 --- a/crates/common/src/datalake/dynamic_layout.rs +++ b/crates/common/src/datalake/dynamic_layout.rs @@ -1,17 +1,15 @@ use alloy_dyn_abi::{DynSolType, DynSolValue}; -use alloy_primitives::{ - hex::{self, FromHex}, - keccak256, U256, -}; +use alloy_primitives::{hex::FromHex, keccak256, U256}; use anyhow::{bail, Result}; -use crate::compiler::test::test_closer; +use crate::{compiler::test::test_closer, utils::bytes_to_hex_string}; use super::{ base::{DatalakeBase, Derivable}, Datalake, }; +// TODO: DynamicLayoutDatalake is incomplete #[derive(Debug, Clone, PartialEq)] pub struct DynamicLayoutDatalake { pub block_number: u64, @@ -70,7 +68,7 @@ impl DynamicLayoutDatalake { ]); let encoded_datalake = tuple_value.abi_encode(); - Ok(format!("0x{}", hex::encode(encoded_datalake))) + Ok(bytes_to_hex_string(&encoded_datalake)) } pub fn deserialize(serialized: String) -> Result { diff --git a/crates/common/src/datalake/mod.rs b/crates/common/src/datalake/mod.rs index 00f8a544..6bd30a78 100644 --- a/crates/common/src/datalake/mod.rs +++ b/crates/common/src/datalake/mod.rs @@ -9,6 +9,7 @@ pub mod base; pub mod block_sampled; pub mod dynamic_layout; +/// Type of datalake #[derive(Debug, Clone, PartialEq)] pub enum Datalake { BlockSampled(BlockSampledDatalake), @@ -16,6 +17,7 @@ pub enum Datalake { Unknown, } +/// Transform different datalake types into DatalakeBase impl Derivable for Datalake { fn derive(&self) -> DatalakeBase { match self { @@ -29,7 +31,7 @@ impl Derivable for Datalake { } impl Datalake { - pub fn serialize(&self) -> Result { + pub fn encode(&self) -> Result { match self { Datalake::BlockSampled(datalake) => datalake.encode(), Datalake::DynamicLayout(_) => bail!("Unsupported datalake 
type"), diff --git a/crates/common/src/fetcher/mod.rs b/crates/common/src/fetcher/mod.rs index 0ef82876..7b90e7c7 100644 --- a/crates/common/src/fetcher/mod.rs +++ b/crates/common/src/fetcher/mod.rs @@ -1,5 +1,8 @@ -use anyhow::Result; -use std::{collections::HashMap, time::Instant}; +use anyhow::{bail, Result}; +use std::{ + collections::{HashMap, HashSet}, + time::Instant, +}; use crate::{ block::{account::Account, header::BlockHeader}, @@ -18,18 +21,13 @@ pub mod rpc; /// `AbstractFetcher` abstracts the fetching of data from the RPC and memory. /// It uses a `MemoryFetcher` and a `RpcFetcher` to fetch data. /// -/// TODO: Lock only rpc fetcher and keep the memory fetcher unlocked +/// TODO: Optimization idea, Lock only rpc fetcher and keep the memory fetcher unlocked /// but handle requests so that it would not make duplicate requests pub struct AbstractFetcher { memory: MemoryFetcher, rpc: RpcFetcher, } -pub struct ChunkHeaderResult { - pub headers: Vec, - pub missing_blocks: Vec, -} - impl AbstractFetcher { pub fn new(rpc_url: String) -> Self { Self { @@ -46,13 +44,8 @@ impl AbstractFetcher { ) -> Result<(StoredHeaders, MMRMeta)> { //? A map of block numbers to a boolean indicating whether the block was fetched. let mut blocks_map: HashMap = HashMap::new(); - // TODO: in v0 we assume all the blocks in data lake are exist in 1 MMR - let mut mmr_assume_for_now = MMRMeta { - id: 0, - root: "".to_string(), - size: 0, - peaks: vec![], - }; + + let mut relevant_mmr: HashSet = HashSet::new(); // 1. Fetch headers from memory for block_number in &block_numbers { @@ -93,12 +86,14 @@ impl AbstractFetcher { mmr_meta.mmr_size, mmr_meta.mmr_peaks.clone(), ); - mmr_assume_for_now = MMRMeta { + + relevant_mmr.insert(MMRMeta { id: mmr_meta.mmr_id, root: mmr_meta.mmr_root.clone(), size: mmr_meta.mmr_size, peaks: mmr_meta.mmr_peaks.clone(), - }; + }); + blocks_map.insert( *block_number, ( @@ -121,6 +116,7 @@ impl AbstractFetcher { "❌ Something went wrong while fetching MMR data from indexer: {}", e ); + return Err(e); } } @@ -133,7 +129,18 @@ impl AbstractFetcher { stored_headers.insert(*block_number, header.clone()); } }); - Ok((stored_headers, mmr_assume_for_now)) + + // TODO: in v1 allowed to handle all the blocks in datalake are exist in 1 MMR + let mmr_meta_result = match relevant_mmr.len() { + 0 => None, + 1 => relevant_mmr.iter().next().cloned(), + _ => relevant_mmr.iter().next().cloned(), + }; + + match mmr_meta_result { + Some(mmr_meta) => Ok((stored_headers, mmr_meta)), + None => bail!("No MMR metadata found"), + } } // Unoptimized version of get_rlp_header, just for testing purposes diff --git a/crates/common/src/task.rs b/crates/common/src/task.rs index 39101b20..cf452df8 100644 --- a/crates/common/src/task.rs +++ b/crates/common/src/task.rs @@ -1,15 +1,12 @@ use std::str::FromStr; use alloy_dyn_abi::{DynSolType, DynSolValue}; -use alloy_primitives::{ - hex::{self, FromHex}, - keccak256, FixedBytes, -}; +use alloy_primitives::{hex::FromHex, keccak256, FixedBytes}; use anyhow::Result; use crate::{ datalake::base::DatalakeBase, - utils::{bytes32_to_utf8_str, utf8_to_fixed_bytes32}, + utils::{bytes32_to_utf8_str, bytes_to_hex_string, utf8_str_to_fixed_bytes32}, }; /// ComputationalTask represents a task for certain datalake with a specified aggregate function @@ -45,7 +42,7 @@ impl ComputationalTask { match &self.datalake { None => { let aggregate_fn_id_value = DynSolValue::FixedBytes( - alloy_primitives::FixedBytes(utf8_to_fixed_bytes32(&self.aggregate_fn_id)), + 
alloy_primitives::FixedBytes(utf8_str_to_fixed_bytes32(&self.aggregate_fn_id)), 32, ); @@ -58,7 +55,7 @@ impl ComputationalTask { DynSolValue::Tuple(vec![aggregate_fn_id_value, aggregate_fn_ctx_value]); let encoded_datalake = header_tuple_value.abi_encode(); - Ok(format!("0x{}", hex::encode(encoded_datalake))) + Ok(bytes_to_hex_string(&encoded_datalake)) } Some(datalake) => { let identifier_value = DynSolValue::FixedBytes( @@ -67,7 +64,7 @@ impl ComputationalTask { ); let aggregate_fn_id_value = DynSolValue::FixedBytes( - FixedBytes(utf8_to_fixed_bytes32(&self.aggregate_fn_id)), + FixedBytes(utf8_str_to_fixed_bytes32(&self.aggregate_fn_id)), 32, ); let aggregate_fn_ctx_value = match &self.aggregate_fn_ctx { @@ -82,7 +79,7 @@ impl ComputationalTask { ]); let encoded_datalake = header_tuple_value.abi_encode_sequence().unwrap(); - Ok(format!("0x{}", hex::encode(encoded_datalake))) + Ok(bytes_to_hex_string(&encoded_datalake)) } } } @@ -97,8 +94,8 @@ impl ComputationalTask { let datalake_value = if let Some(datalake) = value[0].as_uint() { let datalake = DatalakeBase { commitment: format!("0x{:x}", datalake.0), - datalakes_pipeline: vec![], - datapoints: vec![], + datalake_type: None, + result: None, }; Some(datalake) diff --git a/crates/common/src/types.rs b/crates/common/src/types.rs index 77b429ff..b704698e 100644 --- a/crates/common/src/types.rs +++ b/crates/common/src/types.rs @@ -3,6 +3,8 @@ use alloy_primitives::FixedBytes; use reth_primitives::hex::FromHex; use serde::{Deserialize, Serialize}; +use crate::utils::bytes_to_hex_string; + //============================================================================== // for int type, use uint type // for string type, if formatted, use chunk[] to store field elements @@ -291,12 +293,10 @@ pub fn split_little_endian_hex_into_parts(hex_str: &str) -> Uint256 { let high_part = fix_hex[..16].to_vec(); let low_part = fix_hex[16..].to_vec(); - let high = hex::encode(high_part); - let low = hex::encode(low_part); Uint256 { - high: format!("0x{}", high), - low: format!("0x{}", low), + high: bytes_to_hex_string(&high_part), + low: bytes_to_hex_string(&low_part), } } diff --git a/crates/common/src/utils.rs b/crates/common/src/utils.rs index 4cbdf7ad..ac336b08 100644 --- a/crates/common/src/utils.rs +++ b/crates/common/src/utils.rs @@ -1,7 +1,6 @@ use alloy_primitives::hex::{self}; use alloy_primitives::keccak256; use anyhow::{bail, Result}; -use std::fmt::Write; use std::str::from_utf8; const U256_BYTE_SIZE: usize = 32; @@ -26,12 +25,9 @@ pub fn bytes32_to_utf8_str(bytes32: &[u8]) -> Result { Ok(from_utf8(&bytes32[..end_pos]).map(|s| s.to_string())?) } -pub fn utf8_to_fixed_bytes32(s: &str) -> [u8; 32] { - let mut fixed_bytes = [0u8; 32]; // Initialize a fixed-size byte array with zeros - let bytes = s.as_bytes(); // Convert the string to a byte slice - - // Copy the bytes of the string into the beginning of the fixed-size byte array, - // truncating if necessary to fit into 32 bytes. 
+pub fn utf8_str_to_fixed_bytes32(s: &str) -> [u8; 32] { + let mut fixed_bytes = [0u8; 32]; + let bytes = s.as_bytes(); for (i, &byte) in bytes.iter().enumerate().take(32) { fixed_bytes[i] = byte; } @@ -40,15 +36,7 @@ pub fn utf8_to_fixed_bytes32(s: &str) -> [u8; 32] { } pub fn bytes_to_hex_string(bytes: &[u8]) -> String { - // Start with "0x" prefix for the hex string - let mut hex_str = String::from("0x"); - - // Convert each byte to hex and append to the string - for &byte in bytes.iter() { - write!(hex_str, "{:02x}", byte).expect("Failed to write"); - } - - hex_str + format!("0x{}", hex::encode(bytes)) } pub fn last_byte_to_u8(bytes: &[u8]) -> u8 { diff --git a/crates/evaluator/src/aggregation_functions/integer.rs b/crates/evaluator/src/aggregation_functions/integer.rs index 135a83ae..82511350 100644 --- a/crates/evaluator/src/aggregation_functions/integer.rs +++ b/crates/evaluator/src/aggregation_functions/integer.rs @@ -165,6 +165,7 @@ pub fn count_if(values: &[String], ctx: &str) -> Result { Ok(condition_satisfiability_count.to_string()) } +// Handle division properly using U256 type fn divide(a: U256, b: U256) -> String { if b.is_zero() { return "Division by zero error".to_string(); diff --git a/crates/evaluator/src/aggregation_functions/mod.rs b/crates/evaluator/src/aggregation_functions/mod.rs index 6179c982..d2ac71cf 100644 --- a/crates/evaluator/src/aggregation_functions/mod.rs +++ b/crates/evaluator/src/aggregation_functions/mod.rs @@ -62,7 +62,6 @@ impl AggregationFunction { pub fn operation(&self, values: &[String], ctx: Option) -> Result { // Remove the "0x" prefix if exist, so that integer functions can parse integer values - let inputs: Vec = values .iter() .map(|hex_str| { @@ -76,11 +75,11 @@ impl AggregationFunction { .collect(); match self { + // Aggregation functions for integer values AggregationFunction::AVG => integer::average(&inputs), AggregationFunction::BLOOM => integer::bloom_filterize(&inputs), AggregationFunction::MAX => integer::find_max(&inputs), AggregationFunction::MIN => integer::find_min(&inputs), - AggregationFunction::MERKLE => string::merkleize(values), AggregationFunction::STD => integer::standard_deviation(&inputs), AggregationFunction::SUM => integer::sum(&inputs), AggregationFunction::COUNTIF => { @@ -90,6 +89,8 @@ impl AggregationFunction { bail!("Context not provided for COUNTIF") } } + // Aggregation functions for string values + AggregationFunction::MERKLE => string::merkleize(values), } } } diff --git a/crates/evaluator/src/lib.rs b/crates/evaluator/src/lib.rs index 44faab48..efc8d1d6 100644 --- a/crates/evaluator/src/lib.rs +++ b/crates/evaluator/src/lib.rs @@ -44,11 +44,11 @@ pub struct EvaluationResult { #[derive(Serialize, Deserialize, Debug)] pub struct EvaluatedDatalake { /// encoded datalake - encoded_datalake: String, + pub encoded_datalake: String, /// ex. dynamic datalake / block sampled datalake - datalake_type: u8, + pub datalake_type: u8, /// ex. 
"header", "account", "storage" - property_type: u8, + pub property_type: u8, } impl EvaluationResult { @@ -61,6 +61,7 @@ impl EvaluationResult { encoded_datalakes: HashMap::new(), } } + pub fn build_merkle_tree(&self) -> (StandardMerkleTree, StandardMerkleTree) { let mut tasks_leaves = Vec::new(); let mut results_leaves = Vec::new(); @@ -248,7 +249,7 @@ impl Default for EvaluationResult { } pub async fn evaluator( - mut compute_expressions: Vec, + mut computational_tasks: Vec, datalake_for_tasks: Option>, fetcher: Arc>, ) -> Result { @@ -257,7 +258,7 @@ pub async fn evaluator( // If optional datalake_for_tasks is provided, need to assign the datalake to the corresponding task if let Some(datalake) = datalake_for_tasks { for (datalake_idx, datalake) in datalake.iter().enumerate() { - let task = &mut compute_expressions[datalake_idx]; + let task = &mut computational_tasks[datalake_idx]; task.datalake = match datalake { Datalake::BlockSampled(block_datalake) => Some(block_datalake.derive()), @@ -270,37 +271,51 @@ pub async fn evaluator( } // Evaulate the compute expressions - for compute_expression in compute_expressions { - let computation_task_id = compute_expression.to_string(); - let encoded_task = compute_expression.encode()?; - let mut datalake = compute_expression.datalake.unwrap(); - // TODO: in v0 we consider datalake pipeline is single datalake - let encoded_datalake = datalake.datalakes_pipeline[0].serialize()?; - let datalake_result = datalake.compile(fetcher.clone()).await?; - let aggregation_fn = AggregationFunction::from_str(&compute_expression.aggregate_fn_id)?; - let aggregation_fn_ctx = &compute_expression.aggregate_fn_ctx; - let result = aggregation_fn.operation( - &datalake_result.compiled_results, - aggregation_fn_ctx.clone(), - )?; - results - .compiled_results - .insert(computation_task_id.clone(), result); - results.ordered_tasks.push(computation_task_id.clone()); - results - .fetched_datalake_results - .insert(computation_task_id.clone(), datalake_result); - results - .encoded_tasks - .insert(computation_task_id.clone(), encoded_task); - results.encoded_datalakes.insert( - computation_task_id, - EvaluatedDatalake { - encoded_datalake, - datalake_type: datalake.datalakes_pipeline[0].get_datalake_type(), - property_type: datalake.datalakes_pipeline[0].get_property_type(), - }, - ); + for task in computational_tasks { + // task_commitment is the unique identifier for the task + let task_commitment = task.to_string(); + // Encode the task + let encoded_task = task.encode()?; + let mut datalake_base = match task.datalake { + Some(datalake) => datalake, + None => bail!("Task is not filled with datalake"), + }; + + let datalake_result = datalake_base.compile(&fetcher).await?; + match datalake_base.datalake_type { + Some(datalake) => { + let encoded_datalake = datalake.encode()?; + let aggregation_fn = AggregationFunction::from_str(&task.aggregate_fn_id)?; + let aggregation_fn_ctx = task.aggregate_fn_ctx; + // Compute datalake over specified aggregation function + let result = aggregation_fn + .operation(&datalake_result.compiled_results, aggregation_fn_ctx)?; + // Save the datalake results + results + .compiled_results + .insert(task_commitment.to_string(), result); + // Save order of tasks + results.ordered_tasks.push(task_commitment.to_string()); + // Save the fetched datalake results + results + .fetched_datalake_results + .insert(task_commitment.to_string(), datalake_result); + // Save the task data + results + .encoded_tasks + .insert(task_commitment.to_string(), 
encoded_task); + // Save the datalake data + results.encoded_datalakes.insert( + task_commitment, + EvaluatedDatalake { + encoded_datalake, + datalake_type: datalake.get_datalake_type(), + property_type: datalake.get_property_type(), + }, + ); + } + None => bail!("Datalake base is not filled with specific datalake"), + } } Ok(results) diff --git a/crates/evaluator/tests/evaluation_test.rs b/crates/evaluator/tests/evaluation_test.rs new file mode 100644 index 00000000..0aaec999 --- /dev/null +++ b/crates/evaluator/tests/evaluation_test.rs @@ -0,0 +1,103 @@ +#[cfg(test)] +mod test { + use common::{ + datalake::base::DatalakeResult, + types::{Account, Header, HeaderProof, MMRMeta, MPTProof, Storage}, + }; + use evaluator::{EvaluatedDatalake, EvaluationResult}; + + fn setup() -> EvaluationResult { + let mut init_eval_result = EvaluationResult::new(); + init_eval_result.fetched_datalake_results.insert( + "0x242fe0d1fa98c743f84a168ff10abbcca83cb9e0424f4541fab5041cd63d3387".to_string(), + DatalakeResult { + compiled_results: vec!["0x9184e72a000".to_string()], + headers: vec![Header { + rlp: "f90253a008a4f6a7d5055ce465e285415779bc338134600b750c06396531ce6a29d09f4ba01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347941268ad189526ac0b386faf06effc46779c340ee6a0fa23637d8a5d4a624479b33410895951995bae67f7c16b00859f9ac630b9e020a0792c487bc3176e482c995a9a1a16041d456db8d52e0db6fb73b540a64e96feaca04406def0dad7a6c6ef8c41a59be6b5b89124391a5b0491c8a5339e859e24d7acb901001a820024432050a200d1bc129162042984e09002002806340a14630c0aca5060c140a0608e043199e90280a1418cb89f1020085394a48f412d00d05041ad00a09002801a30b50d10c008522a2203284384841e055052404040710462e48103580026004a4e6842518210c2060c0729944118e4d0801936d020008811bb0c0464028a0008219056543b1111890cac50c04805000a400040401089904927409ec6720b8001c80a204628d8400064b402a1220480c21418480c24d00446a743000180a880128245028010a00103a8036b06c119a20124c32482280cc14021b430082a9408840030d46c062010f0b290c194040888189e081100c1070280304c0a01808352229a8401c9c38084017f9a188465df90188a4e65746865726d696e64a0178bae25662326acf0824d8441db8493865a53b8c627dc8aea5eb50ed2102fdc8800000000000000008401d76098a06eb2bc6208c3733aa1158ff8a100cb5c7ad1706ac6c3fb95d28f28007a770403808404c20000a0195eac87285a920cb37eb2b2dcf6eb9853efa2547c386bfe58ca2ff0fe167eb5".to_string(), + proof: HeaderProof { + leaf_idx: 660751, + mmr_path: vec![ + "0x50b38c27a0e12585ae387c5fd218ccaea57f7ff72cd4739ed9ff0a29ba6fe7a".to_string(), + "0x072bda07a8e5d5e8ad4b09bdc39c842d2ae64c7e27da33030f7ae9ad0f67295".to_string(), + "0x4a52af19f212ccf13b343003c02169115c68af56a0ed3f8efe8f24fc6e7be30".to_string(), + "0x5b6148cde08614b2c03dca0ff0cc89ab6f68f42ed3a7039e8fb4bb96d18dab4".to_string(), + "0x123a65b944f582989bed238cfbc60a4b05b06a279e2618941075abb075262ae".to_string(), + ], + }, + }], + accounts: vec![ + Account { + address: "0x75cec1db9dceb703200eaa6595f66885c962b920".to_string(), + account_key: "0x962f445fc8476432660877b666f653759ea69189b60d2f4a7008e70555746ad1".to_string(), + proofs: vec![MPTProof { + block_number: 5382810, + proof: vec![ + 
"0xf90211a004a07b0ced9c4e49cf3574d029b4aca46893aaead889508635b766b8bd9ff49aa035557e7ab5adda1f7876e96caf874a825a03267c9bcbd85e14f3578f7b80980ba05190d1fdc6e8506a5cc08e7291498d62aafc44913c4b47dc998d3cff5a7fee29a0cc16f65cc93a89251834e9e703f7bca425ad644dcb8d7502870439a47e7377c3a014623f34ab8b17adca3cf7648bac3f59b67fccf9082cf8bfd1a5f58a3cc5483da07f046112f9c54206ecf2379d2c75c6a343e19f19563a615163d7f032e54b70baa0869bb928152f852a8fb130ba8b95597a49a7c4b53cb6ab7af56f0f2e0a9d22f9a0effce50b7901262428133a0829fc11baf319f1a0a5388ddb0546a55d26ddb01ca096bedf7371a32ebbfbf159c3688efb85b675fc9968274d30f436025735633fd9a09ae43877fde992c6b39eed307abcea8a922918ceb672cc5257f3a2f1d23210d3a0c496cfc0e6c6d082ad5d80d827ad3fd748da5fef22e321f98d110f55166c0104a012155dffd30839241dad6bdba97b30b3b0368ceaf46069aba61cf63c7735ef8aa02b78b3f87d1c29a10c6b584e47f2df8b2f6d333b6c03f62413db4fc732d8b543a05bd948e3417c7e63795702d6d8b249d6bad8f5b47e05058ad83f869e719c76d9a037f216a3e0a186c53c6216a6990f422bf38c28e68e4d29f65feca1d9d518acdca0e56801140a1beffc0a88cad0b104d40024b5bf9586d175496821aff4a649cb3380".to_string(), + "0xf90211a03f3580144cee82b6906ca3c934415e6b729e301bc1d2267f115e55ae0bd863b0a07a99440f7094ed5913f8b84f4adfed5f7c5a6fb472aca8c6c121f21c4c2568bea071880cb0a7afa1ff0ef3e4925dd813108ce7ba0ab307e6360d686f05f6c3ad64a07beefec05c82c8047f804931cc7c4641f6336b89b26f133afd7d4e02e58c32eea04c75cf284cd38baff74dcd2af18f1826aa7324d0d71fedbf686bfb9cd75c49e1a02c46c881f9ee6f9b938848e291cf72a18b622380e7033c191562cafddf528b2ca0acf9ae3071a0c5e58391f4b89c6e0a208c7b59bccb1b924dc969ced2c63d8105a01161e667fed4e9e05cdc6fd2fd7e684088ac4ab7f53ca3f1d78a34672651de83a0b6fb07d86d8868648d9a590bd28b7729143a42bca173002a599ca97935beb5caa060c35a900422f89e0d198859069c318255a6a3bbde2de3c2a82c4616febe648ba05fa9f9f55d1b1df4727a469f8c5fed99f5fa03c41e85e5910fd862edebb223a7a0f0ccedb4844f906a3f625945e8b2e1f8c89427612449e93d5c5bd98405bcc7aaa0b19a8af79ddf60dc01378271193e1dc2eda727c10cd76981924457ab8c420543a04961963e7ecd9f398dc7fdebfe8430e5a177c994d3a174a549f622190f5513c6a027c3e382e26b35145fe33fce2342fbd99e2ee6c84da7d4e2bffb228b7d11d3c2a02f082b9dabf679dabe90c6846a161b86e05ed5ceed7d0a90df02cd5bd7f2d51a80".to_string(), + "0xf90211a0a6da5c364983d1e2e687865457299aa8c13081d115a3d51f35662dfa7eac6256a06bb734ec57173f79b02c57330bef05932888d62ad7fc82741db70e5de9bd61ada0f89e110878f6c10309a06f3dbca32dda58d6d2b486388641a19b928eb1b92421a0b9cac6354d99f071864ff640d89808b5750410e160a4c5b58de993b33229c552a097ad71a3972c6ce83fbe80e5c9b47afe3ba4e6adf960b496418c4b89197534a0a06266177614cce023fe7930547821cf882abd5ac9a3b4ca603cbd0f947d892261a02258a7be0550675c2c7367a365a9af7c5cbd08bdcc305bb36002b7da420c30a8a0a91e6bb57b51a7013223f6f1dfba9d12eecf493c53d455676b705baa5136dedda0b9b8694642db41a63b34184bcbf5de6fc4991edf2a0443ec46423e517a6d86e4a049facc47454f6ba7873c4895e8e6f50d7f3d1d1b2d4786f7736d425be3479455a088aac371d4bbe7d3e5bc347b1d2880510788344255ba94ef91abf01c035f0ed5a02603ae41fec37271217afae503906ba86bd4061fce8432c3a5e37eb033a6df55a0009eb7c64ce5f55c615f00c1e627a91a029bccc4e4d01a45f03ca9645d1b4d21a0c993a83a7a5ddd4cccfbdc5594b2302ca2d0502a0355d8e8cb21df916b0a2ce4a0368450b4c14c0b24756db1a6d49a0c442cfba70b5c1c52d0a09b39345d8a4375a0e113f47236545878f0bfdf607ab2b981e7de092b493ce2844aa6770ee37bddcb80".to_string(), + 
"0xf90211a0b177b8f619430bc90376aae7049633893351c83434be732535d151c4c4fc6c9da0d58aeb638a0025a352f5b1918dc8813fe8d1263d014a08fcfa6ab18ef2ea3c54a069fc4557f533512e960ad4368e59bfc120a45e4208cb281a216c8785dd12eb09a029077a02afab0f25435cc5d45073b8040e08b240f94be2e8ece71448acc6c43ea0e232165d9892eaefbde628466c4632dbd222a3004b44c51074d599d38238b7b3a0203f833ce8ee72688f9624a8c221f5751ed4299d4f1b5d2a61ee3873fda6252ca0e451721694b4709cd9d2943ea9c5b456b25ce27f5ad02f27651306af1cdd106ea0aae712f99614bd0e461d39d2a69dc3c11ba1cd105c0f03e1970bcc3c1a821738a078c74f82f76a85da907851c7416ba8f70468f019ee44d42c083ab2d2b727d819a0fa1b0278b6a8e9e4b39ec8feae00a3e327fad7a55b1d7f7234470d0f731967baa095570c2a2a5baef7263820ea08e8c6d612f94ff93a5c884f72d78ad0c687a149a0ec02904efceee3647640953fe1e9b895beb240993a8da4b9eb8f17a77ad54258a0dd3df430eead5f118197fa38f4f9847068d452c17871d817af9214091d710ec3a0877c37633ca01e4032496811068cb14d0fe64aea330f583c355ef222bb04125ca000dbb8ae704b67bd38551538260e29601961c0ce53769e55246b290d5c00509ba05bffed67919c8cce7e9752face925963512290eb66118b7503bbe8df74e7253180".to_string(), + "0xf90211a0821aeff02ca301eb7e092e275fa6b8c115e72ec56ed690a06bbeb2f5647f0d69a0e8a97331932d7037d648a31c2b4d888221ac650520d37e2add9b96c537098c27a0b0dec54e94587c350f4f7dff5f03d0e9cd9745619a60ff4dcc7279ecf104a158a063892f2952a504af354873f1cccc86dc32a909db3755a37d14fcf07ffae7f784a0bce4c1d4e9fde0d96db53a73de689f53022c61e743b4fee13a2bda25eac1c26fa02d579a264ff31036f717769ed3f6ee42710abba3ef9c6208fff18e906f941baaa01aa910553b488a0ca451158abdbf2ca69f29d3358d4eb54bef355f8762e7d2b2a0f78e2963f6d8ccd226c3a92688b32795ec34a5ea0d486707dd038904ddd45ec2a0df5ec404a1c485dfe5a34e39a62381e175a02017be80d7aa42d86771a5522c82a056a3f06aecdfaa0c55db6a8c792431bfba2c3b3c2bf7a41122c293351c9f5220a0145bb193f9c192bd370b654e5c7b3e723332f622e791ff9a26228a55b66ae1f5a02a954bd07252e904225825a28dfd869aa3c52ad5519e4a76e3f0c8ff3dd719afa073c266d70e9614bbb1eac3a95e15239388cb47cac7a5acea7ccd40a036c65c64a0499ea61f4f8a09c34cc8ab7b72b934878c0bfc1bd918e9593bda3cccd20a5a84a074bc941cd484458b298c0117c1585835040b6b65384ccaeda58771c4155f1e8ca0aa91a0b89bba3017911a95315e0e2a7d33927d4d29e3b4a5c35e4e7fff5bafd780".to_string(), + "0xf901b1a06d6223af2401971b5d3667a3a58a872ea759e76582fb375e4f2de0e420df275ea0f158252d20b99f3aa36e5b97c87644eaabc50f582e769ea41cf18f7ae3602227a0a4faeacc33284fdd0eafce32e0776543e3ac349de68dfcb7abcc40b0ae82df5fa0245f6fda91c1d0dd6036c8799a9338cbf85cbbca8a3a45f35a27bb076d10cb65a080d306d21c5efccfa655b98f48b2811000fe6f60a9aebd8fdcbde7589c748e96a077499f3ba879737a73d6628cbe5d5b8ad598430031ca879cdcb3a2509d3f7d5fa0c91ebaef1a0e560845ba673efd813a313e8b9e870524adc4aa4cb6ce4eb47358a0d099e0247546af785a392f5799fb7a3eb712ca9e32dde36f49d65a61d57426e2a02aaaa42933c19eec648bef646eda705a1e84cffbe4ecd62a31862aee16e05241a06e516cdf1f81d33ffae52ca5bf785219501710b5302738b2c405127406ef3c9480a0e412c32035edec4058b02f8137c18a403f5d0274e1ca1f0beff3257f61788af8a0be49c166207007fd651f379fdd6a16bea5854e57e6fcf0109551e5d7f28f883680a04086d5b652c856858cefec39b45e2601955efa89cfcfc8d42583f954f97bcf1e8080".to_string(), + "0xf8518080808080a01922ad14def89076bde0011d514a50cae7632d617136bb83c1b2fcbed3383c7380808080808080a0e81a4320e846af94db949f1a5298f425864e8eecbe8b72342b0aea33c0ea6e3c808080".to_string(), + "0xf86c9d3fc8476432660877b666f653759ea69189b60d2f4a7008e70555746ad1b84cf84a018612309ce54000a069bbf0407f9d5438512c6218768a9581f377fa5dc119ea1409b917b75c242e1ca0eab3448e22d0f75e09ed849b2e87ac6739db4104db4eaeeffcc66cfa819755fd".to_string(), + ]} + ]}], + storages: vec![ + Storage { + address: 
"0x75cec1db9dceb703200eaa6595f66885c962b920".to_string(), + slot: "0x0000000000000000000000000000000000000000000000000000000000000002".to_string(), + storage_key: "0x405787fa12a823e0f2b7631cc41b3ba8828b3321ca811111fa75cd3aa3bb5ace".to_string(), + proofs: vec![MPTProof { + block_number: 5382810, + proof: vec![ + "0xf8918080a0b7a7c859e6ddbad6c18adb60b9f48842e652021b4f8b875894b8b879568629f880a0e7f9c6d331c7d110c992550a7baa3e051adc1e26a53d928dbd517a313d221863808080808080a0e40cf9c20b1e8e4aaf3201dd3cb84ab06d2bac34e8dc3e918626e5c44c4f0707808080a0c01a2f302bfc71151daac60eeb4c1b73470845d4fe219e71644752abaafb02ab80".to_string(),"0xe9a0305787fa12a823e0f2b7631cc41b3ba8828b3321ca811111fa75cd3aa3bb5ace878609184e72a000".to_string(), + ], + }], + } + ], + mmr_meta: MMRMeta { + root: "0x7e956408569267d909a31fa404a972db1360a7e02bc3858e97c7c21ff394057".to_string(), + size: 660813, + id:19, + peaks: vec!["0x06a2bfcd354f679b547aa151e4462b6bae75fd80a2a92e3767b24eab609d1d4".to_string(), "0x5967364928f2fee43c8244dc6290cd9d3ea8e9dcb4e072ef6a099e9605f241d".to_string(), "0x30d5538138ec908e6f3b6429ae49702607432c224ef10be72d23c11556f06a0".to_string(), "0x308f10140fbc6043127353ee21fab20d6c12f00ac7a8928911611b71ce5b1ab".to_string(), "0x122a500639912a0a918dc32a73b1268f3417abf6b72a6a0dc814f0986f5124d".to_string(), "0x3b2087462ad3d5c84593fdfeb72f7972695a35097e184d017470b5f99c411fd".to_string(), "0x75c56dd4e70cac0dd54944d78632700da4329824239eb1be974e3b66b56c8b9".to_string(), "0x00225132138a053a102fab30cdd9e04cdcb25ded860d7d93c2a288c7532273e".to_string(), "0x6e5d1c234047cd531f2a1406ab894f4c9487dbef207cf870cca897dea3cf5ee".to_string()], + }, + }, + + ); + init_eval_result.compiled_results.insert( + "0x242fe0d1fa98c743f84a168ff10abbcca83cb9e0424f4541fab5041cd63d3387".to_string(), + "10000000000000".to_string(), + ); + init_eval_result.ordered_tasks = + vec!["0x242fe0d1fa98c743f84a168ff10abbcca83cb9e0424f4541fab5041cd63d3387".to_string()]; + init_eval_result.encoded_tasks.insert( + "0x242fe0d1fa98c743f84a168ff10abbcca83cb9e0424f4541fab5041cd63d3387".to_string(), + "".to_string(), + ); + init_eval_result.encoded_datalakes.insert( + "0x242fe0d1fa98c743f84a168ff10abbcca83cb9e0424f4541fab5041cd63d3387".to_string(), + EvaluatedDatalake { + encoded_datalake: "0x0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000052229a000000000000000000000000000000000000000000000000000000000052229a000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000350375cec1db9dceb703200eaa6595f66885c962b92000000000000000000000000000000000000000000000000000000000000000020000000000000000000000".to_string(), + datalake_type:0, + property_type:3, + } + ); + + init_eval_result + } + + #[test] + fn test_build_merkle_tree() { + let evaluatio_result = setup(); + + let (task_merkle_tree, results_merkle_tree) = evaluatio_result.build_merkle_tree(); + assert_eq!( + task_merkle_tree.root().to_string(), + "0x663d096802271660f33286d812ee13f3cda273bdf1d183d06a0119b9421151e7".to_string() + ); + assert_eq!( + results_merkle_tree.root().to_string(), + "0xb540014ad1d08106489adb9d8c893947841c505f1f5794525f4cc8e5d3a92395".to_string() + ); + } +} diff --git a/crates/evaluator/tests/lib.rs b/crates/evaluator/tests/lib.rs new file mode 100644 index 00000000..1e31340e --- /dev/null +++ b/crates/evaluator/tests/lib.rs @@ -0,0 +1,2 @@ +pub mod aggregation_functions; +pub mod 
evaluation_test;
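
A side note on the fetcher change above: collecting MMR metadata into the new `relevant_mmr: HashSet<MMRMeta>` only compiles if `MMRMeta` implements `Hash` and `Eq` (plus `Clone` for the `.cloned()` call that selects the resulting metadata). The sketch below is illustrative only, not the repository's actual definition: the field types (`u64` for `id` and `size`) are assumptions, and the values are taken from the test fixture in this diff.

```rust
use std::collections::HashSet;

// Hypothetical stand-in for `common::types::MMRMeta`; field types are assumed.
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub struct MMRMeta {
    pub id: u64,
    pub root: String,
    pub size: u64,
    pub peaks: Vec<String>,
}

fn main() {
    let mut relevant_mmr: HashSet<MMRMeta> = HashSet::new();
    let meta = MMRMeta {
        id: 19,
        root: "0x7e956408569267d909a31fa404a972db1360a7e02bc3858e97c7c21ff394057".to_string(),
        size: 660813,
        peaks: vec![],
    };
    // Inserting the same metadata once per block in the range deduplicates to a
    // single entry, which is what the new single-MMR selection logic relies on.
    relevant_mmr.insert(meta.clone());
    relevant_mmr.insert(meta);
    assert_eq!(relevant_mmr.len(), 1);
}
```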