From b191e08724029b67290737c736be7f9399f27c16 Mon Sep 17 00:00:00 2001 From: AurelienFT Date: Tue, 15 Oct 2024 16:26:24 +0200 Subject: [PATCH 01/23] Update p2p mock and add a test for the save of data --- .../test_helpers/pressure_peer_to_peer.rs | 26 +- crates/services/sync/src/import/tests.rs | 448 ++++++++++++------ crates/services/sync/src/ports.rs | 4 +- crates/services/sync/src/service/tests.rs | 18 +- 4 files changed, 333 insertions(+), 163 deletions(-) diff --git a/crates/services/sync/src/import/test_helpers/pressure_peer_to_peer.rs b/crates/services/sync/src/import/test_helpers/pressure_peer_to_peer.rs index d01998e13d1..e77e94ff4ad 100644 --- a/crates/services/sync/src/import/test_helpers/pressure_peer_to_peer.rs +++ b/crates/services/sync/src/import/test_helpers/pressure_peer_to_peer.rs @@ -73,19 +73,23 @@ impl PressurePeerToPeer { pub fn new(counts: SharedCounts, delays: [Duration; 2]) -> Self { let mut mock = MockPeerToPeerPort::default(); mock.expect_get_sealed_block_headers().returning(|range| { - let peer = random_peer(); - let headers = range - .clone() - .map(BlockHeight::from) - .map(empty_header) - .collect(); - let headers = peer.bind(Some(headers)); - Ok(headers) + Box::pin(async move { + let peer = random_peer(); + let headers = range + .clone() + .map(BlockHeight::from) + .map(empty_header) + .collect(); + let headers = peer.bind(Some(headers)); + Ok(headers) + }) }); mock.expect_get_transactions().returning(|block_ids| { - let data = block_ids.data; - let v = data.into_iter().map(|_| Transactions::default()).collect(); - Ok(Some(v)) + Box::pin(async move { + let data = block_ids.data; + let v = data.into_iter().map(|_| Transactions::default()).collect(); + Ok(Some(v)) + }) }); Self { p2p: mock, diff --git a/crates/services/sync/src/import/tests.rs b/crates/services/sync/src/import/tests.rs index 0db7d8c5532..4a8a8762b19 100644 --- a/crates/services/sync/src/import/tests.rs +++ b/crates/services/sync/src/import/tests.rs @@ -14,7 +14,11 @@ 
use crate::{ }, }; use fuel_core_types::services::p2p::Transactions; -use std::time::Duration; +use mockall::Sequence; +use std::{ + ops::Deref, + time::Duration, +}; use super::*; @@ -38,17 +42,21 @@ async fn test_import_0_to_5() { p2p.expect_get_sealed_block_headers() .times(1) .returning(|range| { - let peer = random_peer(); - let headers = Some(range.map(empty_header).collect()); - let headers = peer.bind(headers); - Ok(headers) + Box::pin(async move { + let peer = random_peer(); + let headers = Some(range.map(empty_header).collect()); + let headers = peer.bind(headers); + Ok(headers) + }) }); p2p.expect_get_transactions() .times(1) .returning(|block_ids| { - let data = block_ids.data; - let v = data.into_iter().map(|_| Transactions::default()).collect(); - Ok(Some(v)) + Box::pin(async move { + let data = block_ids.data; + let v = data.into_iter().map(|_| Transactions::default()).collect(); + Ok(Some(v)) + }) }); let params = Config { @@ -84,17 +92,21 @@ async fn test_import_3_to_5() { p2p.expect_get_sealed_block_headers() .times(1) .returning(|range| { - let peer = random_peer(); - let headers = Some(range.map(empty_header).collect()); - let headers = peer.bind(headers); - Ok(headers) + Box::pin(async move { + let peer = random_peer(); + let headers = Some(range.map(empty_header).collect()); + let headers = peer.bind(headers); + Ok(headers) + }) }); p2p.expect_get_transactions() .times(1) .returning(|block_ids| { - let data = block_ids.data; - let v = data.into_iter().map(|_| Transactions::default()).collect(); - Ok(Some(v)) + Box::pin(async move { + let data = block_ids.data; + let v = data.into_iter().map(|_| Transactions::default()).collect(); + Ok(Some(v)) + }) }); let params = Config { @@ -147,10 +159,12 @@ async fn test_import_0_to_499() { p2p.expect_get_sealed_block_headers() .times(times) .returning(|range| { - let peer = random_peer(); - let headers = Some(range.map(empty_header).collect()); - let headers = peer.bind(headers); - Ok(headers) + 
Box::pin(async move { + let peer = random_peer(); + let headers = Some(range.map(empty_header).collect()); + let headers = peer.bind(headers); + Ok(headers) + }) }); // Happens once for each batch @@ -158,9 +172,11 @@ async fn test_import_0_to_499() { p2p.expect_get_transactions() .times(times) .returning(|block_ids| { - let data = block_ids.data; - let v = data.into_iter().map(|_| Transactions::default()).collect(); - Ok(Some(v)) + Box::pin(async move { + let data = block_ids.data; + let v = data.into_iter().map(|_| Transactions::default()).collect(); + Ok(Some(v)) + }) }); let params = Config { @@ -196,17 +212,21 @@ async fn import__signature_fails_on_header_5_only() { p2p.expect_get_sealed_block_headers() .times(1) .returning(|range| { - let peer = random_peer(); - let headers = Some(range.map(empty_header).collect()); - let headers = peer.bind(headers); - Ok(headers) + Box::pin(async move { + let peer = random_peer(); + let headers = Some(range.map(empty_header).collect()); + let headers = peer.bind(headers); + Ok(headers) + }) }); p2p.expect_get_transactions() .times(1) .returning(|block_ids| { - let data = block_ids.data; - let v = data.into_iter().map(|_| Transactions::default()).collect(); - Ok(Some(v)) + Box::pin(async move { + let data = block_ids.data; + let v = data.into_iter().map(|_| Transactions::default()).collect(); + Ok(Some(v)) + }) }); let state = State::new(3, 5).into(); @@ -227,6 +247,84 @@ async fn import__signature_fails_on_header_5_only() { assert_eq!((State::new(4, None), false), res); } +#[tokio::test] +async fn import__keep_blocks_p2p_asked_in_fail_cases() { + // given + let params = Config { + block_stream_buffer_size: 10, + header_batch_size: 1, + }; + + let mut consensus_port = MockConsensusPort::default(); + consensus_port + .expect_check_sealed_header() + .times(3) + .returning(|_| Ok(true)); + consensus_port + .expect_await_da_height() + .times(3) + .returning(|_| Ok(())); + + let mut p2p = MockPeerToPeerPort::default(); + let mut 
seq = Sequence::new(); + p2p.expect_get_sealed_block_headers() + .times(1) + .in_sequence(&mut seq) + .returning(|_| { + Box::pin(async move { + tokio::time::sleep(Duration::from_millis(300)).await; + Err(anyhow::anyhow!("Some network error")) + }) + }); + p2p.expect_get_sealed_block_headers() + .times(2) + .returning(|range| { + Box::pin(async move { + let peer = random_peer(); + let headers = Some(range.map(empty_header).collect()); + let headers = peer.bind(headers); + Ok(headers) + }) + }); + p2p.expect_get_transactions() + .times(3) + .returning(|block_ids| { + Box::pin(async move { + let data = block_ids.data; + let v = data.into_iter().map(|_| Transactions::default()).collect(); + Ok(Some(v)) + }) + }); + p2p.expect_report_peer().returning(|_, _| Ok(())); + + let p2p = Arc::new(p2p); + let executor: Arc = Arc::new(DefaultMocks::times([3])); + let consensus = Arc::new(consensus_port); + let notify = Arc::new(Notify::new()); + let state: SharedMutex = State::new(3, 6).into(); + + let import = Import { + state: state.clone(), + notify: notify.clone(), + params, + p2p, + executor, + consensus, + }; + let (_tx, shutdown) = tokio::sync::watch::channel(fuel_core_services::State::Started); + let mut watcher = shutdown.into(); + notify.notify_one(); + let res = import.import(&mut watcher).await.is_ok(); + assert_eq!(false, res); + assert_eq!(&State::new(3, None), state.lock().deref()); + // Reset the state for a next call + *state.lock() = State::new(3, 6); + // Should re-ask to P2P only block 4 and for now it reask for all blocks + let res = import.import(&mut watcher).await.is_ok(); + assert_eq!(true, res); + assert_eq!(&State::new(6, None), state.lock().deref()); +} + #[tokio::test] async fn import__signature_fails_on_header_4_only() { // given @@ -244,17 +342,21 @@ async fn import__signature_fails_on_header_4_only() { p2p.expect_get_sealed_block_headers() .times(1) .returning(|range| { - let peer = random_peer(); - let headers = 
Some(range.map(empty_header).collect()); - let headers = peer.bind(headers); - Ok(headers) + Box::pin(async move { + let peer = random_peer(); + let headers = Some(range.map(empty_header).collect()); + let headers = peer.bind(headers); + Ok(headers) + }) }); p2p.expect_get_transactions() .times(0) .returning(|block_ids| { - let data = block_ids.data; - let v = data.into_iter().map(|_| Transactions::default()).collect(); - Ok(Some(v)) + Box::pin(async move { + let data = block_ids.data; + let v = data.into_iter().map(|_| Transactions::default()).collect(); + Ok(Some(v)) + }) }); let state = State::new(3, 5).into(); @@ -282,10 +384,12 @@ async fn import__header_not_found() { p2p.expect_get_sealed_block_headers() .times(1) .returning(|_| { - let peer = random_peer(); - let headers = Some(Vec::new()); - let headers = peer.bind(headers); - Ok(headers) + Box::pin(async move { + let peer = random_peer(); + let headers = Some(Vec::new()); + let headers = peer.bind(headers); + Ok(headers) + }) }); let state = State::new(3, 5).into(); @@ -313,10 +417,12 @@ async fn import__header_response_incomplete() { p2p.expect_get_sealed_block_headers() .times(1) .returning(|_| { - let peer = random_peer(); - let headers = None; - let headers = peer.bind(headers); - Ok(headers) + Box::pin(async move { + let peer = random_peer(); + let headers = None; + let headers = peer.bind(headers); + Ok(headers) + }) }); let state = State::new(3, 5).into(); @@ -344,18 +450,22 @@ async fn import__header_5_not_found() { p2p.expect_get_sealed_block_headers() .times(1) .returning(|_| { - let peer = random_peer(); - let headers = Some(vec![empty_header(4)]); - let headers = peer.bind(headers); - Ok(headers) + Box::pin(async move { + let peer = random_peer(); + let headers = Some(vec![empty_header(4)]); + let headers = peer.bind(headers); + Ok(headers) + }) }); p2p.expect_get_transactions() .times(1) .returning(|block_ids| { - let data = block_ids.data; - let v = data.into_iter().map(|_| 
Transactions::default()).collect(); - Ok(Some(v)) + Box::pin(async move { + let data = block_ids.data; + let v = data.into_iter().map(|_| Transactions::default()).collect(); + Ok(Some(v)) + }) }); let state = State::new(3, 5).into(); @@ -383,10 +493,12 @@ async fn import__header_4_not_found() { p2p.expect_get_sealed_block_headers() .times(1) .returning(|_| { - let peer = random_peer(); - let headers = Some(vec![empty_header(5)]); - let headers = peer.bind(headers); - Ok(headers) + Box::pin(async move { + let peer = random_peer(); + let headers = Some(vec![empty_header(5)]); + let headers = peer.bind(headers); + Ok(headers) + }) }); p2p.expect_get_transactions().times(0); @@ -425,14 +537,16 @@ async fn import__transactions_not_found() { p2p.expect_get_sealed_block_headers() .times(1) .returning(|range| { - let peer = random_peer(); - let headers = Some(range.map(empty_header).collect()); - let headers = peer.bind(headers); - Ok(headers) + Box::pin(async move { + let peer = random_peer(); + let headers = Some(range.map(empty_header).collect()); + let headers = peer.bind(headers); + Ok(headers) + }) }); p2p.expect_get_transactions() .times(1) - .returning(|_| Ok(None)); + .returning(|_| Box::pin(async move { Ok(None) })); let state = State::new(3, 5).into(); let mocks = Mocks { @@ -469,23 +583,27 @@ async fn import__transactions_not_found_for_header_4() { p2p.expect_get_sealed_block_headers() .times(1) .returning(|range| { - let peer = random_peer(); - let headers = Some(range.map(empty_header).collect()); - let headers = peer.bind(headers); - Ok(headers) + Box::pin(async move { + let peer = random_peer(); + let headers = Some(range.map(empty_header).collect()); + let headers = peer.bind(headers); + Ok(headers) + }) }); let mut height = 3; p2p.expect_get_transactions() .times(1) .returning(move |block_ids| { - height += 1; - if height == 4 { - Ok(None) - } else { - let data = block_ids.data; - let v = data.into_iter().map(|_| Transactions::default()).collect(); - 
Ok(Some(v)) - } + Box::pin(async move { + height += 1; + if height == 4 { + Ok(None) + } else { + let data = block_ids.data; + let v = data.into_iter().map(|_| Transactions::default()).collect(); + Ok(Some(v)) + } + }) }); let state = State::new(3, 5).into(); @@ -523,14 +641,18 @@ async fn import__transactions_not_found_for_header_5() { p2p.expect_get_sealed_block_headers() .times(1) .returning(|range| { - let peer = random_peer(); - let headers = Some(range.map(empty_header).collect()); - let headers = peer.bind(headers); - Ok(headers) + Box::pin(async move { + let peer = random_peer(); + let headers = Some(range.map(empty_header).collect()); + let headers = peer.bind(headers); + Ok(headers) + }) }); p2p.expect_get_transactions().times(1).returning(move |_| { - let v = vec![Transactions::default()]; - Ok(Some(v)) + Box::pin(async move { + let v = vec![Transactions::default()]; + Ok(Some(v)) + }) }); let state = State::new(3, 5).into(); @@ -557,7 +679,9 @@ async fn import__p2p_error() { let mut p2p = MockPeerToPeerPort::default(); p2p.expect_get_sealed_block_headers() .times(1) - .returning(|_| Err(anyhow::anyhow!("Some network error"))); + .returning(|_| { + Box::pin(async move { Err(anyhow::anyhow!("Some network error")) }) + }); p2p.expect_get_transactions().times(0); let state = State::new(3, 5).into(); @@ -595,14 +719,16 @@ async fn import__p2p_error_on_4_transactions() { p2p.expect_get_sealed_block_headers() .times(1) .returning(|range| { - let peer = random_peer(); - let headers = Some(range.map(empty_header).collect()); - let headers = peer.bind(headers); - Ok(headers) + Box::pin(async move { + let peer = random_peer(); + let headers = Some(range.map(empty_header).collect()); + let headers = peer.bind(headers); + Ok(headers) + }) }); - p2p.expect_get_transactions() - .times(1) - .returning(|_| Err(anyhow::anyhow!("Some network error"))); + p2p.expect_get_transactions().times(1).returning(|_| { + Box::pin(async move { Err(anyhow::anyhow!("Some network 
error")) }) + }); let state = State::new(3, 5).into(); let mocks = Mocks { @@ -645,10 +771,12 @@ async fn import__consensus_error_on_4() { p2p.expect_get_sealed_block_headers() .times(1) .returning(|range| { - let peer = random_peer(); - let headers = Some(range.map(empty_header).collect()); - let headers = peer.bind(headers); - Ok(headers) + Box::pin(async move { + let peer = random_peer(); + let headers = Some(range.map(empty_header).collect()); + let headers = peer.bind(headers); + Ok(headers) + }) }); p2p.expect_get_transactions().times(0); @@ -693,17 +821,21 @@ async fn import__consensus_error_on_5() { p2p.expect_get_sealed_block_headers() .times(1) .returning(|range| { - let peer = random_peer(); - let headers = Some(range.map(empty_header).collect()); - let headers = peer.bind(headers); - Ok(headers) + Box::pin(async move { + let peer = random_peer(); + let headers = Some(range.map(empty_header).collect()); + let headers = peer.bind(headers); + Ok(headers) + }) }); p2p.expect_get_transactions() .times(1) .returning(|block_ids| { - let data = block_ids.data; - let v = data.into_iter().map(|_| Transactions::default()).collect(); - Ok(Some(v)) + Box::pin(async move { + let data = block_ids.data; + let v = data.into_iter().map(|_| Transactions::default()).collect(); + Ok(Some(v)) + }) }); let state = State::new(3, 5).into(); @@ -741,17 +873,21 @@ async fn import__execution_error_on_header_4() { p2p.expect_get_sealed_block_headers() .times(1) .returning(|range| { - let peer = random_peer(); - let headers = Some(range.map(empty_header).collect()); - let headers = peer.bind(headers); - Ok(headers) + Box::pin(async move { + let peer = random_peer(); + let headers = Some(range.map(empty_header).collect()); + let headers = peer.bind(headers); + Ok(headers) + }) }); p2p.expect_get_transactions() .times(1) .returning(|block_ids| { - let data = block_ids.data; - let v = data.into_iter().map(|_| Transactions::default()).collect(); - Ok(Some(v)) + Box::pin(async move { + 
let data = block_ids.data; + let v = data.into_iter().map(|_| Transactions::default()).collect(); + Ok(Some(v)) + }) }); let mut executor = MockBlockImporterPort::default(); @@ -801,17 +937,21 @@ async fn import__execution_error_on_header_5() { p2p.expect_get_sealed_block_headers() .times(1) .returning(|range| { - let peer = random_peer(); - let headers = Some(range.map(empty_header).collect()); - let headers = peer.bind(headers); - Ok(headers) + Box::pin(async move { + let peer = random_peer(); + let headers = Some(range.map(empty_header).collect()); + let headers = peer.bind(headers); + Ok(headers) + }) }); p2p.expect_get_transactions() .times(1) .returning(|block_ids| { - let data = block_ids.data; - let v = data.into_iter().map(|_| Transactions::default()).collect(); - Ok(Some(v)) + Box::pin(async move { + let data = block_ids.data; + let v = data.into_iter().map(|_| Transactions::default()).collect(); + Ok(Some(v)) + }) }); let mut executor = MockBlockImporterPort::default(); @@ -892,18 +1032,23 @@ async fn import__can_work_in_two_loops() { p2p.expect_get_sealed_block_headers() .times(2) .returning(move |range| { - state.apply(|s| s.observe(6)); - let peer = random_peer(); - let headers = Some(range.map(empty_header).collect()); - let headers = peer.bind(headers); - Ok(headers) + let state = state.clone(); + Box::pin(async move { + state.apply(|s| s.observe(6)); + let peer = random_peer(); + let headers = Some(range.map(empty_header).collect()); + let headers = peer.bind(headers); + Ok(headers) + }) }); p2p.expect_get_transactions() .times(2) .returning(|block_ids| { - let data = block_ids.data; - let v = data.into_iter().map(|_| Transactions::default()).collect(); - Ok(Some(v)) + Box::pin(async move { + let data = block_ids.data; + let v = data.into_iter().map(|_| Transactions::default()).collect(); + Ok(Some(v)) + }) }); let c = DefaultMocks::times([2]); @@ -1040,15 +1185,19 @@ async fn import__execution_error_on_header_4_when_awaits_for_1000000_blocks() { 
let mut p2p = MockPeerToPeerPort::default(); p2p.expect_get_sealed_block_headers().returning(|range| { - let peer = random_peer(); - let headers = Some(range.map(empty_header).collect()); - let headers = peer.bind(headers); - Ok(headers) + Box::pin(async move { + let peer = random_peer(); + let headers = Some(range.map(empty_header).collect()); + let headers = peer.bind(headers); + Ok(headers) + }) }); p2p.expect_get_transactions().returning(|block_ids| { - let data = block_ids.data; - let v = data.into_iter().map(|_| Transactions::default()).collect(); - Ok(Some(v)) + Box::pin(async move { + let data = block_ids.data; + let v = data.into_iter().map(|_| Transactions::default()).collect(); + Ok(Some(v)) + }) }); let mut executor = MockBlockImporterPort::default(); @@ -1189,28 +1338,37 @@ impl PeerReportTestBuilder { if let Some(get_headers) = self.get_sealed_headers.clone() { p2p.expect_get_sealed_block_headers().returning(move |_| { let peer: PeerId = peer_id.clone().into(); - let headers = peer.bind(get_headers.clone()); - Ok(headers) + let get_headers = get_headers.clone(); + Box::pin(async move { + let headers = peer.bind(get_headers); + Ok(headers) + }) }); } else { p2p.expect_get_sealed_block_headers() .returning(move |range| { let peer: PeerId = peer_id.clone().into(); - let headers = Some(range.map(empty_header).collect()); - let headers = peer.bind(headers); - Ok(headers) + Box::pin(async move { + let headers = Some(range.map(empty_header).collect()); + let headers = peer.bind(headers); + Ok(headers) + }) }); } let transactions = self.get_transactions.clone(); if let Some(t) = transactions { - p2p.expect_get_transactions() - .returning(move |_| Ok(t.clone())); + p2p.expect_get_transactions().returning(move |_| { + let t = t.clone(); + Box::pin(async move { Ok(t) }) + }); } else { p2p.expect_get_transactions().returning(|block_ids| { - let data = block_ids.data; - let v = data.into_iter().map(|_| Transactions::default()).collect(); - Ok(Some(v)) + 
Box::pin(async move { + let data = block_ids.data; + let v = data.into_iter().map(|_| Transactions::default()).collect(); + Ok(Some(v)) + }) }); } @@ -1326,18 +1484,22 @@ impl DefaultMocks for MockPeerToPeerPort { p2p.expect_get_sealed_block_headers() .times(1) .returning(|range| { - let peer = random_peer(); - let headers = Some(range.map(empty_header).collect()); - let headers = peer.bind(headers); - Ok(headers) + Box::pin(async move { + let peer = random_peer(); + let headers = Some(range.map(empty_header).collect()); + let headers = peer.bind(headers); + Ok(headers) + }) }); p2p.expect_get_transactions() .times(t.next().unwrap()) .returning(|block_ids| { - let data = block_ids.data; - let v = data.into_iter().map(|_| Transactions::default()).collect(); - Ok(Some(v)) + Box::pin(async move { + let data = block_ids.data; + let v = data.into_iter().map(|_| Transactions::default()).collect(); + Ok(Some(v)) + }) }); p2p } diff --git a/crates/services/sync/src/ports.rs b/crates/services/sync/src/ports.rs index 86f8280489d..2d6e3f76eb3 100644 --- a/crates/services/sync/src/ports.rs +++ b/crates/services/sync/src/ports.rs @@ -34,8 +34,8 @@ pub enum PeerReportReason { InvalidTransactions, } -#[cfg_attr(any(test, feature = "benchmarking"), mockall::automock)] #[async_trait::async_trait] +#[cfg_attr(any(test, feature = "benchmarking"), mockall::automock)] /// Port for communication with the network. pub trait PeerToPeerPort { /// Stream of newly observed block heights. @@ -47,7 +47,7 @@ pub trait PeerToPeerPort { block_height_range: Range, ) -> anyhow::Result>>>; - /// Request transactions from the network for the given block + /// Request transactions from the network for the given block range /// and source peer. 
async fn get_transactions( &self, diff --git a/crates/services/sync/src/service/tests.rs b/crates/services/sync/src/service/tests.rs index 57440afae24..91f3460a632 100644 --- a/crates/services/sync/src/service/tests.rs +++ b/crates/services/sync/src/service/tests.rs @@ -39,15 +39,19 @@ async fn test_new_service() { .into_boxed() }); p2p.expect_get_sealed_block_headers().returning(|range| { - let peer = random_peer(); - let headers = Some(range.map(empty_header).collect::>()); - let headers = peer.bind(headers); - Ok(headers) + Box::pin(async move { + let peer = random_peer(); + let headers = Some(range.map(empty_header).collect::>()); + let headers = peer.bind(headers); + Ok(headers) + }) }); p2p.expect_get_transactions().returning(|block_ids| { - let data = block_ids.data; - let v = data.into_iter().map(|_| Transactions::default()).collect(); - Ok(Some(v)) + Box::pin(async move { + let data = block_ids.data; + let v = data.into_iter().map(|_| Transactions::default()).collect(); + Ok(Some(v)) + }) }); let mut importer = MockBlockImporterPort::default(); importer From 704a5837ec4677d40ee4f68d427ef15c9a8ccdda Mon Sep 17 00:00:00 2001 From: AurelienFT Date: Tue, 15 Oct 2024 16:29:19 +0200 Subject: [PATCH 02/23] Fix a test --- crates/services/sync/src/import/back_pressure_tests.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/services/sync/src/import/back_pressure_tests.rs b/crates/services/sync/src/import/back_pressure_tests.rs index 7974890015f..cc8aa446997 100644 --- a/crates/services/sync/src/import/back_pressure_tests.rs +++ b/crates/services/sync/src/import/back_pressure_tests.rs @@ -50,7 +50,7 @@ struct Input { block_stream_buffer_size: 10, header_batch_size: 5, } - => is less_or_equal_than Count{ headers: 10, consensus: 10, transactions: 10, executes: 1, blocks: 50 } + => is less_or_equal_than Count{ headers: 10, consensus: 10, transactions: 10, executes: 1, blocks: 51 } ; "1000 headers with max 5 size and max 10 requests when slow 
headers" )] #[test_case( From e94bee2c31c35635ebfe993544e1beb683fe3ee8 Mon Sep 17 00:00:00 2001 From: AurelienFT Date: Wed, 16 Oct 2024 18:36:36 +0200 Subject: [PATCH 03/23] Add caching of headers or blocks already fetched. Removed when successfully imported --- crates/services/sync/src/import.rs | 182 +++++++++++++----- .../sync/src/import/back_pressure_tests.rs | 1 + crates/services/sync/src/import/tests.rs | 124 +++++++++++- 3 files changed, 254 insertions(+), 53 deletions(-) diff --git a/crates/services/sync/src/import.rs b/crates/services/sync/src/import.rs index a0aebc6dc8f..17e3c402d1c 100644 --- a/crates/services/sync/src/import.rs +++ b/crates/services/sync/src/import.rs @@ -27,6 +27,7 @@ use futures::{ Stream, }; use std::{ + collections::HashMap, future::Future, ops::{ Range, @@ -98,6 +99,26 @@ pub struct Import { executor: Arc, /// Consensus port. consensus: Arc, + /// A cache of already validated header or blocks. + cache: SharedMutex, CachedData>>, +} + +/// The data that is cached for a range of headers or blocks. +#[derive(Debug, Clone)] +enum CachedData { + // Only headers fetch + check has been done + Headers(Batch), + // Both headers fetch + check and blocks fetch + check has been done + Blocks(Batch), +} + +/// The data that is fetched either in the network or in the cache for a range of headers or blocks. +#[derive(Debug, Clone)] +enum BlockHeaderData { + /// The headers (or full blocks) have been fetched and checked. + Cached(CachedData), + /// The headers has just been fetched from the network. + Fetched(Batch), } impl Import { @@ -118,6 +139,7 @@ impl Import { p2p, executor, consensus, + cache: SharedMutex::new(HashMap::new()), } } @@ -127,7 +149,7 @@ impl Import { } } -#[derive(Debug)] +#[derive(Debug, Clone)] struct Batch { peer: Option, range: Range, @@ -199,6 +221,7 @@ where params, p2p, consensus, + cache, .. 
} = &self; @@ -208,11 +231,17 @@ where let params = *params; let p2p = p2p.clone(); let consensus = consensus.clone(); + let cache = cache.clone(); let block_stream_buffer_size = params.block_stream_buffer_size; let mut shutdown_signal = shutdown.clone(); async move { - let block_stream = - get_block_stream(range.clone(), params, p2p, consensus); + let block_stream = get_block_stream( + range.clone(), + params, + p2p, + consensus, + cache.clone(), + ); let shutdown_future = { let mut s = shutdown_signal.clone(); @@ -234,7 +263,9 @@ where // return an empty response _ = shutdown_signal.while_started() => None, // Stream a batch of blocks - blocks = stream_block_batch => Some(blocks), + blocks = stream_block_batch => { + Some(blocks) + }, } }).map(|task| { task.trace_err("Failed to join the task").ok().flatten() @@ -327,9 +358,13 @@ where }; } - let batch = Batch::new(peer.clone(), range, done); + let batch = Batch::new(peer.clone(), range.clone(), done); if !batch.is_err() { + { + let mut cache = self.cache.lock(); + cache.remove(&range); + } report_peer(p2p, peer, PeerReportReason::SuccessfulBlockImport); } @@ -364,28 +399,72 @@ fn get_block_stream< params: Config, p2p: Arc

, consensus: Arc, + cache: SharedMutex, CachedData>>, ) -> impl Stream> { - let header_stream = get_header_batch_stream(range.clone(), params, p2p.clone()); - header_stream + let ranges = range_chunks(range, params.header_batch_size); + futures::stream::iter(ranges) + .map({ + let p2p = p2p.clone(); + let cache = cache.clone(); + move |range| { + let p2p = p2p.clone(); + let cache = cache.clone(); + async move { + let cached_data = { + let cache = cache.lock(); + cache.get(&range).cloned() + }; + + match cached_data { + Some(cached_data) => BlockHeaderData::Cached(cached_data), + None => BlockHeaderData::Fetched( + get_headers_batch(range.clone(), &p2p).await, + ), + } + } + } + }) .map({ let p2p = p2p.clone(); let consensus = consensus.clone(); + let cache = cache.clone(); move |header_batch| { let p2p = p2p.clone(); let consensus = consensus.clone(); + let cache = cache.clone(); async move { - let Batch { - peer, - range, - results, - } = header_batch.await; - let checked_headers = results - .into_iter() - .take_while(|header| { - check_sealed_header(header, peer.clone(), &p2p, &consensus) - }) - .collect::>(); - Batch::new(peer, range, checked_headers) + match header_batch.await { + BlockHeaderData::Cached(cached_data) => { + BlockHeaderData::Cached(cached_data) + } + BlockHeaderData::Fetched(fetched_batch) => { + let Batch { + peer, + range, + results, + } = fetched_batch; + let checked_headers = results + .into_iter() + .take_while(|header| { + check_sealed_header( + header, + peer.clone(), + &p2p, + &consensus, + ) + }) + .collect::>(); + let batch = Batch::new(peer, range.clone(), checked_headers); + if !batch.is_err() { + let mut cache = cache.lock(); + cache.insert( + range.clone(), + CachedData::Headers(batch.clone()), + ); + } + BlockHeaderData::Fetched(batch) + } + } } } }) @@ -395,24 +474,40 @@ fn get_block_stream< move |headers| { let p2p = p2p.clone(); let consensus = consensus.clone(); + let cache = cache.clone(); async move { - let Batch { - peer, 
- range, - results, - } = headers.await; - if results.is_empty() { - SealedBlockBatch::new(peer, range, vec![]) - } else { - await_da_height( - results - .last() - .expect("We checked headers are not empty above"), - &consensus, - ) - .await; - let headers = SealedHeaderBatch::new(peer, range, results); - get_blocks(&p2p, headers).await + match headers.await { + BlockHeaderData::Cached(CachedData::Blocks(batch)) => batch, + BlockHeaderData::Cached(CachedData::Headers(batch)) + | BlockHeaderData::Fetched(batch) => { + let Batch { + peer, + range, + results, + } = batch; + if results.is_empty() { + SealedBlockBatch::new(peer, range, vec![]) + } else { + await_da_height( + results + .last() + .expect("We checked headers are not empty above"), + &consensus, + ) + .await; + let headers = + SealedHeaderBatch::new(peer, range.clone(), results); + let batch = get_blocks(&p2p, headers).await; + if !batch.is_err() { + let mut cache = cache.lock(); + cache.insert( + range.clone(), + CachedData::Blocks(batch.clone()), + ); + } + batch + } + } } } .instrument(tracing::debug_span!("consensus_and_transactions")) @@ -421,21 +516,6 @@ fn get_block_stream< }) } -fn get_header_batch_stream( - range: RangeInclusive, - params: Config, - p2p: Arc

, -) -> impl Stream> { - let Config { - header_batch_size, .. - } = params; - let ranges = range_chunks(range, header_batch_size); - futures::stream::iter(ranges).map(move |range| { - let p2p = p2p.clone(); - async move { get_headers_batch(range, &p2p).await } - }) -} - fn range_chunks( range: RangeInclusive, chunk_size: usize, diff --git a/crates/services/sync/src/import/back_pressure_tests.rs b/crates/services/sync/src/import/back_pressure_tests.rs index cc8aa446997..dce3ae39aee 100644 --- a/crates/services/sync/src/import/back_pressure_tests.rs +++ b/crates/services/sync/src/import/back_pressure_tests.rs @@ -113,6 +113,7 @@ async fn test_back_pressure(input: Input, state: State, params: Config) -> Count p2p, executor, consensus, + cache: SharedMutex::new(HashMap::new()), }; import.notify.notify_one(); diff --git a/crates/services/sync/src/import/tests.rs b/crates/services/sync/src/import/tests.rs index 4a8a8762b19..ae7e6fdc5d7 100644 --- a/crates/services/sync/src/import/tests.rs +++ b/crates/services/sync/src/import/tests.rs @@ -248,18 +248,19 @@ async fn import__signature_fails_on_header_5_only() { } #[tokio::test] -async fn import__keep_blocks_p2p_asked_in_fail_cases() { - // given +async fn import__keep_data_asked_in_fail_ask_header_cases() { let params = Config { block_stream_buffer_size: 10, header_batch_size: 1, }; let mut consensus_port = MockConsensusPort::default(); + // No reask consensus_port .expect_check_sealed_header() .times(3) .returning(|_| Ok(true)); + // No reask consensus_port .expect_await_da_height() .times(3) @@ -267,6 +268,8 @@ async fn import__keep_blocks_p2p_asked_in_fail_cases() { let mut p2p = MockPeerToPeerPort::default(); let mut seq = Sequence::new(); + // Given + // Fail to get headers for block 4 p2p.expect_get_sealed_block_headers() .times(1) .in_sequence(&mut seq) @@ -278,6 +281,20 @@ async fn import__keep_blocks_p2p_asked_in_fail_cases() { }); p2p.expect_get_sealed_block_headers() .times(2) + .in_sequence(&mut seq) + 
.returning(|range| { + Box::pin(async move { + let peer = random_peer(); + let headers = Some(range.map(empty_header).collect()); + let headers = peer.bind(headers); + Ok(headers) + }) + }); + // Then + // Reask only for block 4 + p2p.expect_get_sealed_block_headers() + .times(1) + .in_sequence(&mut seq) .returning(|range| { Box::pin(async move { let peer = random_peer(); @@ -286,6 +303,7 @@ async fn import__keep_blocks_p2p_asked_in_fail_cases() { Ok(headers) }) }); + // No reask p2p.expect_get_transactions() .times(3) .returning(|block_ids| { @@ -310,6 +328,105 @@ async fn import__keep_blocks_p2p_asked_in_fail_cases() { p2p, executor, consensus, + cache: SharedMutex::new(HashMap::new()), + }; + let (_tx, shutdown) = tokio::sync::watch::channel(fuel_core_services::State::Started); + let mut watcher = shutdown.into(); + notify.notify_one(); + let res = import.import(&mut watcher).await.is_ok(); + assert_eq!(false, res); + assert_eq!(&State::new(3, None), state.lock().deref()); + // Reset the state for a next call + *state.lock() = State::new(3, 6); + + // When + // Should re-ask to P2P only block 4 and for now it reask for all blocks + let res = import.import(&mut watcher).await.is_ok(); + assert_eq!(true, res); + assert_eq!(&State::new(6, None), state.lock().deref()); +} + + +#[tokio::test] +async fn import__keep_data_asked_in_fail_ask_transactions_cases() { + let params = Config { + block_stream_buffer_size: 10, + header_batch_size: 1, + }; + + let mut consensus_port = MockConsensusPort::default(); + consensus_port + .expect_check_sealed_header() + .times(3) + .returning(|_| Ok(true)); + consensus_port + .expect_await_da_height() + .times(4) + .returning(|_| Ok(())); + + let mut p2p = MockPeerToPeerPort::default(); + p2p.expect_get_sealed_block_headers() + .times(3) + .returning(|range| { + Box::pin(async move { + let peer = random_peer(); + let headers = Some(range.map(empty_header).collect()); + let headers = peer.bind(headers); + Ok(headers) + }) + }); + let 
mut seq = Sequence::new(); + // Given + // Fail to get transactions for block 4 + p2p.expect_get_transactions() + .times(1) + .in_sequence(&mut seq) + .returning(|_| { + Box::pin(async move { + tokio::time::sleep(Duration::from_millis(300)).await; + Err(anyhow::anyhow!("Some network error")) + }) + }); + + p2p.expect_get_transactions() + .times(2) + .in_sequence(&mut seq) + .returning(|block_ids| { + Box::pin(async move { + let data = block_ids.data; + let v = data.into_iter().map(|_| Transactions::default()).collect(); + Ok(Some(v)) + }) + }); + // Then + // Reask only for block 4 + p2p.expect_get_transactions() + .times(1) + .in_sequence(&mut seq) + .returning(|block_ids| { + Box::pin(async move { + let data = block_ids.data; + let v = data.into_iter().map(|_| Transactions::default()).collect(); + Ok(Some(v)) + }) + }); + + p2p.expect_report_peer().returning(|_, _| Ok(())); + + let p2p = Arc::new(p2p); + let executor: Arc = Arc::new(DefaultMocks::times([3])); + let consensus = Arc::new(consensus_port); + let notify = Arc::new(Notify::new()); + let state: SharedMutex = State::new(3, 6).into(); + + let import = Import { + state: state.clone(), + notify: notify.clone(), + params, + p2p, + executor, + consensus, + cache: SharedMutex::new(HashMap::new()), }; let (_tx, shutdown) = tokio::sync::watch::channel(fuel_core_services::State::Started); let mut watcher = shutdown.into(); @@ -319,6 +436,7 @@ async fn import__keep_blocks_p2p_asked_in_fail_cases() { assert_eq!(&State::new(3, None), state.lock().deref()); // Reset the state for a next call *state.lock() = State::new(3, 6); + // When // Should re-ask to P2P only block 4 and for now it reask for all blocks let res = import.import(&mut watcher).await.is_ok(); assert_eq!(true, res); @@ -1094,6 +1212,7 @@ async fn test_import_inner( p2p, executor, consensus, + cache: SharedMutex::new(HashMap::new()), }; let (_tx, shutdown) = tokio::sync::watch::channel(fuel_core_services::State::Started); let mut watcher = 
shutdown.into(); @@ -1319,6 +1438,7 @@ impl PeerReportTestBuilder { p2p, executor, consensus, + cache: SharedMutex::new(HashMap::new()), }; let (_tx, shutdown) = tokio::sync::watch::channel(fuel_core_services::State::Started); From 6a616613b0974e05cf00eb78b5170d48ea595e85 Mon Sep 17 00:00:00 2001 From: AurelienFT Date: Wed, 16 Oct 2024 18:40:53 +0200 Subject: [PATCH 04/23] Add changelog and fmt --- CHANGELOG.md | 3 +++ crates/services/sync/src/import/tests.rs | 3 +-- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index b8dce9c8ab2..8c1281ec6d5 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,6 +6,9 @@ and this project adheres to [Semantic Versioning](http://semver.org/). ## [Unreleased] +### Added +- [2361](https://github.com/FuelLabs/fuel-core/pull/2361): Add caches to the syync service to not reask for data it already fetched from the network. + ## [Version 0.40.0] ### Added diff --git a/crates/services/sync/src/import/tests.rs b/crates/services/sync/src/import/tests.rs index ae7e6fdc5d7..676744e5eee 100644 --- a/crates/services/sync/src/import/tests.rs +++ b/crates/services/sync/src/import/tests.rs @@ -338,7 +338,7 @@ async fn import__keep_data_asked_in_fail_ask_header_cases() { assert_eq!(&State::new(3, None), state.lock().deref()); // Reset the state for a next call *state.lock() = State::new(3, 6); - + // When // Should re-ask to P2P only block 4 and for now it reask for all blocks let res = import.import(&mut watcher).await.is_ok(); @@ -346,7 +346,6 @@ async fn import__keep_data_asked_in_fail_ask_header_cases() { assert_eq!(&State::new(6, None), state.lock().deref()); } - #[tokio::test] async fn import__keep_data_asked_in_fail_ask_transactions_cases() { let params = Config { From 34ec1c6f86dbeba541e734707bd4ecf155f21387 Mon Sep 17 00:00:00 2001 From: AurelienFT Date: Wed, 16 Oct 2024 18:54:32 +0200 Subject: [PATCH 05/23] Fix clippy --- crates/services/sync/src/import/tests.rs | 8 ++++---- 1 file changed, 4 
insertions(+), 4 deletions(-) diff --git a/crates/services/sync/src/import/tests.rs b/crates/services/sync/src/import/tests.rs index 676744e5eee..e18ce0f7355 100644 --- a/crates/services/sync/src/import/tests.rs +++ b/crates/services/sync/src/import/tests.rs @@ -334,7 +334,7 @@ async fn import__keep_data_asked_in_fail_ask_header_cases() { let mut watcher = shutdown.into(); notify.notify_one(); let res = import.import(&mut watcher).await.is_ok(); - assert_eq!(false, res); + assert!(!res); assert_eq!(&State::new(3, None), state.lock().deref()); // Reset the state for a next call *state.lock() = State::new(3, 6); @@ -342,7 +342,7 @@ async fn import__keep_data_asked_in_fail_ask_header_cases() { // When // Should re-ask to P2P only block 4 and for now it reask for all blocks let res = import.import(&mut watcher).await.is_ok(); - assert_eq!(true, res); + assert!(res); assert_eq!(&State::new(6, None), state.lock().deref()); } @@ -431,14 +431,14 @@ async fn import__keep_data_asked_in_fail_ask_transactions_cases() { let mut watcher = shutdown.into(); notify.notify_one(); let res = import.import(&mut watcher).await.is_ok(); - assert_eq!(false, res); + assert!(!res); assert_eq!(&State::new(3, None), state.lock().deref()); // Reset the state for a next call *state.lock() = State::new(3, 6); // When // Should re-ask to P2P only block 4 and for now it reask for all blocks let res = import.import(&mut watcher).await.is_ok(); - assert_eq!(true, res); + assert!(res); assert_eq!(&State::new(6, None), state.lock().deref()); } From c4bb96c892e7bc1db4599a865606211df3e5ca07 Mon Sep 17 00:00:00 2001 From: AurelienFT Date: Thu, 17 Oct 2024 17:30:39 +0200 Subject: [PATCH 06/23] Add new cache structure. 
(Not fully working because of batch of headers and blocks mixed) --- crates/services/sync/src/import.rs | 145 +++++-------- .../sync/src/import/back_pressure_tests.rs | 4 +- crates/services/sync/src/import/cache.rs | 205 ++++++++++++++++++ crates/services/sync/src/import/tests.rs | 16 +- 4 files changed, 270 insertions(+), 100 deletions(-) create mode 100644 crates/services/sync/src/import/cache.rs diff --git a/crates/services/sync/src/import.rs b/crates/services/sync/src/import.rs index 17e3c402d1c..e22775aca25 100644 --- a/crates/services/sync/src/import.rs +++ b/crates/services/sync/src/import.rs @@ -2,6 +2,7 @@ //! This module contains the import task which is responsible for //! importing blocks from the network into the local blockchain. +use cache::{Cache, CachedData}; use fuel_core_services::{ SharedMutex, StateWatcher, @@ -27,7 +28,6 @@ use futures::{ Stream, }; use std::{ - collections::HashMap, future::Future, ops::{ Range, @@ -55,6 +55,8 @@ use crate::{ state::State, }; +mod cache; + #[cfg(any(test, feature = "benchmarking"))] /// Accessories for testing the sync. Available only when compiling under test /// or benchmarking. @@ -100,23 +102,15 @@ pub struct Import { /// Consensus port. consensus: Arc, /// A cache of already validated header or blocks. - cache: SharedMutex, CachedData>>, + cache: Cache, } -/// The data that is cached for a range of headers or blocks. -#[derive(Debug, Clone)] -enum CachedData { - // Only headers fetch + check has been done - Headers(Batch), - // Both headers fetch + check and blocks fetch + check has been done - Blocks(Batch), -} /// The data that is fetched either in the network or in the cache for a range of headers or blocks. #[derive(Debug, Clone)] enum BlockHeaderData { /// The headers (or full blocks) have been fetched and checked. - Cached(CachedData), + Cached(Vec), /// The headers has just been fetched from the network. 
Fetched(Batch), } @@ -139,7 +133,7 @@ impl Import { p2p, executor, consensus, - cache: SharedMutex::new(HashMap::new()), + cache: Cache::new(), } } @@ -181,13 +175,13 @@ where { #[tracing::instrument(skip_all)] /// Execute imports until a shutdown is requested. - pub async fn import(&self, shutdown: &mut StateWatcher) -> anyhow::Result { + pub async fn import(&mut self, shutdown: &mut StateWatcher) -> anyhow::Result { self.import_inner(shutdown).await?; Ok(wait_for_notify_or_shutdown(&self.notify, shutdown).await) } - async fn import_inner(&self, shutdown: &StateWatcher) -> anyhow::Result<()> { + async fn import_inner(&mut self, shutdown: &StateWatcher) -> anyhow::Result<()> { // If there is a range to process, launch the stream. if let Some(range) = self.state.apply(|s| s.process_range()) { // Launch the stream to import the range. @@ -310,7 +304,7 @@ where /// If an error occurs, the preceding blocks still be processed /// and the error will be returned. async fn launch_stream( - &self, + &mut self, range: RangeInclusive, shutdown: &StateWatcher, ) -> usize { @@ -325,22 +319,25 @@ where self.fetch_batches_task(range, shutdown); let result = tokio_stream::wrappers::ReceiverStream::new(batch_receiver) .then(|batch| { + let mut cache = self.cache.clone(); async move { let Batch { peer, range, results, } = batch; - + // clean cache of all blocks that are in the range let mut done = vec![]; let mut shutdown = shutdown.clone(); for sealed_block in results { + let height = *sealed_block.entity.header().height(); let res = tokio::select! 
{ biased; _ = shutdown.while_started() => { break; }, res = execute_and_commit(executor.as_ref(), state, sealed_block) => { + cache.remove_element(&height); res }, }; @@ -361,10 +358,6 @@ where let batch = Batch::new(peer.clone(), range.clone(), done); if !batch.is_err() { - { - let mut cache = self.cache.lock(); - cache.remove(&range); - } report_peer(p2p, peer, PeerReportReason::SuccessfulBlockImport); } @@ -399,22 +392,14 @@ fn get_block_stream< params: Config, p2p: Arc

, consensus: Arc, - cache: SharedMutex, CachedData>>, + cache: Cache, ) -> impl Stream> { - let ranges = range_chunks(range, params.header_batch_size); - futures::stream::iter(ranges) + cache.get_chunks(range.clone(), params.header_batch_size) .map({ let p2p = p2p.clone(); - let cache = cache.clone(); - move |range| { + move |(range, cached_data)| { let p2p = p2p.clone(); - let cache = cache.clone(); async move { - let cached_data = { - let cache = cache.lock(); - cache.get(&range).cloned() - }; - match cached_data { Some(cached_data) => BlockHeaderData::Cached(cached_data), None => BlockHeaderData::Fetched( @@ -431,7 +416,7 @@ fn get_block_stream< move |header_batch| { let p2p = p2p.clone(); let consensus = consensus.clone(); - let cache = cache.clone(); + let mut cache = cache.clone(); async move { match header_batch.await { BlockHeaderData::Cached(cached_data) => { @@ -456,11 +441,7 @@ fn get_block_stream< .collect::>(); let batch = Batch::new(peer, range.clone(), checked_headers); if !batch.is_err() { - let mut cache = cache.lock(); - cache.insert( - range.clone(), - CachedData::Headers(batch.clone()), - ); + cache.insert_headers(batch.clone()); } BlockHeaderData::Fetched(batch) } @@ -471,44 +452,41 @@ fn get_block_stream< .map({ let p2p = p2p.clone(); let consensus = consensus.clone(); - move |headers| { - let p2p = p2p.clone(); - let consensus = consensus.clone(); - let cache = cache.clone(); + move |_headers| { + let _p2p = p2p.clone(); + let _consensus = consensus.clone(); + let _cache = cache.clone(); async move { - match headers.await { - BlockHeaderData::Cached(CachedData::Blocks(batch)) => batch, - BlockHeaderData::Cached(CachedData::Headers(batch)) - | BlockHeaderData::Fetched(batch) => { - let Batch { - peer, - range, - results, - } = batch; - if results.is_empty() { - SealedBlockBatch::new(peer, range, vec![]) - } else { - await_da_height( - results - .last() - .expect("We checked headers are not empty above"), - &consensus, - ) - .await; - let 
headers = - SealedHeaderBatch::new(peer, range.clone(), results); - let batch = get_blocks(&p2p, headers).await; - if !batch.is_err() { - let mut cache = cache.lock(); - cache.insert( - range.clone(), - CachedData::Blocks(batch.clone()), - ); - } - batch - } - } - } + unimplemented!() + // match headers.await { + // BlockHeaderData::Cached(CachedData::Blocks(batch)) => batch, + // BlockHeaderData::Cached(CachedData::Headers(batch)) + // | BlockHeaderData::Fetched(batch) => { + // let Batch { + // peer, + // range, + // results, + // } = batch; + // if results.is_empty() { + // SealedBlockBatch::new(peer, range, vec![]) + // } else { + // await_da_height( + // results + // .last() + // .expect("We checked headers are not empty above"), + // &consensus, + // ) + // .await; + // let headers = + // SealedHeaderBatch::new(peer, range.clone(), results); + // let batch = get_blocks(&p2p, headers).await; + // if !batch.is_err() { + // cache.insert_blocks(batch.clone()); + // } + // batch + // } + // } + // } } .instrument(tracing::debug_span!("consensus_and_transactions")) .in_current_span() @@ -516,19 +494,6 @@ fn get_block_stream< }) } -fn range_chunks( - range: RangeInclusive, - chunk_size: usize, -) -> impl Iterator> { - let end = range.end().saturating_add(1); - let chunk_size_u32 = - u32::try_from(chunk_size).expect("The size of the chunk can't exceed `u32`"); - range.step_by(chunk_size).map(move |chunk_start| { - let block_end = (chunk_start.saturating_add(chunk_size_u32)).min(end); - chunk_start..block_end - }) -} - fn check_sealed_header< P: PeerToPeerPort + Send + Sync + 'static, C: ConsensusPort + Send + Sync + 'static, @@ -548,7 +513,7 @@ fn check_sealed_header< validity } -async fn await_da_height( +async fn _await_da_height( header: &SealedBlockHeader, consensus: &Arc, ) { @@ -595,7 +560,7 @@ where .map(|res| res.map(|data| data.unwrap_or_default())) } -async fn get_transactions

( +async fn _get_transactions

( peer_id: PeerId, range: Range, p2p: &Arc

, @@ -687,7 +652,7 @@ where return SealedBlockBatch::new(None, range, vec![]) }; - let Some(transaction_data) = get_transactions(peer.clone(), range.clone(), p2p).await + let Some(transaction_data) = _get_transactions(peer.clone(), range.clone(), p2p).await else { return Batch::new(Some(peer), range, vec![]) }; diff --git a/crates/services/sync/src/import/back_pressure_tests.rs b/crates/services/sync/src/import/back_pressure_tests.rs index dce3ae39aee..963e6d2ce03 100644 --- a/crates/services/sync/src/import/back_pressure_tests.rs +++ b/crates/services/sync/src/import/back_pressure_tests.rs @@ -106,14 +106,14 @@ async fn test_back_pressure(input: Input, state: State, params: Config) -> Count let consensus = Arc::new(PressureConsensus::new(counts.clone(), input.consensus)); let notify = Arc::new(Notify::new()); - let import = Import { + let mut import = Import { state, notify, params, p2p, executor, consensus, - cache: SharedMutex::new(HashMap::new()), + cache: Cache::new(), }; import.notify.notify_one(); diff --git a/crates/services/sync/src/import/cache.rs b/crates/services/sync/src/import/cache.rs new file mode 100644 index 00000000000..bd77a680f22 --- /dev/null +++ b/crates/services/sync/src/import/cache.rs @@ -0,0 +1,205 @@ +#![allow(unused)] + +use std::{collections::BTreeMap, ops::{Range, RangeInclusive}}; + +use fuel_core_services::SharedMutex; +use fuel_core_types::blockchain::{SealedBlock, SealedBlockHeader}; + +use super::Batch; + +/// The cache that stores the fetched headers and blocks. +#[derive(Clone, Debug)] +pub struct Cache(SharedMutex>); + +#[derive(Debug, Clone)] +pub enum CachedData { + Header(SealedBlockHeader), + Block(SealedBlock), +} + +/// The data that is fetched either in the network or in the cache for a range of headers or blocks. +#[derive(Debug, Clone)] +enum BlockHeaderData { + /// The headers (or full blocks) have been fetched and checked. + Cached(CachedData), + /// The headers has just been fetched from the network. 
+ Fetched(Batch), +} + + +impl Cache { + pub fn new() -> Self { + Self(SharedMutex::new(BTreeMap::new())) + } + + pub fn insert_blocks(&mut self, batch: Batch) { + let mut lock = self.0.lock(); + for block in batch.results { + lock.insert(**block.entity.header().height(), CachedData::Block(block)); + } + } + + pub fn insert_headers(&mut self, batch: Batch) { + let mut lock = self.0.lock(); + for header in batch.results { + lock.insert(**header.entity.height(), CachedData::Header(header)); + } + } + + pub fn get_chunks(&self, range: RangeInclusive, max_chunk_size: usize) -> futures::stream::Iter, Option>)>> { + let end = (*range.end()).saturating_add(1); + let chunk_size_u32 = + u32::try_from(max_chunk_size).expect("The size of the chunk can't exceed `u32`"); + let lock = self.0.lock(); + let cache_iter = lock.range(range.clone()); + let mut start_current_chunk = *range.start(); + let mut current_height = *range.start(); + // Build a stream of futures that will be splitted in chunks of available data and missing data to fetch with a maximum size of `max_chunk_size` for each chunk. + let mut chunks = Vec::new(); + let mut current_chunk = Vec::new(); + for (height, data) in cache_iter { + if *height == current_height { + current_chunk.push(data.clone()); + current_height = *height + 1; + if current_chunk.len() >= max_chunk_size { + chunks.push((start_current_chunk..current_height, Some(current_chunk))); + start_current_chunk = current_height; + current_chunk = Vec::new(); + } + } else { + // Push the current chunk of cached if it is not empty. + if !current_chunk.is_empty() { + chunks.push((start_current_chunk..current_height, Some(current_chunk))); + } + // Push the missing chunks. + let missing_chunks = (current_height..*height).step_by(max_chunk_size).map(move |chunk_start| { + let block_end = chunk_start.saturating_add(chunk_size_u32).min(end); + (chunk_start..block_end, None) + }); + chunks.extend(missing_chunks); + + // Prepare the next chunk. 
+ start_current_chunk = *height; + current_chunk = Vec::new(); + current_chunk.push(data.clone()); + current_height = *height + 1; + } + } + // Push the last chunk of cached if it is not empty. + if !current_chunk.is_empty() { + chunks.push((start_current_chunk..current_height, Some(current_chunk))); + } + // Push the last missing chunk. + if current_height < *range.end() { + // Include the last height. + let missing_chunks = (current_height..end).step_by(max_chunk_size).map(move |chunk_start| { + let block_end = chunk_start.saturating_add(chunk_size_u32).min(end); + (chunk_start..block_end, None) + }); + chunks.extend(missing_chunks); + } + futures::stream::iter(chunks) + } + + pub fn remove_element(&mut self, height: &u32) { + let mut lock = self.0.lock(); + lock.remove(height); + } +} + +#[cfg(test)] +mod tests { + use std::ops::Range; + use fuel_core_types::blockchain::{consensus::Sealed, header::BlockHeader}; + use futures::StreamExt; + use crate::import::{cache::{CachedData, Cache}, Batch}; + + #[tokio::test] + async fn test_one_element_and_empty_ranges() { + let mut cache = Cache::new(); + let mut header = BlockHeader::default(); + header.set_block_height(0.into()); + let header = Sealed { + entity: header, + consensus: Default::default(), + }; + cache.insert_headers(Batch::new(None, 0..1, vec![header.clone()])); + let chunks = cache.get_chunks(0..=10, 5); + let chunks: Vec<(Range, Option>)> = chunks.collect().await; + assert_eq!(chunks.len(), 3); + assert_eq!(chunks[0].0, 0..1); + assert_eq!(chunks[1].0, 1..6); + assert_eq!(chunks[2].0, 6..11); + assert_eq!(chunks[0].1.as_ref().unwrap().len(), 1); + assert_eq!(chunks[1].1.is_none(), true); + assert_eq!(chunks[2].1.is_none(), true); + } + + #[tokio::test] + async fn test_only_full_ranges() { + let mut cache = Cache::new(); + let mut headers = Vec::new(); + for i in 0..11 { + let mut header = BlockHeader::default(); + header.set_block_height(i.into()); + let header = Sealed { + entity: header.clone(), + 
consensus: Default::default(), + }; + headers.push(header); + } + cache.insert_headers(Batch::new(None, 0..11, headers)); + let chunks = cache.get_chunks(0..=10, 3); + let chunks: Vec<(Range, Option>)> = chunks.collect().await; + assert_eq!(chunks.len(), 4); + assert_eq!(chunks[0].0, 0..3); + assert_eq!(chunks[1].0, 3..6); + assert_eq!(chunks[2].0, 6..9); + assert_eq!(chunks[3].0, 9..11); + assert_eq!(chunks.iter().all(|(_, b)| b.is_some()), true); + assert_eq!(chunks[0].1.as_ref().unwrap().len(), 3); + assert_eq!(chunks[1].1.as_ref().unwrap().len(), 3); + assert_eq!(chunks[2].1.as_ref().unwrap().len(), 3); + assert_eq!(chunks[3].1.as_ref().unwrap().len(), 2); + } + + #[tokio::test] + async fn test_only_empty_ranges() { + let mut cache = Cache::new(); + let chunks = cache.get_chunks(0..=10, 3); + let chunks: Vec<(Range, Option>)> = chunks.collect().await; + assert_eq!(chunks.len(), 4); + assert_eq!(chunks[0].0, 0..3); + assert_eq!(chunks[1].0, 3..6); + assert_eq!(chunks[2].0, 6..9); + assert_eq!(chunks[3].0, 9..11); + assert_eq!(chunks.iter().all(|(_, b)| b.is_none()), true); + } + + #[tokio::test] + async fn test_ranges_cached_and_empty() { + let mut cache = Cache::new(); + let mut headers = Vec::new(); + for i in 0..6 { + let mut header = BlockHeader::default(); + header.set_block_height(i.into()); + let header = Sealed { + entity: header.clone(), + consensus: Default::default(), + }; + headers.push(header); + } + cache.insert_headers(Batch::new(None, 0..6, headers)); + let chunks = cache.get_chunks(0..=10, 3); + let chunks: Vec<(Range, Option>)> = chunks.collect().await; + assert_eq!(chunks.len(), 4); + assert_eq!(chunks[0].0, 0..3); + assert_eq!(chunks[1].0, 3..6); + assert_eq!(chunks[2].0, 6..9); + assert_eq!(chunks[3].0, 9..11); + assert_eq!(chunks[0].1.as_ref().unwrap().len(), 3); + assert_eq!(chunks[1].1.as_ref().unwrap().len(), 3); + assert_eq!(chunks[2].1.is_none(), true); + assert_eq!(chunks[3].1.is_none(), true); + } +} \ No newline at end of file diff 
--git a/crates/services/sync/src/import/tests.rs b/crates/services/sync/src/import/tests.rs index e18ce0f7355..3b4fa7cf6b1 100644 --- a/crates/services/sync/src/import/tests.rs +++ b/crates/services/sync/src/import/tests.rs @@ -321,14 +321,14 @@ async fn import__keep_data_asked_in_fail_ask_header_cases() { let notify = Arc::new(Notify::new()); let state: SharedMutex = State::new(3, 6).into(); - let import = Import { + let mut import = Import { state: state.clone(), notify: notify.clone(), params, p2p, executor, consensus, - cache: SharedMutex::new(HashMap::new()), + cache: Cache::new(), }; let (_tx, shutdown) = tokio::sync::watch::channel(fuel_core_services::State::Started); let mut watcher = shutdown.into(); @@ -418,14 +418,14 @@ async fn import__keep_data_asked_in_fail_ask_transactions_cases() { let notify = Arc::new(Notify::new()); let state: SharedMutex = State::new(3, 6).into(); - let import = Import { + let mut import = Import { state: state.clone(), notify: notify.clone(), params, p2p, executor, consensus, - cache: SharedMutex::new(HashMap::new()), + cache: Cache::new(), }; let (_tx, shutdown) = tokio::sync::watch::channel(fuel_core_services::State::Started); let mut watcher = shutdown.into(); @@ -1204,14 +1204,14 @@ async fn test_import_inner( let executor = Arc::new(executor); let consensus = Arc::new(consensus_port); - let import = Import { + let mut import = Import { state, notify, params, p2p, executor, consensus, - cache: SharedMutex::new(HashMap::new()), + cache: Cache::new(), }; let (_tx, shutdown) = tokio::sync::watch::channel(fuel_core_services::State::Started); let mut watcher = shutdown.into(); @@ -1430,14 +1430,14 @@ impl PeerReportTestBuilder { header_batch_size: 10, }; - let import = Import { + let mut import = Import { state, notify, params, p2p, executor, consensus, - cache: SharedMutex::new(HashMap::new()), + cache: Cache::new(), }; let (_tx, shutdown) = tokio::sync::watch::channel(fuel_core_services::State::Started); From 
23c7eaa5864003c3ce9988c83f260c51ad1192a7 Mon Sep 17 00:00:00 2001 From: AurelienFT Date: Fri, 18 Oct 2024 11:42:40 +0200 Subject: [PATCH 07/23] Add new chunk system to distinguish chunks of blocks and blocks header and added a bunch of tests --- crates/services/sync/src/import.rs | 104 ++--- crates/services/sync/src/import/cache.rs | 539 +++++++++++++++++------ 2 files changed, 467 insertions(+), 176 deletions(-) diff --git a/crates/services/sync/src/import.rs b/crates/services/sync/src/import.rs index e22775aca25..5dc18c73a6d 100644 --- a/crates/services/sync/src/import.rs +++ b/crates/services/sync/src/import.rs @@ -2,7 +2,10 @@ //! This module contains the import task which is responsible for //! importing blocks from the network into the local blockchain. -use cache::{Cache, CachedData}; +use cache::{ + Cache, + CachedDataBatch, +}; use fuel_core_services::{ SharedMutex, StateWatcher, @@ -105,12 +108,11 @@ pub struct Import { cache: Cache, } - /// The data that is fetched either in the network or in the cache for a range of headers or blocks. #[derive(Debug, Clone)] enum BlockHeaderData { /// The headers (or full blocks) have been fetched and checked. - Cached(Vec), + Cached(CachedDataBatch), /// The headers has just been fetched from the network. 
Fetched(Batch), } @@ -143,7 +145,7 @@ impl Import { } } -#[derive(Debug, Clone)] +#[derive(Debug, Clone, PartialEq, Eq)] struct Batch { peer: Option, range: Range, @@ -257,9 +259,7 @@ where // return an empty response _ = shutdown_signal.while_started() => None, // Stream a batch of blocks - blocks = stream_block_batch => { - Some(blocks) - }, + blocks = stream_block_batch => Some(blocks), } }).map(|task| { task.trace_err("Failed to join the task").ok().flatten() @@ -394,17 +394,17 @@ fn get_block_stream< consensus: Arc, cache: Cache, ) -> impl Stream> { - cache.get_chunks(range.clone(), params.header_batch_size) + cache + .get_chunks(range.clone(), params.header_batch_size) .map({ let p2p = p2p.clone(); - move |(range, cached_data)| { + move |cached_data_batch| { let p2p = p2p.clone(); async move { - match cached_data { - Some(cached_data) => BlockHeaderData::Cached(cached_data), - None => BlockHeaderData::Fetched( - get_headers_batch(range.clone(), &p2p).await, - ), + if let CachedDataBatch::None(range) = cached_data_batch { + BlockHeaderData::Fetched(get_headers_batch(range, &p2p).await) + } else { + BlockHeaderData::Cached(cached_data_batch) } } } @@ -452,41 +452,43 @@ fn get_block_stream< .map({ let p2p = p2p.clone(); let consensus = consensus.clone(); - move |_headers| { - let _p2p = p2p.clone(); - let _consensus = consensus.clone(); - let _cache = cache.clone(); + move |headers| { + let p2p = p2p.clone(); + let consensus = consensus.clone(); + let mut cache = cache.clone(); async move { - unimplemented!() - // match headers.await { - // BlockHeaderData::Cached(CachedData::Blocks(batch)) => batch, - // BlockHeaderData::Cached(CachedData::Headers(batch)) - // | BlockHeaderData::Fetched(batch) => { - // let Batch { - // peer, - // range, - // results, - // } = batch; - // if results.is_empty() { - // SealedBlockBatch::new(peer, range, vec![]) - // } else { - // await_da_height( - // results - // .last() - // .expect("We checked headers are not empty above"), - 
// &consensus, - // ) - // .await; - // let headers = - // SealedHeaderBatch::new(peer, range.clone(), results); - // let batch = get_blocks(&p2p, headers).await; - // if !batch.is_err() { - // cache.insert_blocks(batch.clone()); - // } - // batch - // } - // } - // } + match headers.await { + BlockHeaderData::Cached(CachedDataBatch::Blocks(batch)) => batch, + BlockHeaderData::Cached(CachedDataBatch::Headers(batch)) + | BlockHeaderData::Fetched(batch) => { + let Batch { + peer, + range, + results, + } = batch; + if results.is_empty() { + SealedBlockBatch::new(peer, range, vec![]) + } else { + await_da_height( + results + .last() + .expect("We checked headers are not empty above"), + &consensus, + ) + .await; + let headers = + SealedHeaderBatch::new(peer, range.clone(), results); + let batch = get_blocks(&p2p, headers).await; + if !batch.is_err() { + cache.insert_blocks(batch.clone()); + } + batch + } + } + BlockHeaderData::Cached(CachedDataBatch::None(_)) => { + unreachable!() + } + } } .instrument(tracing::debug_span!("consensus_and_transactions")) .in_current_span() @@ -513,7 +515,7 @@ fn check_sealed_header< validity } -async fn _await_da_height( +async fn await_da_height( header: &SealedBlockHeader, consensus: &Arc, ) { @@ -560,7 +562,7 @@ where .map(|res| res.map(|data| data.unwrap_or_default())) } -async fn _get_transactions

( +async fn get_transactions

( peer_id: PeerId, range: Range, p2p: &Arc

, @@ -652,7 +654,7 @@ where return SealedBlockBatch::new(None, range, vec![]) }; - let Some(transaction_data) = _get_transactions(peer.clone(), range.clone(), p2p).await + let Some(transaction_data) = get_transactions(peer.clone(), range.clone(), p2p).await else { return Batch::new(Some(peer), range, vec![]) }; diff --git a/crates/services/sync/src/import/cache.rs b/crates/services/sync/src/import/cache.rs index bd77a680f22..ca26105b908 100644 --- a/crates/services/sync/src/import/cache.rs +++ b/crates/services/sync/src/import/cache.rs @@ -1,9 +1,18 @@ #![allow(unused)] -use std::{collections::BTreeMap, ops::{Range, RangeInclusive}}; +use std::{ + collections::BTreeMap, + ops::{ + Range, + RangeInclusive, + }, +}; use fuel_core_services::SharedMutex; -use fuel_core_types::blockchain::{SealedBlock, SealedBlockHeader}; +use fuel_core_types::blockchain::{ + SealedBlock, + SealedBlockHeader, +}; use super::Batch; @@ -12,21 +21,19 @@ use super::Batch; pub struct Cache(SharedMutex>); #[derive(Debug, Clone)] +#[allow(clippy::large_enum_variant)] pub enum CachedData { Header(SealedBlockHeader), Block(SealedBlock), } -/// The data that is fetched either in the network or in the cache for a range of headers or blocks. -#[derive(Debug, Clone)] -enum BlockHeaderData { - /// The headers (or full blocks) have been fetched and checked. - Cached(CachedData), - /// The headers has just been fetched from the network. 
- Fetched(Batch), +#[derive(Debug, Clone, PartialEq, Eq)] +pub enum CachedDataBatch { + Headers(Batch), + Blocks(Batch), + None(Range), } - impl Cache { pub fn new() -> Self { Self(SharedMutex::new(BTreeMap::new())) @@ -46,56 +53,153 @@ impl Cache { } } - pub fn get_chunks(&self, range: RangeInclusive, max_chunk_size: usize) -> futures::stream::Iter, Option>)>> { + pub fn get_chunks( + &self, + range: RangeInclusive, + max_chunk_size: usize, + ) -> futures::stream::Iter> { let end = (*range.end()).saturating_add(1); - let chunk_size_u32 = - u32::try_from(max_chunk_size).expect("The size of the chunk can't exceed `u32`"); - let lock = self.0.lock(); - let cache_iter = lock.range(range.clone()); - let mut start_current_chunk = *range.start(); + let chunk_size_u32 = u32::try_from(max_chunk_size) + .expect("The size of the chunk can't exceed `u32`"); + let cache_iter: Vec<(u32, CachedData)> = { + let lock = self.0.lock(); + lock.range(range.clone()) + .map(|(k, v)| (*k, v.clone())) + .collect() + }; let mut current_height = *range.start(); // Build a stream of futures that will be splitted in chunks of available data and missing data to fetch with a maximum size of `max_chunk_size` for each chunk. 
let mut chunks = Vec::new(); - let mut current_chunk = Vec::new(); + let mut current_chunk = CachedDataBatch::None(0..0); for (height, data) in cache_iter { - if *height == current_height { - current_chunk.push(data.clone()); - current_height = *height + 1; - if current_chunk.len() >= max_chunk_size { - chunks.push((start_current_chunk..current_height, Some(current_chunk))); - start_current_chunk = current_height; - current_chunk = Vec::new(); + if height == current_height { + current_chunk = match (current_chunk, data) { + (CachedDataBatch::None(_), CachedData::Header(data)) => { + CachedDataBatch::Headers(Batch::new( + None, + height..height.saturating_add(1), + vec![data], + )) + } + (CachedDataBatch::None(_), CachedData::Block(data)) => { + CachedDataBatch::Blocks(Batch::new( + None, + height..height.saturating_add(1), + vec![data], + )) + } + (CachedDataBatch::Headers(mut batch), CachedData::Header(data)) => { + batch.range = batch.range.start..height.saturating_add(1); + batch.results.push(data); + CachedDataBatch::Headers(batch) + } + (CachedDataBatch::Blocks(mut batch), CachedData::Block(data)) => { + batch.range = batch.range.start..height.saturating_add(1); + batch.results.push(data); + CachedDataBatch::Blocks(batch) + } + ( + CachedDataBatch::Headers(headers_batch), + CachedData::Block(block), + ) => { + chunks.push(CachedDataBatch::Headers(headers_batch)); + CachedDataBatch::Blocks(Batch::new( + None, + height..height.saturating_add(1), + vec![block], + )) + } + ( + CachedDataBatch::Blocks(blocks_batch), + CachedData::Header(header), + ) => { + chunks.push(CachedDataBatch::Blocks(blocks_batch)); + CachedDataBatch::Headers(Batch::new( + None, + height..height.saturating_add(1), + vec![header], + )) + } + }; + // Check the chunk limit and push the current chunk if it is full. 
+ current_chunk = match current_chunk { + CachedDataBatch::Headers(batch) => { + if batch.results.len() >= max_chunk_size { + chunks.push(CachedDataBatch::Headers(batch)); + CachedDataBatch::None(current_height..current_height) + } else { + CachedDataBatch::Headers(batch) + } + } + CachedDataBatch::Blocks(batch) => { + if batch.results.len() >= max_chunk_size { + chunks.push(CachedDataBatch::Blocks(batch)); + CachedDataBatch::None(current_height..current_height) + } else { + CachedDataBatch::Blocks(batch) + } + } + CachedDataBatch::None(range) => CachedDataBatch::None(range), } } else { // Push the current chunk of cached if it is not empty. - if !current_chunk.is_empty() { - chunks.push((start_current_chunk..current_height, Some(current_chunk))); + match current_chunk { + CachedDataBatch::Headers(batch) => { + chunks.push(CachedDataBatch::Headers(batch)); + } + CachedDataBatch::Blocks(batch) => { + chunks.push(CachedDataBatch::Blocks(batch)); + } + CachedDataBatch::None(_) => {} } + // Push the missing chunks. - let missing_chunks = (current_height..*height).step_by(max_chunk_size).map(move |chunk_start| { - let block_end = chunk_start.saturating_add(chunk_size_u32).min(end); - (chunk_start..block_end, None) - }); + let missing_chunks = (current_height..height) + .step_by(max_chunk_size) + .map(move |chunk_start| { + let block_end = + chunk_start.saturating_add(chunk_size_u32).min(end); + CachedDataBatch::None(chunk_start..block_end) + }); chunks.extend(missing_chunks); // Prepare the next chunk. 
- start_current_chunk = *height; - current_chunk = Vec::new(); - current_chunk.push(data.clone()); - current_height = *height + 1; + current_chunk = match data { + CachedData::Header(data) => CachedDataBatch::Headers(Batch::new( + None, + height..height.saturating_add(1), + vec![data], + )), + CachedData::Block(data) => CachedDataBatch::Blocks(Batch::new( + None, + height..height.saturating_add(1), + vec![data], + )), + }; } + current_height = height.saturating_add(1); } // Push the last chunk of cached if it is not empty. - if !current_chunk.is_empty() { - chunks.push((start_current_chunk..current_height, Some(current_chunk))); + match current_chunk { + CachedDataBatch::Headers(batch) => { + chunks.push(CachedDataBatch::Headers(batch)); + } + CachedDataBatch::Blocks(batch) => { + chunks.push(CachedDataBatch::Blocks(batch)); + } + CachedDataBatch::None(_) => {} } // Push the last missing chunk. - if current_height < *range.end() { + if current_height <= *range.end() { // Include the last height. 
- let missing_chunks = (current_height..end).step_by(max_chunk_size).map(move |chunk_start| { - let block_end = chunk_start.saturating_add(chunk_size_u32).min(end); - (chunk_start..block_end, None) - }); + let missing_chunks = + (current_height..end) + .step_by(max_chunk_size) + .map(move |chunk_start| { + let block_end = + chunk_start.saturating_add(chunk_size_u32).min(end); + CachedDataBatch::None(chunk_start..block_end) + }); chunks.extend(missing_chunks); } futures::stream::iter(chunks) @@ -109,97 +213,282 @@ impl Cache { #[cfg(test)] mod tests { - use std::ops::Range; - use fuel_core_types::blockchain::{consensus::Sealed, header::BlockHeader}; + use crate::import::{ + cache::{ + Cache, + CachedData, + CachedDataBatch, + }, + Batch, + }; + use fuel_core_types::{ + blockchain::{ + block::Block, + consensus::Sealed, + header::BlockHeader, + }, + fuel_tx::Bytes32, + tai64::Tai64, + }; use futures::StreamExt; - use crate::import::{cache::{CachedData, Cache}, Batch}; + use std::ops::{ + Range, + RangeInclusive, + }; + use test_case::test_case; - #[tokio::test] - async fn test_one_element_and_empty_ranges() { - let mut cache = Cache::new(); - let mut header = BlockHeader::default(); - header.set_block_height(0.into()); - let header = Sealed { - entity: header, + fn create_header(height: u32) -> Sealed { + Sealed { + entity: BlockHeader::new_block(height.into(), Tai64::from_unix(0)), consensus: Default::default(), - }; - cache.insert_headers(Batch::new(None, 0..1, vec![header.clone()])); - let chunks = cache.get_chunks(0..=10, 5); - let chunks: Vec<(Range, Option>)> = chunks.collect().await; - assert_eq!(chunks.len(), 3); - assert_eq!(chunks[0].0, 0..1); - assert_eq!(chunks[1].0, 1..6); - assert_eq!(chunks[2].0, 6..11); - assert_eq!(chunks[0].1.as_ref().unwrap().len(), 1); - assert_eq!(chunks[1].1.is_none(), true); - assert_eq!(chunks[2].1.is_none(), true); - } - - #[tokio::test] - async fn test_only_full_ranges() { - let mut cache = Cache::new(); - let mut headers = 
Vec::new(); - for i in 0..11 { - let mut header = BlockHeader::default(); - header.set_block_height(i.into()); - let header = Sealed { - entity: header.clone(), - consensus: Default::default(), - }; - headers.push(header); } - cache.insert_headers(Batch::new(None, 0..11, headers)); - let chunks = cache.get_chunks(0..=10, 3); - let chunks: Vec<(Range, Option>)> = chunks.collect().await; - assert_eq!(chunks.len(), 4); - assert_eq!(chunks[0].0, 0..3); - assert_eq!(chunks[1].0, 3..6); - assert_eq!(chunks[2].0, 6..9); - assert_eq!(chunks[3].0, 9..11); - assert_eq!(chunks.iter().all(|(_, b)| b.is_some()), true); - assert_eq!(chunks[0].1.as_ref().unwrap().len(), 3); - assert_eq!(chunks[1].1.as_ref().unwrap().len(), 3); - assert_eq!(chunks[2].1.as_ref().unwrap().len(), 3); - assert_eq!(chunks[3].1.as_ref().unwrap().len(), 2); } - - #[tokio::test] - async fn test_only_empty_ranges() { - let mut cache = Cache::new(); - let chunks = cache.get_chunks(0..=10, 3); - let chunks: Vec<(Range, Option>)> = chunks.collect().await; - assert_eq!(chunks.len(), 4); - assert_eq!(chunks[0].0, 0..3); - assert_eq!(chunks[1].0, 3..6); - assert_eq!(chunks[2].0, 6..9); - assert_eq!(chunks[3].0, 9..11); - assert_eq!(chunks.iter().all(|(_, b)| b.is_none()), true); + fn create_block(height: u32) -> Sealed { + Sealed { + entity: Block::new( + (&create_header(height).entity).into(), + Vec::new(), + &[], + Bytes32::default(), + ) + .unwrap(), + consensus: Default::default(), + } } + #[test_case(&[], &[], 3, 0..=10 => vec![ + CachedDataBatch::None(0..3), + CachedDataBatch::None(3..6), + CachedDataBatch::None(6..9), + CachedDataBatch::None(9..11), + ] ; "multiple empty chunks")] + #[test_case(&[ + create_header(0) + ], &[], 3, 0..=10 => vec![ + CachedDataBatch::Headers(Batch::new(None, 0..1, vec![create_header(0)])), + CachedDataBatch::None(1..4), + CachedDataBatch::None(4..7), + CachedDataBatch::None(7..10), + CachedDataBatch::None(10..11), + ]; "one header and empty ranges")] + #[test_case(&[ + 
create_header(0), + create_header(1), + create_header(2) + ], &[], 3, 0..=10 => vec![ + CachedDataBatch::Headers(Batch::new(None, 0..3, vec![ + create_header(0), + create_header(1), + create_header(2) + ])), + CachedDataBatch::None(3..6), + CachedDataBatch::None(6..9), + CachedDataBatch::None(9..11), + ]; "multiple headers and empty ranges")] + #[test_case(&[], &[ + create_block(0) + ], 3, 0..=10 => vec![ + CachedDataBatch::Blocks(Batch::new(None, 0..1, vec![create_block(0)])), + CachedDataBatch::None(1..4), + CachedDataBatch::None(4..7), + CachedDataBatch::None(7..10), + CachedDataBatch::None(10..11), + ]; "one block and empty ranges")] + #[test_case(&[ + create_header(0) + ], &[ + create_block(1) + ], 3, 0..=10 => vec![ + CachedDataBatch::Headers(Batch::new(None, 0..1, vec![create_header(0)])), + CachedDataBatch::Blocks(Batch::new(None, 1..2, vec![create_block(1)])), + CachedDataBatch::None(2..5), + CachedDataBatch::None(5..8), + CachedDataBatch::None(8..11), + ]; "one header, one block and empty ranges")] + #[test_case(&[ + create_header(0), + create_header(1) + ], &[ + create_block(2), + create_block(3) + ], 2, 0..=10 => vec![ + CachedDataBatch::Headers(Batch::new(None, 0..2, vec![ + create_header(0), + create_header(1) + ])), + CachedDataBatch::Blocks(Batch::new(None, 2..4, vec![ + create_block(2), + create_block(3) + ])), + CachedDataBatch::None(4..6), + CachedDataBatch::None(6..8), + CachedDataBatch::None(8..10), + CachedDataBatch::None(10..11), + ]; "multiple headers, multiple blocks and empty ranges")] + #[test_case(&[ + create_header(0), + create_header(1), + create_header(2), + create_header(3) + ], &[ + create_block(4), + create_block(5), + create_block(6), + create_block(7) + ], 2, 0..=10 => vec![ + CachedDataBatch::Headers(Batch::new(None, 0..2, vec![ + create_header(0), + create_header(1) + ])), + CachedDataBatch::Headers(Batch::new(None, 2..4, vec![ + create_header(2), + create_header(3) + ])), + CachedDataBatch::Blocks(Batch::new(None, 4..6, vec![ 
+ create_block(4), + create_block(5) + ])), + CachedDataBatch::Blocks(Batch::new(None, 6..8, vec![ + create_block(6), + create_block(7) + ])), + CachedDataBatch::None(8..10), + CachedDataBatch::None(10..11), + ]; "multiple headers, multiple blocks and empty ranges with smaller chunk size")] + #[test_case(&[ + create_header(0), + create_header(1), + create_header(2), + create_header(3) + ], &[ + create_block(4), + create_block(5), + create_block(6), + create_block(7) + ], 2, 0..=7 => vec![ + CachedDataBatch::Headers(Batch::new(None, 0..2, vec![ + create_header(0), + create_header(1) + ])), + CachedDataBatch::Headers(Batch::new(None, 2..4, vec![ + create_header(2), + create_header(3) + ])), + CachedDataBatch::Blocks(Batch::new(None, 4..6, vec![ + create_block(4), + create_block(5) + ])), + CachedDataBatch::Blocks(Batch::new(None, 6..8, vec![ + create_block(6), + create_block(7) + ])), + ]; "multiple headers, multiple blocks with no empty ranges")] + #[test_case(&[ + create_header(0), + create_header(1), + create_header(2) + ], &[ + create_block(3), + create_block(4), + create_block(5) + ], 3, 0..=5 => vec![ + CachedDataBatch::Headers(Batch::new(None, 0..3, vec![ + create_header(0), + create_header(1), + create_header(2) + ])), + CachedDataBatch::Blocks(Batch::new(None, 3..6, vec![ + create_block(3), + create_block(4), + create_block(5) + ])), + ]; "multiple headers, multiple blocks with no empty ranges and larger chunk size")] + #[test_case(&[ + create_header(0), + create_header(1) + ], &[ + create_block(2), + create_block(3) + ], 2, 0..=3 => vec![ + CachedDataBatch::Headers(Batch::new(None, 0..2, vec![ + create_header(0), + create_header(1) + ])), + CachedDataBatch::Blocks(Batch::new(None, 2..4, vec![ + create_block(2), + create_block(3) + ])), + ]; "multiple headers, multiple blocks with no empty ranges and exact chunk size")] + #[test_case(&[ + create_header(0), + create_header(1), + create_header(2) + ], &[ + create_block(3), + create_block(4), + create_block(5) 
+ ], 1, 0..=5 => vec![ + CachedDataBatch::Headers(Batch::new(None, 0..1, vec![ + create_header(0) + ])), + CachedDataBatch::Headers(Batch::new(None, 1..2, vec![ + create_header(1) + ])), + CachedDataBatch::Headers(Batch::new(None, 2..3, vec![ + create_header(2) + ])), + CachedDataBatch::Blocks(Batch::new(None, 3..4, vec![ + create_block(3) + ])), + CachedDataBatch::Blocks(Batch::new(None, 4..5, vec![ + create_block(4) + ])), + CachedDataBatch::Blocks(Batch::new(None, 5..6, vec![ + create_block(5) + ])), + ]; "multiple headers, multiple blocks with max chunk size of 1")] + #[test_case(&[ + create_header(0) + ], &[ + create_block(1) + ], 1, 0..=1 => vec![ + CachedDataBatch::Headers(Batch::new(None, 0..1, vec![ + create_header(0) + ])), + CachedDataBatch::Blocks(Batch::new(None, 1..2, vec![ + create_block(1) + ])), + ]; "one header, one block with max chunk size of 1")] + #[test_case(&[], &[ + create_block(5) + ], 1, 4..=6 => vec![ + CachedDataBatch::None(4..5), + CachedDataBatch::Blocks(Batch::new(None, 5..6, vec![ + create_block(5) + ])), + CachedDataBatch::None(6..7), + ]; "one block in empty range sandwich with max chunk size of 1")] #[tokio::test] - async fn test_ranges_cached_and_empty() { + async fn test_get_batch_scenarios( + headers: &[Sealed], + blocks: &[Sealed], + max_chunk_size: usize, + asked_range: RangeInclusive, + ) -> Vec { let mut cache = Cache::new(); - let mut headers = Vec::new(); - for i in 0..6 { - let mut header = BlockHeader::default(); - header.set_block_height(i.into()); - let header = Sealed { - entity: header.clone(), - consensus: Default::default(), - }; - headers.push(header); - } - cache.insert_headers(Batch::new(None, 0..6, headers)); - let chunks = cache.get_chunks(0..=10, 3); - let chunks: Vec<(Range, Option>)> = chunks.collect().await; - assert_eq!(chunks.len(), 4); - assert_eq!(chunks[0].0, 0..3); - assert_eq!(chunks[1].0, 3..6); - assert_eq!(chunks[2].0, 6..9); - assert_eq!(chunks[3].0, 9..11); - 
assert_eq!(chunks[0].1.as_ref().unwrap().len(), 3); - assert_eq!(chunks[1].1.as_ref().unwrap().len(), 3); - assert_eq!(chunks[2].1.is_none(), true); - assert_eq!(chunks[3].1.is_none(), true); + cache.insert_headers(Batch::new( + None, + 0..headers.len().try_into().unwrap(), + headers.to_vec(), + )); + cache.insert_blocks(Batch::new( + None, + 0..blocks.len().try_into().unwrap(), + blocks.to_vec(), + )); + cache + .get_chunks(asked_range, max_chunk_size) + .collect() + .await } -} \ No newline at end of file +} From 41966a6ed3833748d6fbfb7132adf4825909a783 Mon Sep 17 00:00:00 2001 From: AurelienFT Date: Fri, 18 Oct 2024 11:45:11 +0200 Subject: [PATCH 08/23] Improve changes made to back pressure test because going too fast --- crates/services/sync/src/import/back_pressure_tests.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/crates/services/sync/src/import/back_pressure_tests.rs b/crates/services/sync/src/import/back_pressure_tests.rs index 963e6d2ce03..c595825db89 100644 --- a/crates/services/sync/src/import/back_pressure_tests.rs +++ b/crates/services/sync/src/import/back_pressure_tests.rs @@ -42,7 +42,7 @@ struct Input { )] #[test_case( Input { - headers: Duration::from_millis(10), + headers: Duration::from_millis(100), ..Default::default() }, State::new(None, 1000), @@ -50,7 +50,7 @@ struct Input { block_stream_buffer_size: 10, header_batch_size: 5, } - => is less_or_equal_than Count{ headers: 10, consensus: 10, transactions: 10, executes: 1, blocks: 51 } + => is less_or_equal_than Count{ headers: 10, consensus: 10, transactions: 10, executes: 1, blocks: 50 } ; "1000 headers with max 5 size and max 10 requests when slow headers" )] #[test_case( From 8e4fc44fb9b99c47c0859a9a4f4bb782db847a94 Mon Sep 17 00:00:00 2001 From: AurelienFT Date: Fri, 18 Oct 2024 12:25:19 +0200 Subject: [PATCH 09/23] Update get_chunks method to be more readable --- crates/services/sync/src/import/cache.rs | 289 +++++++++++++---------- 1 file changed, 164 
insertions(+), 125 deletions(-) diff --git a/crates/services/sync/src/import/cache.rs b/crates/services/sync/src/import/cache.rs index ca26105b908..f2c4eea7d0c 100644 --- a/crates/services/sync/src/import/cache.rs +++ b/crates/services/sync/src/import/cache.rs @@ -34,6 +34,16 @@ pub enum CachedDataBatch { None(Range), } +impl CachedDataBatch { + pub fn is_range_empty(&self) -> bool { + match self { + CachedDataBatch::None(range) => range.is_empty(), + CachedDataBatch::Blocks(batch) => batch.range.is_empty(), + CachedDataBatch::Headers(batch) => batch.range.is_empty(), + } + } +} + impl Cache { pub fn new() -> Self { Self(SharedMutex::new(BTreeMap::new())) @@ -61,148 +71,177 @@ impl Cache { let end = (*range.end()).saturating_add(1); let chunk_size_u32 = u32::try_from(max_chunk_size) .expect("The size of the chunk can't exceed `u32`"); - let cache_iter: Vec<(u32, CachedData)> = { - let lock = self.0.lock(); - lock.range(range.clone()) - .map(|(k, v)| (*k, v.clone())) - .collect() - }; + + let cache_iter = self.collect_cache_data(range.clone()); let mut current_height = *range.start(); - // Build a stream of futures that will be splitted in chunks of available data and missing data to fetch with a maximum size of `max_chunk_size` for each chunk. 
let mut chunks = Vec::new(); let mut current_chunk = CachedDataBatch::None(0..0); + for (height, data) in cache_iter { if height == current_height { - current_chunk = match (current_chunk, data) { - (CachedDataBatch::None(_), CachedData::Header(data)) => { - CachedDataBatch::Headers(Batch::new( - None, - height..height.saturating_add(1), - vec![data], - )) - } - (CachedDataBatch::None(_), CachedData::Block(data)) => { - CachedDataBatch::Blocks(Batch::new( - None, - height..height.saturating_add(1), - vec![data], - )) - } - (CachedDataBatch::Headers(mut batch), CachedData::Header(data)) => { - batch.range = batch.range.start..height.saturating_add(1); - batch.results.push(data); - CachedDataBatch::Headers(batch) - } - (CachedDataBatch::Blocks(mut batch), CachedData::Block(data)) => { - batch.range = batch.range.start..height.saturating_add(1); - batch.results.push(data); - CachedDataBatch::Blocks(batch) - } - ( - CachedDataBatch::Headers(headers_batch), - CachedData::Block(block), - ) => { - chunks.push(CachedDataBatch::Headers(headers_batch)); - CachedDataBatch::Blocks(Batch::new( - None, - height..height.saturating_add(1), - vec![block], - )) - } - ( - CachedDataBatch::Blocks(blocks_batch), - CachedData::Header(header), - ) => { - chunks.push(CachedDataBatch::Blocks(blocks_batch)); - CachedDataBatch::Headers(Batch::new( - None, - height..height.saturating_add(1), - vec![header], - )) - } - }; - // Check the chunk limit and push the current chunk if it is full. 
- current_chunk = match current_chunk { - CachedDataBatch::Headers(batch) => { - if batch.results.len() >= max_chunk_size { - chunks.push(CachedDataBatch::Headers(batch)); - CachedDataBatch::None(current_height..current_height) - } else { - CachedDataBatch::Headers(batch) - } - } - CachedDataBatch::Blocks(batch) => { - if batch.results.len() >= max_chunk_size { - chunks.push(CachedDataBatch::Blocks(batch)); - CachedDataBatch::None(current_height..current_height) - } else { - CachedDataBatch::Blocks(batch) - } - } - CachedDataBatch::None(range) => CachedDataBatch::None(range), - } + current_chunk = + self.handle_current_chunk(current_chunk, data, height, &mut chunks); + current_chunk = self.check_chunk_limit( + current_chunk, + max_chunk_size, + &mut chunks, + current_height, + ); } else { - // Push the current chunk of cached if it is not empty. - match current_chunk { - CachedDataBatch::Headers(batch) => { - chunks.push(CachedDataBatch::Headers(batch)); - } - CachedDataBatch::Blocks(batch) => { - chunks.push(CachedDataBatch::Blocks(batch)); - } - CachedDataBatch::None(_) => {} - } + self.push_current_chunk(&mut chunks, current_chunk); + self.push_missing_chunks( + &mut chunks, + current_height, + height, + max_chunk_size, + chunk_size_u32, + end, + ); + current_chunk = self.prepare_next_chunk(data, height); + } + current_height = height.saturating_add(1); + } + + self.push_current_chunk(&mut chunks, current_chunk); + if current_height <= end { + self.push_missing_chunks( + &mut chunks, + current_height, + end, + max_chunk_size, + chunk_size_u32, + end, + ); + } + futures::stream::iter(chunks) + } - // Push the missing chunks. 
- let missing_chunks = (current_height..height) - .step_by(max_chunk_size) - .map(move |chunk_start| { - let block_end = - chunk_start.saturating_add(chunk_size_u32).min(end); - CachedDataBatch::None(chunk_start..block_end) - }); - chunks.extend(missing_chunks); + fn collect_cache_data(&self, range: RangeInclusive) -> Vec<(u32, CachedData)> { + let lock = self.0.lock(); + lock.range(range).map(|(k, v)| (*k, v.clone())).collect() + } - // Prepare the next chunk. - current_chunk = match data { - CachedData::Header(data) => CachedDataBatch::Headers(Batch::new( - None, - height..height.saturating_add(1), - vec![data], - )), - CachedData::Block(data) => CachedDataBatch::Blocks(Batch::new( - None, - height..height.saturating_add(1), - vec![data], - )), - }; + fn handle_current_chunk( + &self, + current_chunk: CachedDataBatch, + data: CachedData, + height: u32, + chunks: &mut Vec, + ) -> CachedDataBatch { + match (current_chunk, data) { + (CachedDataBatch::None(_), CachedData::Header(data)) => { + CachedDataBatch::Headers(Batch::new( + None, + height..height.saturating_add(1), + vec![data], + )) + } + (CachedDataBatch::None(_), CachedData::Block(data)) => { + CachedDataBatch::Blocks(Batch::new( + None, + height..height.saturating_add(1), + vec![data], + )) + } + (CachedDataBatch::Headers(mut batch), CachedData::Header(data)) => { + batch.range = batch.range.start..height.saturating_add(1); + batch.results.push(data); + CachedDataBatch::Headers(batch) + } + (CachedDataBatch::Blocks(mut batch), CachedData::Block(data)) => { + batch.range = batch.range.start..height.saturating_add(1); + batch.results.push(data); + CachedDataBatch::Blocks(batch) + } + (CachedDataBatch::Headers(headers_batch), CachedData::Block(block)) => { + chunks.push(CachedDataBatch::Headers(headers_batch)); + CachedDataBatch::Blocks(Batch::new( + None, + height..height.saturating_add(1), + vec![block], + )) + } + (CachedDataBatch::Blocks(blocks_batch), CachedData::Header(header)) => { + 
chunks.push(CachedDataBatch::Blocks(blocks_batch)); + CachedDataBatch::Headers(Batch::new( + None, + height..height.saturating_add(1), + vec![header], + )) } - current_height = height.saturating_add(1); } - // Push the last chunk of cached if it is not empty. + } + + fn check_chunk_limit( + &self, + current_chunk: CachedDataBatch, + max_chunk_size: usize, + chunks: &mut Vec, + current_height: u32, + ) -> CachedDataBatch { match current_chunk { CachedDataBatch::Headers(batch) => { - chunks.push(CachedDataBatch::Headers(batch)); + if batch.results.len() >= max_chunk_size { + chunks.push(CachedDataBatch::Headers(batch)); + CachedDataBatch::None(current_height..current_height) + } else { + CachedDataBatch::Headers(batch) + } } CachedDataBatch::Blocks(batch) => { - chunks.push(CachedDataBatch::Blocks(batch)); + if batch.results.len() >= max_chunk_size { + chunks.push(CachedDataBatch::Blocks(batch)); + CachedDataBatch::None(current_height..current_height) + } else { + CachedDataBatch::Blocks(batch) + } } - CachedDataBatch::None(_) => {} + CachedDataBatch::None(range) => CachedDataBatch::None(range), + } + } + + fn push_current_chunk( + &self, + chunks: &mut Vec, + current_chunk: CachedDataBatch, + ) { + if !current_chunk.is_range_empty() { + chunks.push(current_chunk); } - // Push the last missing chunk. - if current_height <= *range.end() { - // Include the last height. 
- let missing_chunks = - (current_height..end) - .step_by(max_chunk_size) - .map(move |chunk_start| { - let block_end = - chunk_start.saturating_add(chunk_size_u32).min(end); - CachedDataBatch::None(chunk_start..block_end) - }); - chunks.extend(missing_chunks); + } + + fn push_missing_chunks( + &self, + chunks: &mut Vec, + current_height: u32, + height: u32, + max_chunk_size: usize, + chunk_size_u32: u32, + end: u32, + ) { + let missing_chunks = + (current_height..height) + .step_by(max_chunk_size) + .map(move |chunk_start| { + let block_end = chunk_start.saturating_add(chunk_size_u32).min(end); + CachedDataBatch::None(chunk_start..block_end) + }); + chunks.extend(missing_chunks); + } + + fn prepare_next_chunk(&self, data: CachedData, height: u32) -> CachedDataBatch { + match data { + CachedData::Header(data) => CachedDataBatch::Headers(Batch::new( + None, + height..height.saturating_add(1), + vec![data], + )), + CachedData::Block(data) => CachedDataBatch::Blocks(Batch::new( + None, + height..height.saturating_add(1), + vec![data], + )), } - futures::stream::iter(chunks) } pub fn remove_element(&mut self, height: &u32) { From 879eb946d69fd4e8c2707fbd56e8e770e8be09ee Mon Sep 17 00:00:00 2001 From: AurelienFT <32803821+AurelienFT@users.noreply.github.com> Date: Fri, 18 Oct 2024 12:28:58 +0200 Subject: [PATCH 10/23] Update CHANGELOG.md MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Rafał Chabowski <88321181+rafal-ch@users.noreply.github.com> --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 6d0d73f512b..24badf79bda 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -8,7 +8,7 @@ and this project adheres to [Semantic Versioning](http://semver.org/). 
### Added - [2321](https://github.com/FuelLabs/fuel-core/pull/2321): New metrics for the txpool: "The size of transactions in the txpool" (`txpool_tx_size`), "The time spent by a transaction in the txpool in seconds" (`txpool_tx_time_in_txpool_seconds`), The number of transactions in the txpool (`txpool_number_of_transactions`), "The number of transactions pending verification before entering the txpool" (`txpool_number_of_transactions_pending_verification`), "The number of executable transactions in the txpool" (`txpool_number_of_executable_transactions`), "The time it took to select transactions for inclusion in a block in nanoseconds" (`txpool_select_transaction_time_nanoseconds`), The time it took to insert a transaction in the txpool in milliseconds (`txpool_insert_transaction_time_milliseconds`). -- [2361](https://github.com/FuelLabs/fuel-core/pull/2361): Add caches to the syync service to not reask for data it already fetched from the network. +- [2361](https://github.com/FuelLabs/fuel-core/pull/2361): Add caches to the sync service to not reask for data it already fetched from the network. ### Fixed - [2366](https://github.com/FuelLabs/fuel-core/pull/2366): The `importer_gas_price_for_block` metric is properly collected. From a29e763ceae29b7295b7f8b6b910565311c2bc71 Mon Sep 17 00:00:00 2001 From: AurelienFT Date: Fri, 18 Oct 2024 12:29:56 +0200 Subject: [PATCH 11/23] Avoid unecessary changes in changelog --- CHANGELOG.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 24badf79bda..8aa4aa60db5 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,13 +6,13 @@ and this project adheres to [Semantic Versioning](http://semver.org/). ## [Unreleased] +### Fixed +- [2366](https://github.com/FuelLabs/fuel-core/pull/2366): The `importer_gas_price_for_block` metric is properly collected. 
+ ### Added - [2321](https://github.com/FuelLabs/fuel-core/pull/2321): New metrics for the txpool: "The size of transactions in the txpool" (`txpool_tx_size`), "The time spent by a transaction in the txpool in seconds" (`txpool_tx_time_in_txpool_seconds`), The number of transactions in the txpool (`txpool_number_of_transactions`), "The number of transactions pending verification before entering the txpool" (`txpool_number_of_transactions_pending_verification`), "The number of executable transactions in the txpool" (`txpool_number_of_executable_transactions`), "The time it took to select transactions for inclusion in a block in nanoseconds" (`txpool_select_transaction_time_nanoseconds`), The time it took to insert a transaction in the txpool in milliseconds (`txpool_insert_transaction_time_milliseconds`). - [2361](https://github.com/FuelLabs/fuel-core/pull/2361): Add caches to the sync service to not reask for data it already fetched from the network. -### Fixed -- [2366](https://github.com/FuelLabs/fuel-core/pull/2366): The `importer_gas_price_for_block` metric is properly collected. 
- ## [Version 0.40.0] ### Added From 83401ced14b341d39b3baa0ba788188076952f7c Mon Sep 17 00:00:00 2001 From: AurelienFT Date: Fri, 18 Oct 2024 12:30:28 +0200 Subject: [PATCH 12/23] Remove old comment --- crates/services/sync/src/import.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/services/sync/src/import.rs b/crates/services/sync/src/import.rs index 5dc18c73a6d..3e0b61aa35c 100644 --- a/crates/services/sync/src/import.rs +++ b/crates/services/sync/src/import.rs @@ -326,7 +326,7 @@ where range, results, } = batch; - // clean cache of all blocks that are in the range + let mut done = vec![]; let mut shutdown = shutdown.clone(); for sealed_block in results { From 62cae71d7ed0f1123f9f654f08462c1a59773a99 Mon Sep 17 00:00:00 2001 From: AurelienFT Date: Fri, 18 Oct 2024 18:46:13 +0200 Subject: [PATCH 13/23] fix compil benchmark --- benches/benches/import.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/benches/benches/import.rs b/benches/benches/import.rs index c8402b37e51..19a212ec1e5 100644 --- a/benches/benches/import.rs +++ b/benches/benches/import.rs @@ -17,7 +17,7 @@ use fuel_core_sync::state::State; use std::time::Duration; use tokio::runtime::Runtime; -async fn execute_import(import: PressureImport, shutdown: &mut StateWatcher) { +async fn execute_import(mut import: PressureImport, shutdown: &mut StateWatcher) { import.import(shutdown).await.unwrap(); } @@ -50,7 +50,7 @@ fn bench_imports(c: &mut Criterion) { let shared_count = SharedCounts::new(Default::default()); let state = State::new(None, n); let shared_state = SharedMutex::new(state); - let (import, _tx, mut shutdown) = provision_import_test( + let (mut import, _tx, mut shutdown) = provision_import_test( shared_count.clone(), shared_state, durations, From 84bac0a9971e9b15ad65556934e84dbb20d02bb4 Mon Sep 17 00:00:00 2001 From: AurelienFT Date: Mon, 21 Oct 2024 09:46:56 +0200 Subject: [PATCH 14/23] fix clippy --- benches/benches/import.rs | 2 +- 1 file 
changed, 1 insertion(+), 1 deletion(-) diff --git a/benches/benches/import.rs b/benches/benches/import.rs index 19a212ec1e5..268c355ba11 100644 --- a/benches/benches/import.rs +++ b/benches/benches/import.rs @@ -50,7 +50,7 @@ fn bench_imports(c: &mut Criterion) { let shared_count = SharedCounts::new(Default::default()); let state = State::new(None, n); let shared_state = SharedMutex::new(state); - let (mut import, _tx, mut shutdown) = provision_import_test( + let (import, _tx, mut shutdown) = provision_import_test( shared_count.clone(), shared_state, durations, From 42719bf4226101cfea4bf437bcb48699f298c4b9 Mon Sep 17 00:00:00 2001 From: AurelienFT Date: Mon, 21 Oct 2024 14:36:56 +0200 Subject: [PATCH 15/23] Add a way to fetch transactions without specifying a peer in P2P --- crates/services/p2p/src/service.rs | 45 ++++++++++++++++++++++++++++-- 1 file changed, 43 insertions(+), 2 deletions(-) diff --git a/crates/services/p2p/src/service.rs b/crates/services/p2p/src/service.rs index 3ff09da0beb..a2798c4bdc6 100644 --- a/crates/services/p2p/src/service.rs +++ b/crates/services/p2p/src/service.rs @@ -115,6 +115,10 @@ pub enum TaskRequest { channel: OnResponse>>, }, GetTransactions { + block_height_range: Range, + channel: OnResponse>>, + }, + GetTransactionsFromPeer { block_height_range: Range, from_peer: PeerId, channel: OnResponse>>, @@ -165,6 +169,9 @@ impl Debug for TaskRequest { TaskRequest::GetTransactions { .. } => { write!(f, "TaskRequest::GetTransactions") } + TaskRequest::GetTransactionsFromPeer { .. } => { + write!(f, "TaskRequest::GetTransactionsFromPeer") + } TaskRequest::TxPoolGetAllTxIds { .. 
} => { write!(f, "TaskRequest::TxPoolGetAllTxIds") } @@ -856,7 +863,16 @@ where tracing::warn!("No peers found for block at height {:?}", height); } } - Some(TaskRequest::GetTransactions { block_height_range, from_peer, channel }) => { + Some(TaskRequest::GetTransactions {block_height_range, channel }) => { + let channel = ResponseSender::Transactions(channel); + let request_msg = RequestMessage::Transactions(block_height_range.clone()); + let height = BlockHeight::from(block_height_range.end.saturating_sub(1)); + let peer = self.p2p_service.get_peer_id_with_height(&height); + if self.p2p_service.send_request_msg(peer, request_msg, channel).is_err() { + tracing::warn!("No peers found for block at height {:?}", block_height_range.end); + } + } + Some(TaskRequest::GetTransactionsFromPeer { block_height_range, from_peer, channel }) => { let channel = ResponseSender::Transactions(channel); let request_msg = RequestMessage::Transactions(block_height_range); self.p2p_service.send_request_msg(Some(from_peer), request_msg, channel).expect("We always a peer here, so send has a target"); @@ -1020,6 +1036,31 @@ impl SharedState { Ok((peer_id.to_bytes(), data)) } + pub async fn get_transactions( + &self, + range: Range, + ) -> anyhow::Result<(Vec, Option>)> { + let (sender, receiver) = oneshot::channel(); + + if range.is_empty() { + return Err(anyhow!( + "Cannot retrieve transactions for an empty range of block heights" + )); + } + + self.request_sender + .send(TaskRequest::GetTransactions { + block_height_range: range, + channel: sender, + }) + .await?; + + let (peer_id, response) = receiver.await.map_err(|e| anyhow!("{e}"))?; + + let data = response.map_err(|e| anyhow!("Invalid response from peer {e:?}"))?; + Ok((peer_id.to_bytes(), data)) + } + pub async fn get_transactions_from_peer( &self, peer_id: FuelPeerId, @@ -1028,7 +1069,7 @@ impl SharedState { let (sender, receiver) = oneshot::channel(); let from_peer = PeerId::from_bytes(peer_id.as_ref()).expect("Valid PeerId"); - 
let request = TaskRequest::GetTransactions { + let request = TaskRequest::GetTransactionsFromPeer { block_height_range: range, from_peer, channel: sender, From c80f3acea550dc6382922cedc9f43057841ccb95 Mon Sep 17 00:00:00 2001 From: AurelienFT Date: Mon, 21 Oct 2024 14:41:03 +0200 Subject: [PATCH 16/23] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index e4e728c2881..25b9955881a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -11,6 +11,7 @@ and this project adheres to [Semantic Versioning](http://semver.org/). ### Added - [2321](https://github.com/FuelLabs/fuel-core/pull/2321): New metrics for the txpool: "The size of transactions in the txpool" (`txpool_tx_size`), "The time spent by a transaction in the txpool in seconds" (`txpool_tx_time_in_txpool_seconds`), The number of transactions in the txpool (`txpool_number_of_transactions`), "The number of transactions pending verification before entering the txpool" (`txpool_number_of_transactions_pending_verification`), "The number of executable transactions in the txpool" (`txpool_number_of_executable_transactions`), "The time it took to select transactions for inclusion in a block in nanoseconds" (`txpool_select_transaction_time_nanoseconds`), The time it took to insert a transaction in the txpool in milliseconds (`txpool_insert_transaction_time_milliseconds`). +- [2376](https://github.com/FuelLabs/fuel-core/pull/2376): Add a way to fetch transactions in P2P without specifying a peer. 
## [Version 0.40.0] From 99135e39c0853813bcb0b535252d91e8d90825d9 Mon Sep 17 00:00:00 2001 From: AurelienFT Date: Mon, 21 Oct 2024 15:21:50 +0200 Subject: [PATCH 17/23] Use get transactions without peer_id when use headers from cache --- crates/fuel-core/src/service/adapters/sync.rs | 19 ++++ crates/services/sync/src/import.rs | 71 ++++++++----- .../test_helpers/pressure_peer_to_peer.rs | 30 ++++-- crates/services/sync/src/import/tests.rs | 100 ++++++++++-------- crates/services/sync/src/ports.rs | 8 +- crates/services/sync/src/service/tests.rs | 15 +-- 6 files changed, 156 insertions(+), 87 deletions(-) diff --git a/crates/fuel-core/src/service/adapters/sync.rs b/crates/fuel-core/src/service/adapters/sync.rs index cc4bb4dbbba..62adc4dd120 100644 --- a/crates/fuel-core/src/service/adapters/sync.rs +++ b/crates/fuel-core/src/service/adapters/sync.rs @@ -66,6 +66,25 @@ impl PeerToPeerPort for P2PAdapter { } async fn get_transactions( + &self, + block_ids: Range, + ) -> anyhow::Result>>> { + let result = if let Some(service) = &self.service { + service.get_transactions(block_ids).await + } else { + Err(anyhow::anyhow!("No P2P service available")) + }; + match result { + Ok((peer_id, transactions)) => { + let peer_id: PeerId = peer_id.into(); + let transactions = peer_id.bind(transactions); + Ok(transactions) + } + Err(err) => Err(err), + } + } + + async fn get_transactions_from_peer( &self, range: SourcePeer>, ) -> anyhow::Result>> { diff --git a/crates/services/sync/src/import.rs b/crates/services/sync/src/import.rs index 3e0b61aa35c..37f8579cfe2 100644 --- a/crates/services/sync/src/import.rs +++ b/crates/services/sync/src/import.rs @@ -563,27 +563,47 @@ where } async fn get_transactions

( - peer_id: PeerId, range: Range, + peer_id: Option, p2p: &Arc

, -) -> Option> +) -> Option>> where P: PeerToPeerPort + Send + Sync + 'static, { - let range = peer_id.clone().bind(range); - let res = p2p - .get_transactions(range) - .await - .trace_err("Failed to get transactions"); - match res { - Ok(Some(transactions)) => Some(transactions), - _ => { - report_peer( - p2p, - Some(peer_id.clone()), - PeerReportReason::MissingTransactions, - ); - None + match peer_id { + Some(peer_id) => { + let source_peer = peer_id.clone().bind(range.clone()); + let Ok(Some(txs)) = p2p + .get_transactions_from_peer(source_peer) + .await + .trace_err("Failed to get transactions") + else { + report_peer( + p2p, + Some(peer_id.clone()), + PeerReportReason::MissingTransactions, + ); + return None; + }; + Some(SourcePeer { peer_id, data: txs }) + } + None => { + let Ok(SourcePeer { peer_id, data }) = p2p + .get_transactions(range.clone()) + .await + .trace_err("Failed to get transactions") + else { + return None; + }; + let Some(txs) = data else { + report_peer( + p2p, + Some(peer_id.clone()), + PeerReportReason::MissingTransactions, + ); + return None; + }; + Some(SourcePeer { peer_id, data: txs }) } } } @@ -646,20 +666,19 @@ where { let Batch { results: headers, - peer, range, + peer, } = headers; - let Some(peer) = peer else { - return SealedBlockBatch::new(None, range, vec![]) - }; - - let Some(transaction_data) = get_transactions(peer.clone(), range.clone(), p2p).await + let Some(SourcePeer { + peer_id, + data: transactions, + }) = get_transactions(range.clone(), peer.clone(), p2p).await else { - return Batch::new(Some(peer), range, vec![]) + return Batch::new(peer, range, vec![]) }; - let iter = headers.into_iter().zip(transaction_data.into_iter()); + let iter = headers.into_iter().zip(transactions.into_iter()); let mut blocks = vec![]; for (block_header, transactions) in iter { let SealedBlockHeader { @@ -676,13 +695,13 @@ where } else { report_peer( p2p, - Some(peer.clone()), + Some(peer_id.clone()), PeerReportReason::InvalidTransactions, 
); break } } - Batch::new(Some(peer), range, blocks) + Batch::new(Some(peer_id), range, blocks) } #[tracing::instrument( diff --git a/crates/services/sync/src/import/test_helpers/pressure_peer_to_peer.rs b/crates/services/sync/src/import/test_helpers/pressure_peer_to_peer.rs index e77e94ff4ad..3d74cf73ffa 100644 --- a/crates/services/sync/src/import/test_helpers/pressure_peer_to_peer.rs +++ b/crates/services/sync/src/import/test_helpers/pressure_peer_to_peer.rs @@ -47,7 +47,7 @@ impl PeerToPeerPort for PressurePeerToPeer { self.p2p.get_sealed_block_headers(block_height_range).await } - async fn get_transactions( + async fn get_transactions_from_peer( &self, block_ids: SourcePeer>, ) -> anyhow::Result>> { @@ -57,6 +57,19 @@ impl PeerToPeerPort for PressurePeerToPeer { self.counts.apply(|c| c.inc_blocks()); } self.counts.apply(|c| c.dec_transactions()); + self.p2p.get_transactions_from_peer(block_ids).await + } + + async fn get_transactions( + &self, + block_ids: Range, + ) -> anyhow::Result>>> { + self.counts.apply(|c| c.inc_transactions()); + tokio::time::sleep(self.durations[1]).await; + for _height in block_ids.clone() { + self.counts.apply(|c| c.inc_blocks()); + } + self.counts.apply(|c| c.dec_transactions()); self.p2p.get_transactions(block_ids).await } @@ -84,13 +97,14 @@ impl PressurePeerToPeer { Ok(headers) }) }); - mock.expect_get_transactions().returning(|block_ids| { - Box::pin(async move { - let data = block_ids.data; - let v = data.into_iter().map(|_| Transactions::default()).collect(); - Ok(Some(v)) - }) - }); + mock.expect_get_transactions_from_peer() + .returning(|block_ids| { + Box::pin(async move { + let data = block_ids.data; + let v = data.into_iter().map(|_| Transactions::default()).collect(); + Ok(Some(v)) + }) + }); Self { p2p: mock, durations: delays, diff --git a/crates/services/sync/src/import/tests.rs b/crates/services/sync/src/import/tests.rs index 3b4fa7cf6b1..36c75d733f5 100644 --- a/crates/services/sync/src/import/tests.rs +++ 
b/crates/services/sync/src/import/tests.rs @@ -49,7 +49,7 @@ async fn test_import_0_to_5() { Ok(headers) }) }); - p2p.expect_get_transactions() + p2p.expect_get_transactions_from_peer() .times(1) .returning(|block_ids| { Box::pin(async move { @@ -99,7 +99,7 @@ async fn test_import_3_to_5() { Ok(headers) }) }); - p2p.expect_get_transactions() + p2p.expect_get_transactions_from_peer() .times(1) .returning(|block_ids| { Box::pin(async move { @@ -169,7 +169,7 @@ async fn test_import_0_to_499() { // Happens once for each batch let times = div_ceil(n, header_batch_size); - p2p.expect_get_transactions() + p2p.expect_get_transactions_from_peer() .times(times) .returning(|block_ids| { Box::pin(async move { @@ -219,7 +219,7 @@ async fn import__signature_fails_on_header_5_only() { Ok(headers) }) }); - p2p.expect_get_transactions() + p2p.expect_get_transactions_from_peer() .times(1) .returning(|block_ids| { Box::pin(async move { @@ -304,7 +304,7 @@ async fn import__keep_data_asked_in_fail_ask_header_cases() { }) }); // No reask - p2p.expect_get_transactions() + p2p.expect_get_transactions_from_peer() .times(3) .returning(|block_ids| { Box::pin(async move { @@ -377,7 +377,7 @@ async fn import__keep_data_asked_in_fail_ask_transactions_cases() { let mut seq = Sequence::new(); // Given // Fail to get transactions for block 4 - p2p.expect_get_transactions() + p2p.expect_get_transactions_from_peer() .times(1) .in_sequence(&mut seq) .returning(|_| { @@ -387,7 +387,7 @@ async fn import__keep_data_asked_in_fail_ask_transactions_cases() { }) }); - p2p.expect_get_transactions() + p2p.expect_get_transactions_from_peer() .times(2) .in_sequence(&mut seq) .returning(|block_ids| { @@ -404,9 +404,12 @@ async fn import__keep_data_asked_in_fail_ask_transactions_cases() { .in_sequence(&mut seq) .returning(|block_ids| { Box::pin(async move { - let data = block_ids.data; + let data = block_ids; let v = data.into_iter().map(|_| Transactions::default()).collect(); - Ok(Some(v)) + Ok(SourcePeer { + 
peer_id: random_peer(), + data: Some(v), + }) }) }); @@ -466,7 +469,7 @@ async fn import__signature_fails_on_header_4_only() { Ok(headers) }) }); - p2p.expect_get_transactions() + p2p.expect_get_transactions_from_peer() .times(0) .returning(|block_ids| { Box::pin(async move { @@ -575,7 +578,7 @@ async fn import__header_5_not_found() { }) }); - p2p.expect_get_transactions() + p2p.expect_get_transactions_from_peer() .times(1) .returning(|block_ids| { Box::pin(async move { @@ -617,7 +620,7 @@ async fn import__header_4_not_found() { Ok(headers) }) }); - p2p.expect_get_transactions().times(0); + p2p.expect_get_transactions_from_peer().times(0); let state = State::new(3, 5).into(); let mocks = Mocks { @@ -661,7 +664,7 @@ async fn import__transactions_not_found() { Ok(headers) }) }); - p2p.expect_get_transactions() + p2p.expect_get_transactions_from_peer() .times(1) .returning(|_| Box::pin(async move { Ok(None) })); @@ -708,7 +711,7 @@ async fn import__transactions_not_found_for_header_4() { }) }); let mut height = 3; - p2p.expect_get_transactions() + p2p.expect_get_transactions_from_peer() .times(1) .returning(move |block_ids| { Box::pin(async move { @@ -765,12 +768,14 @@ async fn import__transactions_not_found_for_header_5() { Ok(headers) }) }); - p2p.expect_get_transactions().times(1).returning(move |_| { - Box::pin(async move { - let v = vec![Transactions::default()]; - Ok(Some(v)) - }) - }); + p2p.expect_get_transactions_from_peer() + .times(1) + .returning(move |_| { + Box::pin(async move { + let v = vec![Transactions::default()]; + Ok(Some(v)) + }) + }); let state = State::new(3, 5).into(); let mocks = Mocks { @@ -799,7 +804,7 @@ async fn import__p2p_error() { .returning(|_| { Box::pin(async move { Err(anyhow::anyhow!("Some network error")) }) }); - p2p.expect_get_transactions().times(0); + p2p.expect_get_transactions_from_peer().times(0); let state = State::new(3, 5).into(); let mocks = Mocks { @@ -843,9 +848,11 @@ async fn import__p2p_error_on_4_transactions() { 
Ok(headers) }) }); - p2p.expect_get_transactions().times(1).returning(|_| { - Box::pin(async move { Err(anyhow::anyhow!("Some network error")) }) - }); + p2p.expect_get_transactions_from_peer() + .times(1) + .returning(|_| { + Box::pin(async move { Err(anyhow::anyhow!("Some network error")) }) + }); let state = State::new(3, 5).into(); let mocks = Mocks { @@ -895,7 +902,7 @@ async fn import__consensus_error_on_4() { Ok(headers) }) }); - p2p.expect_get_transactions().times(0); + p2p.expect_get_transactions_from_peer().times(0); let state = State::new(3, 5).into(); let mocks = Mocks { @@ -945,7 +952,7 @@ async fn import__consensus_error_on_5() { Ok(headers) }) }); - p2p.expect_get_transactions() + p2p.expect_get_transactions_from_peer() .times(1) .returning(|block_ids| { Box::pin(async move { @@ -997,7 +1004,7 @@ async fn import__execution_error_on_header_4() { Ok(headers) }) }); - p2p.expect_get_transactions() + p2p.expect_get_transactions_from_peer() .times(1) .returning(|block_ids| { Box::pin(async move { @@ -1061,7 +1068,7 @@ async fn import__execution_error_on_header_5() { Ok(headers) }) }); - p2p.expect_get_transactions() + p2p.expect_get_transactions_from_peer() .times(1) .returning(|block_ids| { Box::pin(async move { @@ -1158,7 +1165,7 @@ async fn import__can_work_in_two_loops() { Ok(headers) }) }); - p2p.expect_get_transactions() + p2p.expect_get_transactions_from_peer() .times(2) .returning(|block_ids| { Box::pin(async move { @@ -1310,13 +1317,14 @@ async fn import__execution_error_on_header_4_when_awaits_for_1000000_blocks() { Ok(headers) }) }); - p2p.expect_get_transactions().returning(|block_ids| { - Box::pin(async move { - let data = block_ids.data; - let v = data.into_iter().map(|_| Transactions::default()).collect(); - Ok(Some(v)) - }) - }); + p2p.expect_get_transactions_from_peer() + .returning(|block_ids| { + Box::pin(async move { + let data = block_ids.data; + let v = data.into_iter().map(|_| Transactions::default()).collect(); + Ok(Some(v)) + }) + 
}); let mut executor = MockBlockImporterPort::default(); executor @@ -1477,18 +1485,20 @@ impl PeerReportTestBuilder { let transactions = self.get_transactions.clone(); if let Some(t) = transactions { - p2p.expect_get_transactions().returning(move |_| { + p2p.expect_get_transactions_from_peer().returning(move |_| { let t = t.clone(); Box::pin(async move { Ok(t) }) }); } else { - p2p.expect_get_transactions().returning(|block_ids| { - Box::pin(async move { - let data = block_ids.data; - let v = data.into_iter().map(|_| Transactions::default()).collect(); - Ok(Some(v)) - }) - }); + p2p.expect_get_transactions_from_peer() + .returning(|block_ids| { + Box::pin(async move { + let data = block_ids.data; + let v = + data.into_iter().map(|_| Transactions::default()).collect(); + Ok(Some(v)) + }) + }); } let mut seq = mockall::Sequence::new(); @@ -1611,7 +1621,7 @@ impl DefaultMocks for MockPeerToPeerPort { }) }); - p2p.expect_get_transactions() + p2p.expect_get_transactions_from_peer() .times(t.next().unwrap()) .returning(|block_ids| { Box::pin(async move { diff --git a/crates/services/sync/src/ports.rs b/crates/services/sync/src/ports.rs index 2d6e3f76eb3..483b4f10156 100644 --- a/crates/services/sync/src/ports.rs +++ b/crates/services/sync/src/ports.rs @@ -48,8 +48,14 @@ pub trait PeerToPeerPort { ) -> anyhow::Result>>>; /// Request transactions from the network for the given block range - /// and source peer. async fn get_transactions( + &self, + block_ids: Range, + ) -> anyhow::Result>>>; + + /// Request transactions from the network for the given block range + /// and source peer. 
+ async fn get_transactions_from_peer( &self, block_ids: SourcePeer>, ) -> anyhow::Result>>; diff --git a/crates/services/sync/src/service/tests.rs b/crates/services/sync/src/service/tests.rs index 91f3460a632..19748c4b9aa 100644 --- a/crates/services/sync/src/service/tests.rs +++ b/crates/services/sync/src/service/tests.rs @@ -46,13 +46,14 @@ async fn test_new_service() { Ok(headers) }) }); - p2p.expect_get_transactions().returning(|block_ids| { - Box::pin(async move { - let data = block_ids.data; - let v = data.into_iter().map(|_| Transactions::default()).collect(); - Ok(Some(v)) - }) - }); + p2p.expect_get_transactions_from_peer() + .returning(|block_ids| { + Box::pin(async move { + let data = block_ids.data; + let v = data.into_iter().map(|_| Transactions::default()).collect(); + Ok(Some(v)) + }) + }); let mut importer = MockBlockImporterPort::default(); importer .expect_committed_height_stream() From a566ac569581447cbcc24cc4677bdb84f9c1c9be Mon Sep 17 00:00:00 2001 From: AurelienFT Date: Wed, 27 Nov 2024 13:54:17 +0100 Subject: [PATCH 18/23] Improve some caches mechanism in sync and more comments on tests --- Cargo.lock | 2 +- crates/services/sync/src/import.rs | 3 +- crates/services/sync/src/import/cache.rs | 71 +++++++++--------------- crates/services/sync/src/import/tests.rs | 17 ++++-- 4 files changed, 41 insertions(+), 52 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 1ed9dd67683..07022867482 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6958,7 +6958,7 @@ dependencies = [ "pest_meta", "proc-macro2", "quote", - "syn 2.0.87", + "syn 2.0.89", ] [[package]] diff --git a/crates/services/sync/src/import.rs b/crates/services/sync/src/import.rs index 37f8579cfe2..96ffcd78578 100644 --- a/crates/services/sync/src/import.rs +++ b/crates/services/sync/src/import.rs @@ -486,7 +486,8 @@ fn get_block_stream< } } BlockHeaderData::Cached(CachedDataBatch::None(_)) => { - unreachable!() + tracing::error!("Cached data batch should never be created outside of the 
caching algorithm."); + Batch::new(None, 0..1, vec![]) } } } diff --git a/crates/services/sync/src/import/cache.rs b/crates/services/sync/src/import/cache.rs index f2c4eea7d0c..f50dc8f0d40 100644 --- a/crates/services/sync/src/import/cache.rs +++ b/crates/services/sync/src/import/cache.rs @@ -78,18 +78,12 @@ impl Cache { let mut current_chunk = CachedDataBatch::None(0..0); for (height, data) in cache_iter { - if height == current_height { - current_chunk = - self.handle_current_chunk(current_chunk, data, height, &mut chunks); - current_chunk = self.check_chunk_limit( - current_chunk, - max_chunk_size, - &mut chunks, - current_height, - ); - } else { - self.push_current_chunk(&mut chunks, current_chunk); - self.push_missing_chunks( + if height != current_height { + if !current_chunk.is_range_empty() { + chunks.push(current_chunk); + } + current_chunk = CachedDataBatch::None(0..0); + Self::push_missing_chunks( &mut chunks, current_height, height, @@ -97,14 +91,25 @@ impl Cache { chunk_size_u32, end, ); - current_chunk = self.prepare_next_chunk(data, height); + current_height = height; } + current_chunk = + Self::handle_current_chunk(current_chunk, data, height, &mut chunks); + current_chunk = Self::check_chunk_limit( + current_chunk, + max_chunk_size, + &mut chunks, + current_height, + ); + current_height = height.saturating_add(1); } - self.push_current_chunk(&mut chunks, current_chunk); + if !current_chunk.is_range_empty() { + chunks.push(current_chunk); + } if current_height <= end { - self.push_missing_chunks( + Self::push_missing_chunks( &mut chunks, current_height, end, @@ -122,7 +127,6 @@ impl Cache { } fn handle_current_chunk( - &self, current_chunk: CachedDataBatch, data: CachedData, height: u32, @@ -144,16 +148,19 @@ impl Cache { )) } (CachedDataBatch::Headers(mut batch), CachedData::Header(data)) => { - batch.range = batch.range.start..height.saturating_add(1); + debug_assert_eq!(batch.range.end, height); + batch.range = 
batch.range.start..batch.range.end.saturating_add(1); batch.results.push(data); CachedDataBatch::Headers(batch) } (CachedDataBatch::Blocks(mut batch), CachedData::Block(data)) => { - batch.range = batch.range.start..height.saturating_add(1); + debug_assert_eq!(batch.range.end, height); + batch.range = batch.range.start..batch.range.end.saturating_add(1); batch.results.push(data); CachedDataBatch::Blocks(batch) } (CachedDataBatch::Headers(headers_batch), CachedData::Block(block)) => { + debug_assert_eq!(headers_batch.range.end, height); chunks.push(CachedDataBatch::Headers(headers_batch)); CachedDataBatch::Blocks(Batch::new( None, @@ -162,6 +169,7 @@ impl Cache { )) } (CachedDataBatch::Blocks(blocks_batch), CachedData::Header(header)) => { + debug_assert_eq!(blocks_batch.range.end, height); chunks.push(CachedDataBatch::Blocks(blocks_batch)); CachedDataBatch::Headers(Batch::new( None, @@ -173,7 +181,6 @@ impl Cache { } fn check_chunk_limit( - &self, current_chunk: CachedDataBatch, max_chunk_size: usize, chunks: &mut Vec, @@ -200,18 +207,7 @@ impl Cache { } } - fn push_current_chunk( - &self, - chunks: &mut Vec, - current_chunk: CachedDataBatch, - ) { - if !current_chunk.is_range_empty() { - chunks.push(current_chunk); - } - } - fn push_missing_chunks( - &self, chunks: &mut Vec, current_height: u32, height: u32, @@ -229,21 +225,6 @@ impl Cache { chunks.extend(missing_chunks); } - fn prepare_next_chunk(&self, data: CachedData, height: u32) -> CachedDataBatch { - match data { - CachedData::Header(data) => CachedDataBatch::Headers(Batch::new( - None, - height..height.saturating_add(1), - vec![data], - )), - CachedData::Block(data) => CachedDataBatch::Blocks(Batch::new( - None, - height..height.saturating_add(1), - vec![data], - )), - } - } - pub fn remove_element(&mut self, height: &u32) { let mut lock = self.0.lock(); lock.remove(height); diff --git a/crates/services/sync/src/import/tests.rs b/crates/services/sync/src/import/tests.rs index 36c75d733f5..2c125f136cd 
100644 --- a/crates/services/sync/src/import/tests.rs +++ b/crates/services/sync/src/import/tests.rs @@ -249,18 +249,19 @@ async fn import__signature_fails_on_header_5_only() { #[tokio::test] async fn import__keep_data_asked_in_fail_ask_header_cases() { + // Test is going from block 4 (3 already committed) to 6 let params = Config { block_stream_buffer_size: 10, header_batch_size: 1, }; let mut consensus_port = MockConsensusPort::default(); - // No reask + // No reask on verification on all of the blocks consensus_port .expect_check_sealed_header() .times(3) .returning(|_| Ok(true)); - // No reask + // No reask on da height on all of the blocks consensus_port .expect_await_da_height() .times(3) @@ -279,6 +280,7 @@ async fn import__keep_data_asked_in_fail_ask_header_cases() { Err(anyhow::anyhow!("Some network error")) }) }); + // Success for 5 and 6 that is in parallel with 4 p2p.expect_get_sealed_block_headers() .times(2) .in_sequence(&mut seq) @@ -303,7 +305,7 @@ async fn import__keep_data_asked_in_fail_ask_header_cases() { Ok(headers) }) }); - // No reask + // No reask on getting full block step for 4, 5 and 6 blocks p2p.expect_get_transactions_from_peer() .times(3) .returning(|block_ids| { @@ -340,7 +342,7 @@ async fn import__keep_data_asked_in_fail_ask_header_cases() { *state.lock() = State::new(3, 6); // When - // Should re-ask to P2P only block 4 and for now it reask for all blocks + // Should re-ask to P2P only block 4. 
let res = import.import(&mut watcher).await.is_ok(); assert!(res); assert_eq!(&State::new(6, None), state.lock().deref()); @@ -348,22 +350,26 @@ async fn import__keep_data_asked_in_fail_ask_header_cases() { #[tokio::test] async fn import__keep_data_asked_in_fail_ask_transactions_cases() { + // Test is going from block 4 (3 already committed) to 6 let params = Config { block_stream_buffer_size: 10, header_batch_size: 1, }; let mut consensus_port = MockConsensusPort::default(); + // No reask on verification on all of the blocks consensus_port .expect_check_sealed_header() .times(3) .returning(|_| Ok(true)); + // No reask on da height on all of the blocks consensus_port .expect_await_da_height() .times(4) .returning(|_| Ok(())); let mut p2p = MockPeerToPeerPort::default(); + // Everything goes well on the headers part for all blocks p2p.expect_get_sealed_block_headers() .times(3) .returning(|range| { @@ -387,6 +393,7 @@ async fn import__keep_data_asked_in_fail_ask_transactions_cases() { }) }); + // Success for 5 and 6 that is in parallel with 4 p2p.expect_get_transactions_from_peer() .times(2) .in_sequence(&mut seq) @@ -439,7 +446,7 @@ async fn import__keep_data_asked_in_fail_ask_transactions_cases() { // Reset the state for a next call *state.lock() = State::new(3, 6); // When - // Should re-ask to P2P only block 4 and for now it reask for all blocks + // Should re-ask to P2P only block 4. 
let res = import.import(&mut watcher).await.is_ok(); assert!(res); assert_eq!(&State::new(6, None), state.lock().deref()); From 17a38e49fdde85e177296ecf14caddba9319f02c Mon Sep 17 00:00:00 2001 From: AurelienFT Date: Wed, 27 Nov 2024 15:08:07 +0100 Subject: [PATCH 19/23] Simplify chunk lf cache creation algorithm --- Cargo.lock | 1 + crates/services/sync/Cargo.toml | 1 + crates/services/sync/src/import/cache.rs | 109 ++++++++++++++--------- crates/services/sync/src/import/tests.rs | 93 ++++++++++++++++++- 4 files changed, 162 insertions(+), 42 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 07022867482..b05eb2d74a4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3683,6 +3683,7 @@ dependencies = [ "fuel-core-trace", "fuel-core-types 0.40.0", "futures", + "itertools 0.12.1", "mockall", "rand", "test-case", diff --git a/crates/services/sync/Cargo.toml b/crates/services/sync/Cargo.toml index e06d0f0eac1..d316506e121 100644 --- a/crates/services/sync/Cargo.toml +++ b/crates/services/sync/Cargo.toml @@ -15,6 +15,7 @@ async-trait = { workspace = true } fuel-core-services = { workspace = true } fuel-core-types = { workspace = true, features = ["std"] } futures = { workspace = true } +itertools = { workspace = true, features = ["use_alloc"]} mockall = { workspace = true, optional = true } rand = { workspace = true } tokio = { workspace = true, features = ["full"] } diff --git a/crates/services/sync/src/import/cache.rs b/crates/services/sync/src/import/cache.rs index f50dc8f0d40..a0c92ce4a65 100644 --- a/crates/services/sync/src/import/cache.rs +++ b/crates/services/sync/src/import/cache.rs @@ -14,6 +14,8 @@ use fuel_core_types::blockchain::{ SealedBlockHeader, }; +use itertools::Itertools; + use super::Batch; /// The cache that stores the fetched headers and blocks. @@ -78,10 +80,10 @@ impl Cache { let mut current_chunk = CachedDataBatch::None(0..0); for (height, data) in cache_iter { + // We have a range missing in our cache. 
+ // Push the existing chunk and push chunks of missing data if height != current_height { - if !current_chunk.is_range_empty() { - chunks.push(current_chunk); - } + Self::push_current_chunk(&mut chunks, current_chunk, max_chunk_size); current_chunk = CachedDataBatch::None(0..0); Self::push_missing_chunks( &mut chunks, @@ -93,21 +95,18 @@ impl Cache { ); current_height = height; } - current_chunk = - Self::handle_current_chunk(current_chunk, data, height, &mut chunks); - current_chunk = Self::check_chunk_limit( + // Either accumulate in the same chunk or transition if the data is not the same as the current chunk + current_chunk = Self::handle_current_chunk( current_chunk, - max_chunk_size, + data, + height, &mut chunks, - current_height, + max_chunk_size, ); - current_height = height.saturating_add(1); } - if !current_chunk.is_range_empty() { - chunks.push(current_chunk); - } + Self::push_current_chunk(&mut chunks, current_chunk, max_chunk_size); if current_height <= end { Self::push_missing_chunks( &mut chunks, @@ -121,6 +120,52 @@ impl Cache { futures::stream::iter(chunks) } + // Split the chunk in chunks of `max_chunk_size` and push them in `chunks` vector. 
+ fn push_current_chunk( + chunks: &mut Vec, + chunk: CachedDataBatch, + max_chunk_size: usize, + ) { + if chunk.is_range_empty() { + return; + } + // Adds a vec of chunks that respect `max_chunk_size` + match chunk { + CachedDataBatch::Blocks(blocks) => chunks.extend( + blocks + .range + .zip(blocks.results) + .chunks(max_chunk_size) + .into_iter() + .map(|chunk| { + let (range, blocks): (Vec, Vec) = chunk.unzip(); + CachedDataBatch::Blocks(Batch::new( + None, + range[0]..range[range.len() - 1].saturating_add(1), + blocks, + )) + }), + ), + CachedDataBatch::Headers(headers) => chunks.extend( + headers + .range + .zip(headers.results) + .chunks(max_chunk_size) + .into_iter() + .map(|chunk| { + let (range, headers): (Vec, Vec) = + chunk.unzip(); + CachedDataBatch::Headers(Batch::new( + None, + range[0]..range[range.len() - 1].saturating_add(1), + headers, + )) + }), + ), + CachedDataBatch::None(range) => chunks.push(CachedDataBatch::None(range)), + } + } + fn collect_cache_data(&self, range: RangeInclusive) -> Vec<(u32, CachedData)> { let lock = self.0.lock(); lock.range(range).map(|(k, v)| (*k, v.clone())).collect() @@ -131,6 +176,7 @@ impl Cache { data: CachedData, height: u32, chunks: &mut Vec, + max_chunk_size: usize, ) -> CachedDataBatch { match (current_chunk, data) { (CachedDataBatch::None(_), CachedData::Header(data)) => { @@ -161,7 +207,11 @@ impl Cache { } (CachedDataBatch::Headers(headers_batch), CachedData::Block(block)) => { debug_assert_eq!(headers_batch.range.end, height); - chunks.push(CachedDataBatch::Headers(headers_batch)); + Self::push_current_chunk( + chunks, + CachedDataBatch::Headers(headers_batch), + max_chunk_size, + ); CachedDataBatch::Blocks(Batch::new( None, height..height.saturating_add(1), @@ -170,7 +220,11 @@ impl Cache { } (CachedDataBatch::Blocks(blocks_batch), CachedData::Header(header)) => { debug_assert_eq!(blocks_batch.range.end, height); - chunks.push(CachedDataBatch::Blocks(blocks_batch)); + Self::push_current_chunk( + chunks, 
+ CachedDataBatch::Blocks(blocks_batch), + max_chunk_size, + ); CachedDataBatch::Headers(Batch::new( None, height..height.saturating_add(1), @@ -180,33 +234,6 @@ impl Cache { } } - fn check_chunk_limit( - current_chunk: CachedDataBatch, - max_chunk_size: usize, - chunks: &mut Vec, - current_height: u32, - ) -> CachedDataBatch { - match current_chunk { - CachedDataBatch::Headers(batch) => { - if batch.results.len() >= max_chunk_size { - chunks.push(CachedDataBatch::Headers(batch)); - CachedDataBatch::None(current_height..current_height) - } else { - CachedDataBatch::Headers(batch) - } - } - CachedDataBatch::Blocks(batch) => { - if batch.results.len() >= max_chunk_size { - chunks.push(CachedDataBatch::Blocks(batch)); - CachedDataBatch::None(current_height..current_height) - } else { - CachedDataBatch::Blocks(batch) - } - } - CachedDataBatch::None(range) => CachedDataBatch::None(range), - } - } - fn push_missing_chunks( chunks: &mut Vec, current_height: u32, diff --git a/crates/services/sync/src/import/tests.rs b/crates/services/sync/src/import/tests.rs index 2c125f136cd..1d7a154505a 100644 --- a/crates/services/sync/src/import/tests.rs +++ b/crates/services/sync/src/import/tests.rs @@ -362,7 +362,7 @@ async fn import__keep_data_asked_in_fail_ask_transactions_cases() { .expect_check_sealed_header() .times(3) .returning(|_| Ok(true)); - // No reask on da height on all of the blocks + // One reask on the da_height after reask of the transactions for block 4 consensus_port .expect_await_da_height() .times(4) @@ -452,6 +452,97 @@ async fn import__keep_data_asked_in_fail_ask_transactions_cases() { assert_eq!(&State::new(6, None), state.lock().deref()); } +#[tokio::test] +async fn import__keep_data_asked_in_fail_execution() { + // Test is going from block 4 (3 already committed) to 6 + let params = Config { + block_stream_buffer_size: 10, + header_batch_size: 1, + }; + + let mut consensus_port = MockConsensusPort::default(); + // No reask on verification on all of the 
blocks + consensus_port + .expect_check_sealed_header() + .times(3) + .returning(|_| Ok(true)); + // No reask on da height on all of the blocks + consensus_port + .expect_await_da_height() + .times(3) + .returning(|_| Ok(())); + + let mut p2p = MockPeerToPeerPort::default(); + // Everything goes well on the headers part for all blocks + p2p.expect_get_sealed_block_headers() + .times(3) + .returning(|range| { + Box::pin(async move { + let peer = random_peer(); + let headers = Some(range.map(empty_header).collect()); + let headers = peer.bind(headers); + Ok(headers) + }) + }); + + // Everything goes well on the transactions retrieval + p2p.expect_get_transactions_from_peer() + .times(3) + .returning(|block_ids| { + Box::pin(async move { + let data = block_ids.data; + let v = data.into_iter().map(|_| Transactions::default()).collect(); + Ok(Some(v)) + }) + }); + p2p.expect_report_peer().returning(|_, _| Ok(())); + + let p2p = Arc::new(p2p); + + // Given + let mut executor: MockBlockImporterPort = MockBlockImporterPort::new(); + let mut seq = Sequence::new(); + // Fails execute on the 4 one + executor + .expect_execute_and_commit() + .times(1) + .in_sequence(&mut seq) + .returning(|_| anyhow::bail!("Bad execution")); + // Success execute the 3 after + executor + .expect_execute_and_commit() + .times(3) + .in_sequence(&mut seq) + .returning(|_| Ok(())); + let executor = Arc::new(executor); + let consensus = Arc::new(consensus_port); + let notify = Arc::new(Notify::new()); + let state: SharedMutex = State::new(3, 6).into(); + + let mut import = Import { + state: state.clone(), + notify: notify.clone(), + params, + p2p, + executor, + consensus, + cache: Cache::new(), + }; + let (_tx, shutdown) = tokio::sync::watch::channel(fuel_core_services::State::Started); + let mut watcher = shutdown.into(); + notify.notify_one(); + let res = import.import(&mut watcher).await.is_ok(); + assert!(!res); + assert_eq!(&State::new(3, None), state.lock().deref()); + // Reset the state for a 
next call + *state.lock() = State::new(3, 6); + // When + // Should re-ask to P2P only block 4. + let res = import.import(&mut watcher).await.is_ok(); + assert!(res); + assert_eq!(&State::new(6, None), state.lock().deref()); +} + #[tokio::test] async fn import__signature_fails_on_header_4_only() { // given From d9ef69ea61d1f8fe1647092e23ed135cb6109404 Mon Sep 17 00:00:00 2001 From: AurelienFT Date: Wed, 27 Nov 2024 15:11:18 +0100 Subject: [PATCH 20/23] toml lint --- crates/services/sync/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/services/sync/Cargo.toml b/crates/services/sync/Cargo.toml index d316506e121..74b52b8f921 100644 --- a/crates/services/sync/Cargo.toml +++ b/crates/services/sync/Cargo.toml @@ -15,7 +15,7 @@ async-trait = { workspace = true } fuel-core-services = { workspace = true } fuel-core-types = { workspace = true, features = ["std"] } futures = { workspace = true } -itertools = { workspace = true, features = ["use_alloc"]} +itertools = { workspace = true, features = ["use_alloc"] } mockall = { workspace = true, optional = true } rand = { workspace = true } tokio = { workspace = true, features = ["full"] } From 546bcf5b5a370c564ae746dc75a4c734f6d02d99 Mon Sep 17 00:00:00 2001 From: AurelienFT Date: Thu, 28 Nov 2024 11:14:16 +0100 Subject: [PATCH 21/23] Improve test and fix some safety clippy. 
--- crates/services/sync/src/import.rs | 8 ++- crates/services/sync/src/import/cache.rs | 73 +++++++++++++++--------- crates/services/sync/src/import/tests.rs | 29 ++++++---- 3 files changed, 70 insertions(+), 40 deletions(-) diff --git a/crates/services/sync/src/import.rs b/crates/services/sync/src/import.rs index 96ffcd78578..b6179cf9a5c 100644 --- a/crates/services/sync/src/import.rs +++ b/crates/services/sync/src/import.rs @@ -32,6 +32,7 @@ use futures::{ }; use std::{ future::Future, + num::NonZeroUsize, ops::{ Range, RangeInclusive, @@ -233,7 +234,8 @@ where async move { let block_stream = get_block_stream( range.clone(), - params, + NonZeroUsize::new(params.header_batch_size) + .expect("Header batch size must be non-zero"), p2p, consensus, cache.clone(), @@ -389,13 +391,13 @@ fn get_block_stream< C: ConsensusPort + Send + Sync + 'static, >( range: RangeInclusive, - params: Config, + header_batch_size: NonZeroUsize, p2p: Arc

, consensus: Arc, cache: Cache, ) -> impl Stream> { cache - .get_chunks(range.clone(), params.header_batch_size) + .get_chunks(range.clone(), header_batch_size) .map({ let p2p = p2p.clone(); move |cached_data_batch| { diff --git a/crates/services/sync/src/import/cache.rs b/crates/services/sync/src/import/cache.rs index a0c92ce4a65..1503445bced 100644 --- a/crates/services/sync/src/import/cache.rs +++ b/crates/services/sync/src/import/cache.rs @@ -2,6 +2,7 @@ use std::{ collections::BTreeMap, + num::NonZeroUsize, ops::{ Range, RangeInclusive, @@ -68,10 +69,10 @@ impl Cache { pub fn get_chunks( &self, range: RangeInclusive, - max_chunk_size: usize, + max_chunk_size: NonZeroUsize, ) -> futures::stream::Iter> { let end = (*range.end()).saturating_add(1); - let chunk_size_u32 = u32::try_from(max_chunk_size) + let chunk_size_u32 = u32::try_from(max_chunk_size.get()) .expect("The size of the chunk can't exceed `u32`"); let cache_iter = self.collect_cache_data(range.clone()); @@ -124,7 +125,7 @@ impl Cache { fn push_current_chunk( chunks: &mut Vec, chunk: CachedDataBatch, - max_chunk_size: usize, + max_chunk_size: NonZeroUsize, ) { if chunk.is_range_empty() { return; @@ -135,31 +136,47 @@ impl Cache { blocks .range .zip(blocks.results) - .chunks(max_chunk_size) + .chunks(max_chunk_size.into()) .into_iter() - .map(|chunk| { + .filter_map(|chunk| { let (range, blocks): (Vec, Vec) = chunk.unzip(); - CachedDataBatch::Blocks(Batch::new( + let Some(start_range) = range.get(0) else { + return None; + }; + let Some(mut end_range) = + range.get(range.len().saturating_sub(1)) + else { + return None; + }; + Some(CachedDataBatch::Blocks(Batch::new( None, - range[0]..range[range.len() - 1].saturating_add(1), + *start_range..end_range.saturating_add(1), blocks, - )) + ))) }), ), CachedDataBatch::Headers(headers) => chunks.extend( headers .range .zip(headers.results) - .chunks(max_chunk_size) + .chunks(max_chunk_size.into()) .into_iter() - .map(|chunk| { + .filter_map(|chunk| { let 
(range, headers): (Vec, Vec) = chunk.unzip(); - CachedDataBatch::Headers(Batch::new( + let Some(start_range) = range.get(0) else { + return None; + }; + let Some(mut end_range) = + range.get(range.len().saturating_sub(1)) + else { + return None; + }; + Some(CachedDataBatch::Headers(Batch::new( None, - range[0]..range[range.len() - 1].saturating_add(1), + *start_range..end_range.saturating_add(1), headers, - )) + ))) }), ), CachedDataBatch::None(range) => chunks.push(CachedDataBatch::None(range)), @@ -176,7 +193,7 @@ impl Cache { data: CachedData, height: u32, chunks: &mut Vec, - max_chunk_size: usize, + max_chunk_size: NonZeroUsize, ) -> CachedDataBatch { match (current_chunk, data) { (CachedDataBatch::None(_), CachedData::Header(data)) => { @@ -194,12 +211,14 @@ impl Cache { )) } (CachedDataBatch::Headers(mut batch), CachedData::Header(data)) => { + tracing::warn!("Header data range in cache is not continuous."); debug_assert_eq!(batch.range.end, height); batch.range = batch.range.start..batch.range.end.saturating_add(1); batch.results.push(data); CachedDataBatch::Headers(batch) } (CachedDataBatch::Blocks(mut batch), CachedData::Block(data)) => { + tracing::warn!("Block data range in cache is not continuous."); debug_assert_eq!(batch.range.end, height); batch.range = batch.range.start..batch.range.end.saturating_add(1); batch.results.push(data); @@ -238,17 +257,16 @@ impl Cache { chunks: &mut Vec, current_height: u32, height: u32, - max_chunk_size: usize, + max_chunk_size: NonZeroUsize, chunk_size_u32: u32, end: u32, ) { - let missing_chunks = - (current_height..height) - .step_by(max_chunk_size) - .map(move |chunk_start| { - let block_end = chunk_start.saturating_add(chunk_size_u32).min(end); - CachedDataBatch::None(chunk_start..block_end) - }); + let missing_chunks = (current_height..height).step_by(max_chunk_size.into()).map( + move |chunk_start| { + let block_end = chunk_start.saturating_add(chunk_size_u32).min(end); + 
CachedDataBatch::None(chunk_start..block_end) + }, + ); chunks.extend(missing_chunks); } @@ -278,9 +296,12 @@ mod tests { tai64::Tai64, }; use futures::StreamExt; - use std::ops::{ - Range, - RangeInclusive, + use std::{ + num::NonZeroUsize, + ops::{ + Range, + RangeInclusive, + }, }; use test_case::test_case; @@ -534,7 +555,7 @@ mod tests { blocks.to_vec(), )); cache - .get_chunks(asked_range, max_chunk_size) + .get_chunks(asked_range, NonZeroUsize::try_from(max_chunk_size).unwrap()) .collect() .await } diff --git a/crates/services/sync/src/import/tests.rs b/crates/services/sync/src/import/tests.rs index 1d7a154505a..bfd8e8fc65a 100644 --- a/crates/services/sync/src/import/tests.rs +++ b/crates/services/sync/src/import/tests.rs @@ -274,7 +274,8 @@ async fn import__keep_data_asked_in_fail_ask_header_cases() { p2p.expect_get_sealed_block_headers() .times(1) .in_sequence(&mut seq) - .returning(|_| { + .returning(|range| { + assert_eq!(range, 4..4); Box::pin(async move { tokio::time::sleep(Duration::from_millis(300)).await; Err(anyhow::anyhow!("Some network error")) @@ -298,6 +299,7 @@ async fn import__keep_data_asked_in_fail_ask_header_cases() { .times(1) .in_sequence(&mut seq) .returning(|range| { + assert_eq!(range, 4..4); Box::pin(async move { let peer = random_peer(); let headers = Some(range.map(empty_header).collect()); @@ -386,7 +388,8 @@ async fn import__keep_data_asked_in_fail_ask_transactions_cases() { p2p.expect_get_transactions_from_peer() .times(1) .in_sequence(&mut seq) - .returning(|_| { + .returning(|range| { + assert_eq!(range.data, 4..4); Box::pin(async move { tokio::time::sleep(Duration::from_millis(300)).await; Err(anyhow::anyhow!("Some network error")) @@ -410,6 +413,7 @@ async fn import__keep_data_asked_in_fail_ask_transactions_cases() { .times(1) .in_sequence(&mut seq) .returning(|block_ids| { + assert_eq!(block_ids, 4..4); Box::pin(async move { let data = block_ids; let v = data.into_iter().map(|_| Transactions::default()).collect(); @@ 
-461,21 +465,21 @@ async fn import__keep_data_asked_in_fail_execution() { }; let mut consensus_port = MockConsensusPort::default(); - // No reask on verification on all of the blocks + // Data is re-ask for the block 4 because his execution failed consensus_port .expect_check_sealed_header() - .times(3) + .times(4) .returning(|_| Ok(true)); - // No reask on da height on all of the blocks + // Data is re-ask for the block 4 because his execution failed consensus_port .expect_await_da_height() - .times(3) + .times(4) .returning(|_| Ok(())); let mut p2p = MockPeerToPeerPort::default(); - // Everything goes well on the headers part for all blocks + // Data is re-ask for the block 4 because his execution failed p2p.expect_get_sealed_block_headers() - .times(3) + .times(4) .returning(|range| { Box::pin(async move { let peer = random_peer(); @@ -485,9 +489,9 @@ async fn import__keep_data_asked_in_fail_execution() { }) }); - // Everything goes well on the transactions retrieval + // Data is re-ask for the block 4 because his execution failed p2p.expect_get_transactions_from_peer() - .times(3) + .times(4) .returning(|block_ids| { Box::pin(async move { let data = block_ids.data; @@ -507,7 +511,10 @@ async fn import__keep_data_asked_in_fail_execution() { .expect_execute_and_commit() .times(1) .in_sequence(&mut seq) - .returning(|_| anyhow::bail!("Bad execution")); + .returning(|block| { + assert_eq!(block.entity.header().height(), &BlockHeight::new(4)); + anyhow::bail!("Bad execution") + }); // Success execute the 3 after executor .expect_execute_and_commit() From 10c8a216e901061b6225f68811f5f8d82925b798 Mon Sep 17 00:00:00 2001 From: AurelienFT Date: Thu, 28 Nov 2024 11:50:44 +0100 Subject: [PATCH 22/23] Fix tests checks and clippy --- crates/services/sync/src/import/cache.rs | 20 ++++---------------- crates/services/sync/src/import/tests.rs | 8 ++++---- 2 files changed, 8 insertions(+), 20 deletions(-) diff --git a/crates/services/sync/src/import/cache.rs 
b/crates/services/sync/src/import/cache.rs index 1503445bced..8717b5b21f3 100644 --- a/crates/services/sync/src/import/cache.rs +++ b/crates/services/sync/src/import/cache.rs @@ -140,14 +140,8 @@ impl Cache { .into_iter() .filter_map(|chunk| { let (range, blocks): (Vec, Vec) = chunk.unzip(); - let Some(start_range) = range.get(0) else { - return None; - }; - let Some(mut end_range) = - range.get(range.len().saturating_sub(1)) - else { - return None; - }; + let start_range = range.first()?; + let mut end_range = range.get(range.len().saturating_sub(1))?; Some(CachedDataBatch::Blocks(Batch::new( None, *start_range..end_range.saturating_add(1), @@ -164,14 +158,8 @@ impl Cache { .filter_map(|chunk| { let (range, headers): (Vec, Vec) = chunk.unzip(); - let Some(start_range) = range.get(0) else { - return None; - }; - let Some(mut end_range) = - range.get(range.len().saturating_sub(1)) - else { - return None; - }; + let start_range = range.first()?; + let mut end_range = range.get(range.len().saturating_sub(1))?; Some(CachedDataBatch::Headers(Batch::new( None, *start_range..end_range.saturating_add(1), diff --git a/crates/services/sync/src/import/tests.rs b/crates/services/sync/src/import/tests.rs index bfd8e8fc65a..baa4146a626 100644 --- a/crates/services/sync/src/import/tests.rs +++ b/crates/services/sync/src/import/tests.rs @@ -275,7 +275,7 @@ async fn import__keep_data_asked_in_fail_ask_header_cases() { .times(1) .in_sequence(&mut seq) .returning(|range| { - assert_eq!(range, 4..4); + assert_eq!(range, 4..5); Box::pin(async move { tokio::time::sleep(Duration::from_millis(300)).await; Err(anyhow::anyhow!("Some network error")) @@ -299,7 +299,7 @@ async fn import__keep_data_asked_in_fail_ask_header_cases() { .times(1) .in_sequence(&mut seq) .returning(|range| { - assert_eq!(range, 4..4); + assert_eq!(range, 4..5); Box::pin(async move { let peer = random_peer(); let headers = Some(range.map(empty_header).collect()); @@ -389,7 +389,7 @@ async fn 
import__keep_data_asked_in_fail_ask_transactions_cases() { .times(1) .in_sequence(&mut seq) .returning(|range| { - assert_eq!(range.data, 4..4); + assert_eq!(range.data, 4..5); Box::pin(async move { tokio::time::sleep(Duration::from_millis(300)).await; Err(anyhow::anyhow!("Some network error")) @@ -413,7 +413,7 @@ async fn import__keep_data_asked_in_fail_ask_transactions_cases() { .times(1) .in_sequence(&mut seq) .returning(|block_ids| { - assert_eq!(block_ids, 4..4); + assert_eq!(block_ids, 4..5); Box::pin(async move { let data = block_ids; let v = data.into_iter().map(|_| Transactions::default()).collect(); From 2af4312c538cb501ef663b5946a163efc3bea206 Mon Sep 17 00:00:00 2001 From: green Date: Thu, 28 Nov 2024 16:47:01 -0500 Subject: [PATCH 23/23] Simplified the implementation --- Cargo.lock | 1 - crates/services/sync/Cargo.toml | 1 - crates/services/sync/src/import.rs | 11 +- crates/services/sync/src/import/cache.rs | 154 ++++++++--------------- 4 files changed, 60 insertions(+), 107 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b05eb2d74a4..07022867482 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3683,7 +3683,6 @@ dependencies = [ "fuel-core-trace", "fuel-core-types 0.40.0", "futures", - "itertools 0.12.1", "mockall", "rand", "test-case", diff --git a/crates/services/sync/Cargo.toml b/crates/services/sync/Cargo.toml index 74b52b8f921..e06d0f0eac1 100644 --- a/crates/services/sync/Cargo.toml +++ b/crates/services/sync/Cargo.toml @@ -15,7 +15,6 @@ async-trait = { workspace = true } fuel-core-services = { workspace = true } fuel-core-types = { workspace = true, features = ["std"] } futures = { workspace = true } -itertools = { workspace = true, features = ["use_alloc"] } mockall = { workspace = true, optional = true } rand = { workspace = true } tokio = { workspace = true, features = ["full"] } diff --git a/crates/services/sync/src/import.rs b/crates/services/sync/src/import.rs index b6179cf9a5c..f3d93359183 100644 --- 
a/crates/services/sync/src/import.rs +++ b/crates/services/sync/src/import.rs @@ -32,7 +32,7 @@ use futures::{ }; use std::{ future::Future, - num::NonZeroUsize, + num::NonZeroU32, ops::{ Range, RangeInclusive, @@ -221,6 +221,10 @@ where cache, .. } = &self; + let batch_size = u32::try_from(params.header_batch_size) + .expect("Header batch size must be less u32::MAX"); + let batch_size = + NonZeroU32::new(batch_size).expect("Header batch size must be non-zero"); let (batch_sender, batch_receiver) = mpsc::channel(1); @@ -234,8 +238,7 @@ where async move { let block_stream = get_block_stream( range.clone(), - NonZeroUsize::new(params.header_batch_size) - .expect("Header batch size must be non-zero"), + batch_size, p2p, consensus, cache.clone(), @@ -391,7 +394,7 @@ fn get_block_stream< C: ConsensusPort + Send + Sync + 'static, >( range: RangeInclusive, - header_batch_size: NonZeroUsize, + header_batch_size: NonZeroU32, p2p: Arc

, consensus: Arc, cache: Cache, diff --git a/crates/services/sync/src/import/cache.rs b/crates/services/sync/src/import/cache.rs index 8717b5b21f3..031ee0bfcf8 100644 --- a/crates/services/sync/src/import/cache.rs +++ b/crates/services/sync/src/import/cache.rs @@ -1,8 +1,6 @@ -#![allow(unused)] - use std::{ collections::BTreeMap, - num::NonZeroUsize, + num::NonZeroU32, ops::{ Range, RangeInclusive, @@ -15,8 +13,6 @@ use fuel_core_types::blockchain::{ SealedBlockHeader, }; -use itertools::Itertools; - use super::Batch; /// The cache that stores the fetched headers and blocks. @@ -69,11 +65,9 @@ impl Cache { pub fn get_chunks( &self, range: RangeInclusive, - max_chunk_size: NonZeroUsize, + max_chunk_size: NonZeroU32, ) -> futures::stream::Iter> { let end = (*range.end()).saturating_add(1); - let chunk_size_u32 = u32::try_from(max_chunk_size.get()) - .expect("The size of the chunk can't exceed `u32`"); let cache_iter = self.collect_cache_data(range.clone()); let mut current_height = *range.start(); @@ -84,19 +78,20 @@ impl Cache { // We have a range missing in our cache. 
// Push the existing chunk and push chunks of missing data if height != current_height { - Self::push_current_chunk(&mut chunks, current_chunk, max_chunk_size); + if !current_chunk.is_range_empty() { + chunks.push(current_chunk); + } current_chunk = CachedDataBatch::None(0..0); Self::push_missing_chunks( &mut chunks, current_height, height, max_chunk_size, - chunk_size_u32, end, ); - current_height = height; } - // Either accumulate in the same chunk or transition if the data is not the same as the current chunk + // Either accumulate in the same chunk or transition + // if the data is not the same as the current chunk current_chunk = Self::handle_current_chunk( current_chunk, data, @@ -107,68 +102,12 @@ impl Cache { current_height = height.saturating_add(1); } - Self::push_current_chunk(&mut chunks, current_chunk, max_chunk_size); - if current_height <= end { - Self::push_missing_chunks( - &mut chunks, - current_height, - end, - max_chunk_size, - chunk_size_u32, - end, - ); + if !current_chunk.is_range_empty() { + chunks.push(current_chunk); } - futures::stream::iter(chunks) - } - // Split the chunk in chunks of `max_chunk_size` and push them in `chunks` vector. 
- fn push_current_chunk( - chunks: &mut Vec, - chunk: CachedDataBatch, - max_chunk_size: NonZeroUsize, - ) { - if chunk.is_range_empty() { - return; - } - // Adds a vec of chunks that respect `max_chunk_size` - match chunk { - CachedDataBatch::Blocks(blocks) => chunks.extend( - blocks - .range - .zip(blocks.results) - .chunks(max_chunk_size.into()) - .into_iter() - .filter_map(|chunk| { - let (range, blocks): (Vec, Vec) = chunk.unzip(); - let start_range = range.first()?; - let mut end_range = range.get(range.len().saturating_sub(1))?; - Some(CachedDataBatch::Blocks(Batch::new( - None, - *start_range..end_range.saturating_add(1), - blocks, - ))) - }), - ), - CachedDataBatch::Headers(headers) => chunks.extend( - headers - .range - .zip(headers.results) - .chunks(max_chunk_size.into()) - .into_iter() - .filter_map(|chunk| { - let (range, headers): (Vec, Vec) = - chunk.unzip(); - let start_range = range.first()?; - let mut end_range = range.get(range.len().saturating_sub(1))?; - Some(CachedDataBatch::Headers(Batch::new( - None, - *start_range..end_range.saturating_add(1), - headers, - ))) - }), - ), - CachedDataBatch::None(range) => chunks.push(CachedDataBatch::None(range)), - } + Self::push_missing_chunks(&mut chunks, current_height, end, max_chunk_size, end); + futures::stream::iter(chunks) } fn collect_cache_data(&self, range: RangeInclusive) -> Vec<(u32, CachedData)> { @@ -181,8 +120,9 @@ impl Cache { data: CachedData, height: u32, chunks: &mut Vec, - max_chunk_size: NonZeroUsize, + max_chunk_size: NonZeroU32, ) -> CachedDataBatch { + let max_chunk_size = max_chunk_size.get() as usize; match (current_chunk, data) { (CachedDataBatch::None(_), CachedData::Header(data)) => { CachedDataBatch::Headers(Batch::new( @@ -201,24 +141,44 @@ impl Cache { (CachedDataBatch::Headers(mut batch), CachedData::Header(data)) => { tracing::warn!("Header data range in cache is not continuous."); debug_assert_eq!(batch.range.end, height); - batch.range = 
batch.range.start..batch.range.end.saturating_add(1); - batch.results.push(data); - CachedDataBatch::Headers(batch) + debug_assert!(batch.range.len() <= max_chunk_size); + + if batch.range.len() == max_chunk_size { + chunks.push(CachedDataBatch::Headers(batch)); + + CachedDataBatch::Headers(Batch::new( + None, + height..height.saturating_add(1), + vec![data], + )) + } else { + batch.range = batch.range.start..batch.range.end.saturating_add(1); + batch.results.push(data); + CachedDataBatch::Headers(batch) + } } (CachedDataBatch::Blocks(mut batch), CachedData::Block(data)) => { tracing::warn!("Block data range in cache is not continuous."); debug_assert_eq!(batch.range.end, height); - batch.range = batch.range.start..batch.range.end.saturating_add(1); - batch.results.push(data); - CachedDataBatch::Blocks(batch) + debug_assert!(batch.range.len() <= max_chunk_size); + + if batch.range.len() == max_chunk_size { + chunks.push(CachedDataBatch::Blocks(batch)); + + CachedDataBatch::Blocks(Batch::new( + None, + height..height.saturating_add(1), + vec![data], + )) + } else { + batch.range = batch.range.start..batch.range.end.saturating_add(1); + batch.results.push(data); + CachedDataBatch::Blocks(batch) + } } (CachedDataBatch::Headers(headers_batch), CachedData::Block(block)) => { debug_assert_eq!(headers_batch.range.end, height); - Self::push_current_chunk( - chunks, - CachedDataBatch::Headers(headers_batch), - max_chunk_size, - ); + chunks.push(CachedDataBatch::Headers(headers_batch)); CachedDataBatch::Blocks(Batch::new( None, height..height.saturating_add(1), @@ -227,11 +187,7 @@ impl Cache { } (CachedDataBatch::Blocks(blocks_batch), CachedData::Header(header)) => { debug_assert_eq!(blocks_batch.range.end, height); - Self::push_current_chunk( - chunks, - CachedDataBatch::Blocks(blocks_batch), - max_chunk_size, - ); + chunks.push(CachedDataBatch::Blocks(blocks_batch)); CachedDataBatch::Headers(Batch::new( None, height..height.saturating_add(1), @@ -245,13 +201,13 @@ impl 
Cache { chunks: &mut Vec, current_height: u32, height: u32, - max_chunk_size: NonZeroUsize, - chunk_size_u32: u32, + max_chunk_size: NonZeroU32, end: u32, ) { - let missing_chunks = (current_height..height).step_by(max_chunk_size.into()).map( + let chunk_size = max_chunk_size.get(); + let missing_chunks = (current_height..height).step_by(chunk_size as usize).map( move |chunk_start| { - let block_end = chunk_start.saturating_add(chunk_size_u32).min(end); + let block_end = chunk_start.saturating_add(chunk_size).min(end); CachedDataBatch::None(chunk_start..block_end) }, ); @@ -269,7 +225,6 @@ mod tests { use crate::import::{ cache::{ Cache, - CachedData, CachedDataBatch, }, Batch, @@ -285,11 +240,8 @@ mod tests { }; use futures::StreamExt; use std::{ - num::NonZeroUsize, - ops::{ - Range, - RangeInclusive, - }, + num::NonZeroU32, + ops::RangeInclusive, }; use test_case::test_case; @@ -528,7 +480,7 @@ mod tests { async fn test_get_batch_scenarios( headers: &[Sealed], blocks: &[Sealed], - max_chunk_size: usize, + max_chunk_size: u32, asked_range: RangeInclusive, ) -> Vec { let mut cache = Cache::new(); @@ -543,7 +495,7 @@ mod tests { blocks.to_vec(), )); cache - .get_chunks(asked_range, NonZeroUsize::try_from(max_chunk_size).unwrap()) + .get_chunks(asked_range, NonZeroU32::try_from(max_chunk_size).unwrap()) .collect() .await }