diff --git a/CHANGELOG.md b/CHANGELOG.md index d255357ed27..36baafd9fa2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -8,8 +8,10 @@ and this project adheres to [Semantic Versioning](http://semver.org/). ### Added - [2350](https://github.com/FuelLabs/fuel-core/pull/2350): Added a new CLI flag `graphql-number-of-threads` to limit the number of threads used by the GraphQL service. The default value is `2`, `0` enables the old behavior. -### Changed +### Fixed +- [2345](https://github.com/FuelLabs/fuel-core/pull/2345): In PoA, increased the priority of the block creation timer trigger compared to txpool event management. +### Changed - [2334](https://github.com/FuelLabs/fuel-core/pull/2334): Prepare the GraphQL service for the switching to `async` methods. - [2310](https://github.com/FuelLabs/fuel-core/pull/2310): New metrics: "The gas prices used in a block" (`importer_gas_price_for_block`), "The total gas used in a block" (`importer_gas_per_block`), "The total fee (gwei) paid by transactions in a block" (`importer_fee_per_block_gwei`), "The total number of transactions in a block" (`importer_transactions_per_block`), P2P metrics for swarm and protocol. 
diff --git a/crates/services/consensus_module/poa/src/service.rs b/crates/services/consensus_module/poa/src/service.rs index 04255ac108a..ffb88f8db2a 100644 --- a/crates/services/consensus_module/poa/src/service.rs +++ b/crates/services/consensus_module/poa/src/service.rs @@ -573,10 +573,6 @@ where should_continue = false; } } - _ = self.new_txs_watcher.changed() => { - self.on_txpool_event().await.context("While processing txpool event")?; - should_continue = true; - } _ = next_block_production => { match self.on_timer().await.context("While processing timer event") { Ok(()) => should_continue = true, @@ -587,6 +583,10 @@ where } }; } + _ = self.new_txs_watcher.changed() => { + self.on_txpool_event().await.context("While processing txpool event")?; + should_continue = true; + } } Ok(should_continue) diff --git a/crates/services/consensus_module/poa/src/service_test/trigger_tests.rs b/crates/services/consensus_module/poa/src/service_test/trigger_tests.rs index 21cc4a22a3f..8b403e97fc1 100644 --- a/crates/services/consensus_module/poa/src/service_test/trigger_tests.rs +++ b/crates/services/consensus_module/poa/src/service_test/trigger_tests.rs @@ -478,3 +478,32 @@ async fn interval_trigger_produces_blocks_in_the_future_when_time_rewinds() { // similarly to how it works when time is lagging. 
assert_eq!(second_block_time, start_time + block_time.as_secs() * 2); } + +#[tokio::test] +async fn interval_trigger_even_if_queued_tx_events() { + let block_time = Duration::from_secs(2); + let mut ctx = DefaultContext::new(Config { + trigger: Trigger::Interval { block_time }, + signer: SignMode::Key(test_signing_key()), + metrics: false, + ..Default::default() + }) + .await; + let block_creation_notifier = Arc::new(Notify::new()); + tokio::task::spawn({ + let notifier = ctx.new_txs_notifier.clone(); + async move { + loop { + time::sleep(Duration::from_nanos(10)).await; + notifier.send_replace(()); + } + } + }); + let block_creation_waiter = block_creation_notifier.clone(); + tokio::task::spawn(async move { + ctx.block_import.recv().await.unwrap(); + dbg!("First block produced"); + block_creation_notifier.notify_waiters(); + }); + block_creation_waiter.notified().await; +} diff --git a/crates/services/src/async_processor.rs b/crates/services/src/async_processor.rs index 74f7d8c7954..db929176483 100644 --- a/crates/services/src/async_processor.rs +++ b/crates/services/src/async_processor.rs @@ -270,6 +270,8 @@ mod tests { // Then while broadcast_receiver.recv().await.is_ok() {} assert!(instant.elapsed() >= Duration::from_secs(10)); + // Wait for the metrics to be updated. + tokio::time::sleep(Duration::from_secs(1)).await; let duration = Duration::from_nanos(heavy_task_processor.metric.busy.get()); assert_eq!(duration.as_secs(), 10); let duration = Duration::from_nanos(heavy_task_processor.metric.idle.get()); @@ -302,6 +304,8 @@ mod tests { // Then while broadcast_receiver.recv().await.is_ok() {} assert!(instant.elapsed() <= Duration::from_secs(2)); + // Wait for the metrics to be updated. 
+ tokio::time::sleep(Duration::from_secs(1)).await; let duration = Duration::from_nanos(heavy_task_processor.metric.busy.get()); assert_eq!(duration.as_secs(), 10); let duration = Duration::from_nanos(heavy_task_processor.metric.idle.get()); @@ -333,8 +337,9 @@ mod tests { // Then while broadcast_receiver.recv().await.is_ok() {} - tokio::task::yield_now().await; assert!(instant.elapsed() <= Duration::from_secs(2)); + // Wait for the metrics to be updated. + tokio::time::sleep(Duration::from_secs(1)).await; let duration = Duration::from_nanos(heavy_task_processor.metric.busy.get()); assert_eq!(duration.as_secs(), 0); let duration = Duration::from_nanos(heavy_task_processor.metric.idle.get()); diff --git a/crates/services/src/sync_processor.rs b/crates/services/src/sync_processor.rs index 36fb53bc2a6..4e23a68205f 100644 --- a/crates/services/src/sync_processor.rs +++ b/crates/services/src/sync_processor.rs @@ -209,6 +209,8 @@ mod tests { // Then while broadcast_receiver.recv().await.is_ok() {} assert!(instant.elapsed() >= Duration::from_secs(10)); + // Wait for the metrics to be updated. + tokio::time::sleep(Duration::from_secs(1)).await; let duration = Duration::from_nanos(heavy_task_processor.metric.busy.get()); assert_eq!(duration.as_secs(), 10); } @@ -275,6 +277,8 @@ mod tests { // Then while broadcast_receiver.recv().await.is_ok() {} assert!(instant.elapsed() <= Duration::from_secs(2)); + // Wait for the metrics to be updated. + tokio::time::sleep(Duration::from_secs(1)).await; let duration = Duration::from_nanos(heavy_task_processor.metric.busy.get()); assert_eq!(duration.as_secs(), 10); } diff --git a/tests/tests/poa.rs b/tests/tests/poa.rs index fca05616275..792064fbade 100644 --- a/tests/tests/poa.rs +++ b/tests/tests/poa.rs @@ -259,6 +259,7 @@ mod p2p { // Then starts second_producer that uses the first one as a reserved peer. 
// second_producer should not produce blocks while the first one is producing // after the first_producer stops, second_producer should start producing blocks + #[ignore = "seems to be flaky, issue: https://github.com/FuelLabs/fuel-core/issues/2351"] #[tokio::test(flavor = "multi_thread")] async fn test_poa_multiple_producers() { const SYNC_TIMEOUT: u64 = 30;