diff --git a/.github/workflows/spellcheck.yml b/.github/workflows/spellcheck.yml new file mode 100644 index 00000000000..db4fae79d4e --- /dev/null +++ b/.github/workflows/spellcheck.yml @@ -0,0 +1,16 @@ +name: Spellcheck + +on: + pull_request: + +jobs: + find-typos: + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@v3 + + - name: Check spelling + uses: crate-ci/typos@master + with: + config: .typos.toml \ No newline at end of file diff --git a/.typos.toml b/.typos.toml new file mode 100644 index 00000000000..63be12140d5 --- /dev/null +++ b/.typos.toml @@ -0,0 +1,6 @@ +[default] +extend-ignore-identifiers-re = [ + "TRO", + "tro", + "Tro", +] \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md index a958c0222d6..a40a69892bb 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -188,11 +188,11 @@ and this project adheres to [Semantic Versioning](http://semver.org/). - The `StatisticTable` table lives in the off-chain worker. - Removed duplication of the `Database` from the `dap::ConcreteStorage` since it is already available from the VM. - The executor return only produced `Changes` instead of the storage transaction, which simplifies the interaction between modules and port definition. - - The logic related to the iteration over the storage is moved to the `fuel-core-storage` crate and is now reusable. It provides an `interator` method that duplicates the logic from `MemoryStore` on iterating over the `BTreeMap` and methods like `iter_all`, `iter_all_by_prefix`, etc. It was done in a separate revivable [commit](https://github.com/FuelLabs/fuel-core/pull/1694/commits/5b9bd78320e6f36d0650ec05698f12f7d1b3c7c9). + - The logic related to the iteration over the storage is moved to the `fuel-core-storage` crate and is now reusable. It provides an `iterator` method that duplicates the logic from `MemoryStore` on iterating over the `BTreeMap` and methods like `iter_all`, `iter_all_by_prefix`, etc. 
It was done in a separate revivable [commit](https://github.com/FuelLabs/fuel-core/pull/1694/commits/5b9bd78320e6f36d0650ec05698f12f7d1b3c7c9). - The `MemoryTransactionView` is fully replaced by the `StorageTransactionInner`. - Removed `flush` method from the `Database` since it is not needed after https://github.com/FuelLabs/fuel-core/pull/1664. -- [#1693](https://github.com/FuelLabs/fuel-core/pull/1693): The change separates the initial chain state from the chain config and stores them in separate files when generating a snapshot. The state snapshot can be generated in a new format where parquet is used for compression and indexing while postcard is used for encoding. This enables importing in a stream like fashion which reduces memory requirements. Json encoding is still supported to enable easy manual setup. However, parquet is prefered for large state files. +- [#1693](https://github.com/FuelLabs/fuel-core/pull/1693): The change separates the initial chain state from the chain config and stores them in separate files when generating a snapshot. The state snapshot can be generated in a new format where parquet is used for compression and indexing while postcard is used for encoding. This enables importing in a stream like fashion which reduces memory requirements. Json encoding is still supported to enable easy manual setup. However, parquet is preferred for large state files. ### Snapshot command @@ -208,7 +208,7 @@ and this project adheres to [Semantic Versioning](http://semver.org/). Each item group in the genesis process is handled by a separate worker, allowing for parallel loading. Workers stream file contents in batches. - A database transaction is committed every time an item group is succesfully loaded. Resumability is achieved by recording the last loaded group index within the same db tx. If loading is aborted, the remaining workers are shutdown. Upon restart, workers resume from the last processed group. 
+ A database transaction is committed every time an item group is successfully loaded. Resumability is achieved by recording the last loaded group index within the same db tx. If loading is aborted, the remaining workers are shut down. Upon restart, workers resume from the last processed group. ### Contract States and Balances diff --git a/Makefile.toml b/Makefile.toml index 7f46f6b8041..9494fce12af 100644 --- a/Makefile.toml +++ b/Makefile.toml @@ -2,9 +2,9 @@ # https://github.com/sagiegurari/cargo-make/blob/0.36.0/src/lib/descriptor/makefiles/stable.toml # This is a configuration file for the cargo plugin `cargo-make`. We use this plugin because of it's handling around -# cargo workspaces. Specifically, each task is run on workspace members indepedently, avoiding potential issues that +# cargo workspaces. Specifically, each task is run on workspace members independently, avoiding potential issues that # arise from feature unification (https://doc.rust-lang.org/cargo/reference/features.html#feature-unification). -# Feature unification allows two unrelated crates with the same depedency to enable features on eachother. +# Feature unification allows two unrelated crates with the same dependency to enable features on each other. # This is problematic when a crate is built independently (when publishing / being consumed from crates.io), # and it implicitly depended on features enabled by other crates in the same workspace. # While feature resolver v2 attempted to resolve this problem, it still comes up in certain scenarios. 
diff --git a/crates/client/assets/debugAdapterProtocol.json b/crates/client/assets/debugAdapterProtocol.json index 6f9749f66fd..44a0c2eed9c 100644 --- a/crates/client/assets/debugAdapterProtocol.json +++ b/crates/client/assets/debugAdapterProtocol.json @@ -3836,7 +3836,7 @@ "ExceptionPathSegment": { "type": "object", - "description": "An ExceptionPathSegment represents a segment in a path that is used to match leafs or nodes in a tree of exceptions.\nIf a segment consists of more than one name, it matches the names provided if 'negate' is false or missing or\nit matches anything except the names provided if 'negate' is true.", + "description": "An ExceptionPathSegment represents a segment in a path that is used to match leaves or nodes in a tree of exceptions.\nIf a segment consists of more than one name, it matches the names provided if 'negate' is false or missing or\nit matches anything except the names provided if 'negate' is true.", "properties": { "negate": { "type": "boolean", diff --git a/crates/fuel-core/src/database/balances.rs b/crates/fuel-core/src/database/balances.rs index 5e1134be5b5..3d7b20a1b0d 100644 --- a/crates/fuel-core/src/database/balances.rs +++ b/crates/fuel-core/src/database/balances.rs @@ -20,7 +20,7 @@ use fuel_core_types::{ use itertools::Itertools; pub trait BalancesInitializer { - /// Initialize the balances of the contract from the all leafs. + /// Initialize the balances of the contract from the all leaves. /// This method is more performant than inserting balances one by one. 
fn init_contract_balances( &mut self, diff --git a/crates/fuel-core/src/database/block.rs b/crates/fuel-core/src/database/block.rs index cd6f19c5483..e24bade2040 100644 --- a/crates/fuel-core/src/database/block.rs +++ b/crates/fuel-core/src/database/block.rs @@ -118,7 +118,7 @@ impl Database { let proof_index = message_merkle_metadata .version() .checked_sub(1) - .ok_or(anyhow::anyhow!("The count of leafs - messages is zero"))?; + .ok_or(anyhow::anyhow!("The count of leaves - messages is zero"))?; let (_, proof_set) = tree .prove(proof_index) .map_err(|err| StorageError::Other(anyhow::anyhow!(err)))?; diff --git a/crates/fuel-core/src/graphql_api/api_service.rs b/crates/fuel-core/src/graphql_api/api_service.rs index dca47d0ce00..9643b5c5cb8 100644 --- a/crates/fuel-core/src/graphql_api/api_service.rs +++ b/crates/fuel-core/src/graphql_api/api_service.rs @@ -168,7 +168,7 @@ impl RunnableTask for Task { } } -// Need a seperate Data Object for each Query endpoint, cannot be avoided +// Need a separate Data Object for each Query endpoint, cannot be avoided #[allow(clippy::too_many_arguments)] pub fn new_service( genesis_block_height: BlockHeight, diff --git a/crates/fuel-core/src/service/genesis/importer/import_task.rs b/crates/fuel-core/src/service/genesis/importer/import_task.rs index 4325d6165e0..db509888150 100644 --- a/crates/fuel-core/src/service/genesis/importer/import_task.rs +++ b/crates/fuel-core/src/service/genesis/importer/import_task.rs @@ -440,7 +440,7 @@ mod tests { } #[test] - fn succesfully_processed_batch_updates_the_genesis_progress() { + fn successfully_processed_batch_updates_the_genesis_progress() { // given let data = TestData::new(2); let db = GenesisDatabase::default(); diff --git a/crates/fuel-core/src/state/rocks_db.rs b/crates/fuel-core/src/state/rocks_db.rs index 91f7ebf15d8..23264fff198 100644 --- a/crates/fuel-core/src/state/rocks_db.rs +++ b/crates/fuel-core/src/state/rocks_db.rs @@ -83,7 +83,7 @@ impl ShallowTempDir { Self { path } } - 
/// Returns the path of teh directory. + /// Returns the path of the directory. pub fn path(&self) -> &PathBuf { &self.path } diff --git a/crates/services/consensus_module/poa/src/deadline_clock.rs b/crates/services/consensus_module/poa/src/deadline_clock.rs index e652e551e30..bf69991dcd8 100644 --- a/crates/services/consensus_module/poa/src/deadline_clock.rs +++ b/crates/services/consensus_module/poa/src/deadline_clock.rs @@ -141,7 +141,7 @@ impl DeadlineClock { } /// Clears the timeout, so that now event is produced when it expires. - /// If the event has alread occurred, it will not be removed. + /// If the event has already occurred, it will not be removed. pub async fn clear(&self) { self.control .send(ControlMessage::Clear) diff --git a/crates/services/consensus_module/poa/src/verifier/tests.rs b/crates/services/consensus_module/poa/src/verifier/tests.rs index 7f7f9ffbd96..7cbd561f914 100644 --- a/crates/services/consensus_module/poa/src/verifier/tests.rs +++ b/crates/services/consensus_module/poa/src/verifier/tests.rs @@ -64,7 +64,7 @@ fn correct() -> Input { let mut i = correct(); i.ch.prev_root = [3u8; 32].into(); i - } => matches Err(_) ; "genesis verify prev root mis-match should error" + } => matches Err(_) ; "genesis verify prev root mismatch should error" )] #[test_case( { @@ -78,7 +78,7 @@ fn correct() -> Input { let mut i = correct(); i.ch.generated.application_hash = [0u8; 32].into(); i - } => matches Err(_) ; "genesis verify application hash mis-match should error" + } => matches Err(_) ; "genesis verify application hash mismatch should error" )] #[test_case( { diff --git a/crates/services/p2p/src/gossipsub/topics.rs b/crates/services/p2p/src/gossipsub/topics.rs index a63e4f60bb9..f56b2c38f0a 100644 --- a/crates/services/p2p/src/gossipsub/topics.rs +++ b/crates/services/p2p/src/gossipsub/topics.rs @@ -42,7 +42,7 @@ impl GossipsubTopics { } } - /// Given a `GossipsubBroadcastRequest` retruns a `GossipTopic` + /// Given a `GossipsubBroadcastRequest` 
returns a `GossipTopic` /// which is broadcast over the network with the serialized inner value of `GossipsubBroadcastRequest` pub fn get_gossipsub_topic( &self, diff --git a/crates/services/p2p/src/p2p_service.rs b/crates/services/p2p/src/p2p_service.rs index 3f0c0724edc..e7f86bba9a4 100644 --- a/crates/services/p2p/src/p2p_service.rs +++ b/crates/services/p2p/src/p2p_service.rs @@ -1404,7 +1404,7 @@ mod tests { p2p_config.bootstrap_nodes = node_b.multiaddrs(); let mut node_c = build_service_from_config(p2p_config.clone()).await; - // Node C does not connecto to Node A + // Node C does not connect to Node A // it should receive the propagated message from Node B if `GossipsubMessageAcceptance` is `Accept` node_c .swarm @@ -1451,7 +1451,7 @@ mod tests { // Node B received the correct message // If we try to publish it again we will get `PublishError::Duplicate` - // This asserts that our MessageId calculation is consistant irrespective of which Peer sends it + // This asserts that our MessageId calculation is consistent irrespective of which Peer sends it let broadcast_request = broadcast_request.clone(); matches!(node_b.publish_message(broadcast_request), Err(PublishError::Duplicate)); diff --git a/crates/services/p2p/src/peer_manager.rs b/crates/services/p2p/src/peer_manager.rs index 7e06463cff6..454a1c6bfde 100644 --- a/crates/services/p2p/src/peer_manager.rs +++ b/crates/services/p2p/src/peer_manager.rs @@ -254,7 +254,7 @@ impl PeerManager { .choose(&mut range) } - /// Handles the first connnection established with a Peer + /// Handles the first connection established with a Peer fn handle_initial_connection(&mut self, peer_id: &PeerId) -> bool { const HEARTBEAT_AVG_WINDOW: u32 = 10; diff --git a/crates/services/p2p/src/peer_manager/heartbeat_data.rs b/crates/services/p2p/src/peer_manager/heartbeat_data.rs index c0edf86c684..2e96c2eaa94 100644 --- a/crates/services/p2p/src/peer_manager/heartbeat_data.rs +++ 
b/crates/services/p2p/src/peer_manager/heartbeat_data.rs @@ -56,10 +56,10 @@ impl HeartbeatData { pub fn update(&mut self, block_height: BlockHeight) { self.block_height = Some(block_height); - let old_hearbeat = self.last_heartbeat; + let old_heartbeat = self.last_heartbeat; self.last_heartbeat = Instant::now(); self.last_heartbeat_sys = SystemTime::now(); - let new_duration = self.last_heartbeat.saturating_duration_since(old_hearbeat); + let new_duration = self.last_heartbeat.saturating_duration_since(old_heartbeat); self.add_new_duration(new_duration); } } diff --git a/crates/services/p2p/src/peer_report.rs b/crates/services/p2p/src/peer_report.rs index 5b33fc3f28d..dd4436a145a 100644 --- a/crates/services/p2p/src/peer_report.rs +++ b/crates/services/p2p/src/peer_report.rs @@ -56,7 +56,7 @@ pub enum PeerReportEvent { // `Behaviour` that reports events about peers pub struct Behaviour { pending_events: VecDeque, - // regulary checks if reserved nodes are connected + // regularly checks if reserved nodes are connected health_check: Interval, decay_interval: Interval, } diff --git a/crates/services/producer/src/mocks.rs b/crates/services/producer/src/mocks.rs index 2c0dde37071..9ab32391994 100644 --- a/crates/services/producer/src/mocks.rs +++ b/crates/services/producer/src/mocks.rs @@ -66,8 +66,8 @@ impl Relayer for MockRelayer { &self, _height: &DaBlockHeight, ) -> anyhow::Result { - let heighest = self.latest_block_height; - Ok(heighest) + let highest = self.latest_block_height; + Ok(highest) } async fn get_cost_for_block(&self, height: &DaBlockHeight) -> anyhow::Result { diff --git a/crates/services/upgradable-executor/wasm-executor/src/utils.rs b/crates/services/upgradable-executor/wasm-executor/src/utils.rs index 640c04c4d23..85047b28492 100644 --- a/crates/services/upgradable-executor/wasm-executor/src/utils.rs +++ b/crates/services/upgradable-executor/wasm-executor/src/utils.rs @@ -20,8 +20,8 @@ pub fn pack_ptr_and_len(ptr: u32, len: u32) -> u64 { /// 
Unpacks an `u64` into the pointer and length. pub fn unpack_ptr_and_len(val: u64) -> (u32, u32) { let ptr = u32::try_from(val & (u32::MAX as u64)) - .expect("It ony contains first 32 bytes; qed"); - let len = u32::try_from(val >> 32).expect("It ony contains first 32 bytes; qed"); + .expect("It only contains first 32 bytes; qed"); + let len = u32::try_from(val >> 32).expect("It only contains first 32 bytes; qed"); (ptr, len) } diff --git a/crates/storage/src/test_helpers.rs b/crates/storage/src/test_helpers.rs index 9e11db66f08..f6fb5d019e7 100644 --- a/crates/storage/src/test_helpers.rs +++ b/crates/storage/src/test_helpers.rs @@ -17,29 +17,29 @@ use crate::{ /// The trait is used to provide a generic mocked implementation for all possible `StorageInspect`, /// `StorageMutate`, and `MerkleRootStorage` traits. pub trait MockStorageMethods { - /// The mocked implementation fot the `StorageInspect::get` method. + /// The mocked implementation for the `StorageInspect::get` method. fn get( &self, key: &M::Key, ) -> StorageResult>>; - /// The mocked implementation fot the `StorageInspect::contains_key` method. + /// The mocked implementation for the `StorageInspect::contains_key` method. fn contains_key(&self, key: &M::Key) -> StorageResult; - /// The mocked implementation fot the `StorageMutate::insert` method. + /// The mocked implementation for the `StorageMutate::insert` method. fn insert( &mut self, key: &M::Key, value: &M::Value, ) -> StorageResult>; - /// The mocked implementation fot the `StorageMutate::remove` method. + /// The mocked implementation for the `StorageMutate::remove` method. fn remove( &mut self, key: &M::Key, ) -> StorageResult>; - /// The mocked implementation fot the `MerkleRootStorage::root` method. + /// The mocked implementation for the `MerkleRootStorage::root` method. 
fn root( &self, key: &Key, diff --git a/deployment/Dockerfile b/deployment/Dockerfile index e83b75b9540..a1d1ceefd58 100644 --- a/deployment/Dockerfile +++ b/deployment/Dockerfile @@ -35,7 +35,7 @@ ENV BUILD_FEATURES=$FEATURES COPY --from=planner /build/recipe.json recipe.json RUN echo $CARGO_PROFILE_RELEASE_DEBUG RUN echo $BUILD_FEATURES -# Build our project dependecies, not our application! +# Build our project dependencies, not our application! RUN xx-cargo chef cook --release --no-default-features --features "${BUILD_FEATURES}" -p fuel-core-bin --recipe-path recipe.json # Up to this point, if our dependency tree stays the same, # all layers should be cached. diff --git a/deployment/e2e-client.Dockerfile b/deployment/e2e-client.Dockerfile index 7741734b516..452bdf7d884 100644 --- a/deployment/e2e-client.Dockerfile +++ b/deployment/e2e-client.Dockerfile @@ -16,7 +16,7 @@ RUN cargo chef prepare --recipe-path recipe.json FROM chef as builder ENV CARGO_NET_GIT_FETCH_WITH_CLI=true COPY --from=planner /build/recipe.json recipe.json -# Build our project dependecies, not our application! +# Build our project dependencies, not our application! RUN cargo chef cook --release -p fuel-core-e2e-client --features p2p --recipe-path recipe.json # Up to this point, if our dependency tree stays the same, # all layers should be cached. 
diff --git a/docs/architecture.md b/docs/architecture.md index 233893c67c4..a7841bae61d 100644 --- a/docs/architecture.md +++ b/docs/architecture.md @@ -372,7 +372,7 @@ impl transaction_pool::ports::BlockImporter for Service { #### Ports: fuel_core_executor::ports ```rust -trait Database: IntepreterStorage +trait Database: InterpreterStorage + StorageMut + StorageMut + StorageMut diff --git a/tests/tests/messages.rs b/tests/tests/messages.rs index fe328b0b403..84404041185 100644 --- a/tests/tests/messages.rs +++ b/tests/tests/messages.rs @@ -586,7 +586,7 @@ async fn can_get_message() { ..Default::default() }; - // configure the messges + // configure the messages let state_config = StateConfig { messages: vec![first_msg.clone()], ..Default::default() diff --git a/tests/tests/trigger_integration/interval.rs b/tests/tests/trigger_integration/interval.rs index 039854be657..ee8e5815c1c 100644 --- a/tests/tests/trigger_integration/interval.rs +++ b/tests/tests/trigger_integration/interval.rs @@ -83,7 +83,7 @@ async fn poa_interval_produces_empty_blocks_at_correct_rate() { round_time_seconds <= secs_per_round && secs_per_round <= round_time_seconds + 2 * (rounds as u64) / round_time_seconds, - "Round time not within treshold" + "Round time not within threshold" ); } @@ -167,7 +167,7 @@ async fn poa_interval_produces_nonempty_blocks_at_correct_rate() { round_time_seconds <= secs_per_round && secs_per_round <= round_time_seconds + 2 * (rounds as u64) / round_time_seconds, - "Round time not within treshold" + "Round time not within threshold" ); // Make sure all txs got produced diff --git a/tests/tests/tx.rs b/tests/tests/tx.rs index 086b5921e5d..4f16a7ba8fd 100644 --- a/tests/tests/tx.rs +++ b/tests/tests/tx.rs @@ -81,7 +81,7 @@ async fn dry_run_script() { let tx_statuses = client.dry_run(&[tx.clone()]).await.unwrap(); let log = tx_statuses .last() - .expect("Nonempty repsonse") + .expect("Nonempty response") .result .receipts(); assert_eq!(3, log.len()); diff --git 
a/tests/tests/tx/utxo_validation.rs b/tests/tests/tx/utxo_validation.rs index 7262f2d0d1c..09f8a27e1bc 100644 --- a/tests/tests/tx/utxo_validation.rs +++ b/tests/tests/tx/utxo_validation.rs @@ -171,7 +171,7 @@ async fn dry_run_override_utxo_validation() { .unwrap(); let log = tx_statuses .last() - .expect("Nonempty reponse") + .expect("Nonempty response") .result .receipts(); assert_eq!(2, log.len());