diff --git a/Cargo.lock b/Cargo.lock index c78bec26..ec631353 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -957,6 +957,51 @@ dependencies = [ "cc", ] +[[package]] +name = "axum" +version = "0.6.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3b829e4e32b91e643de6eafe82b1d90675f5874230191a4ffbc1b336dec4d6bf" +dependencies = [ + "async-trait", + "axum-core", + "bitflags 1.3.2", + "bytes", + "futures-util", + "http", + "http-body", + "hyper", + "itoa", + "matchit", + "memchr", + "mime", + "percent-encoding", + "pin-project-lite 0.2.13", + "rustversion", + "serde", + "sync_wrapper", + "tower", + "tower-layer", + "tower-service", +] + +[[package]] +name = "axum-core" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "759fa577a247914fd3f7f76d62972792636412fbfd634cd452f6a385a74d2d2c" +dependencies = [ + "async-trait", + "bytes", + "futures-util", + "http", + "http-body", + "mime", + "rustversion", + "tower-layer", + "tower-service", +] + [[package]] name = "backoff" version = "0.4.0" @@ -1665,6 +1710,42 @@ dependencies = [ "windows-sys 0.45.0", ] +[[package]] +name = "console-api" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c2895653b4d9f1538a83970077cb01dfc77a4810524e51a110944688e916b18e" +dependencies = [ + "prost", + "prost-types", + "tonic", + "tracing-core", +] + +[[package]] +name = "console-subscriber" +version = "0.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d4cf42660ac07fcebed809cfe561dd8730bcd35b075215e6479c516bcd0d11cb" +dependencies = [ + "console-api", + "crossbeam-channel", + "crossbeam-utils", + "futures", + "hdrhistogram", + "humantime", + "prost-types", + "serde", + "serde_json", + "thread_local", + "tokio", + "tokio-stream", + "tonic", + "tracing", + "tracing-core", + "tracing-subscriber 0.3.18", +] + [[package]] name = "const-oid" version = "0.9.5" @@ -4385,6 +4466,19 @@ dependencies = [ "allocator-api2", ] +[[package]] +name = "hdrhistogram" +version = "7.5.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "765c9198f173dd59ce26ff9f95ef0aafd0a0fe01fb9d72841bc5066a4c06511d" +dependencies = [ + "base64 0.21.5", + "byteorder", + "flate2", + "nom", + "num-traits", +] + [[package]] name = "heck" version = "0.4.1" @@ -4669,6 +4763,18 @@ dependencies = [ "webpki-roots 0.23.1", ] +[[package]] +name = "hyper-timeout" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbb958482e8c7be4bc3cf272a766a2b0bf1a6755e7a6ae777f017a31d11b13b1" +dependencies = [ + "hyper", + "pin-project-lite 0.2.13", + "tokio", + "tokio-io-timeout", +] + [[package]] name = "iana-time-zone" version = "0.1.57" @@ -6456,6 +6562,12 @@ version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2532096657941c2fea9c289d370a250971c689d4f143798ff67113ec042024a5" +[[package]] +name = "matchit" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0e7465ac9959cc2b1404e8e2367b43684a6d13790fe23056cc8c6c5a6b7bcb94" + [[package]] name = "matrixmultiply" version = "0.3.7" @@ -10047,7 +10159,6 @@ dependencies = [ [[package]] name = "sdk-dsn" version = "0.1.0" -source = "git+https://github.com/subspace/subspace-sdk?rev=000c6c774f3dd995e783d6d78d1d59669540b454#000c6c774f3dd995e783d6d78d1d59669540b454" dependencies = [ "anyhow", "derivative", @@ -10071,7 +10182,6 @@ dependencies = [ [[package]] name = 
"sdk-farmer" version = "0.1.0" -source = "git+https://github.com/subspace/subspace-sdk?rev=000c6c774f3dd995e783d6d78d1d59669540b454#000c6c774f3dd995e783d6d78d1d59669540b454" dependencies = [ "anyhow", "async-trait", @@ -10105,7 +10215,6 @@ dependencies = [ [[package]] name = "sdk-node" version = "0.1.0" -source = "git+https://github.com/subspace/subspace-sdk?rev=000c6c774f3dd995e783d6d78d1d59669540b454#000c6c774f3dd995e783d6d78d1d59669540b454" dependencies = [ "anyhow", "backoff", @@ -10172,7 +10281,6 @@ dependencies = [ [[package]] name = "sdk-substrate" version = "0.1.0" -source = "git+https://github.com/subspace/subspace-sdk?rev=000c6c774f3dd995e783d6d78d1d59669540b454#000c6c774f3dd995e783d6d78d1d59669540b454" dependencies = [ "bytesize", "derivative", @@ -10196,7 +10304,6 @@ dependencies = [ [[package]] name = "sdk-traits" version = "0.1.0" -source = "git+https://github.com/subspace/subspace-sdk?rev=000c6c774f3dd995e783d6d78d1d59669540b454#000c6c774f3dd995e783d6d78d1d59669540b454" dependencies = [ "async-trait", "parking_lot 0.12.1", @@ -10210,7 +10317,6 @@ dependencies = [ [[package]] name = "sdk-utils" version = "0.1.0" -source = "git+https://github.com/subspace/subspace-sdk?rev=000c6c774f3dd995e783d6d78d1d59669540b454#000c6c774f3dd995e783d6d78d1d59669540b454" dependencies = [ "anyhow", "async-trait", @@ -11890,15 +11996,29 @@ dependencies = [ [[package]] name = "subspace-sdk" version = "0.1.0" -source = "git+https://github.com/subspace/subspace-sdk?rev=000c6c774f3dd995e783d6d78d1d59669540b454#000c6c774f3dd995e783d6d78d1d59669540b454" dependencies = [ + "anyhow", + "clap", + "console-subscriber", + "derive_builder 0.12.0", + "derive_more", + "fdlimit 0.2.1", + "futures", + "mimalloc", "sdk-dsn", "sdk-farmer", "sdk-node", "sdk-substrate", "sdk-utils", + "serde_json", "static_assertions", + "subspace-farmer-components", "subspace-proof-of-space", + "tempfile", + "tokio", + "tracing", + "tracing-futures", + "tracing-subscriber 0.3.18", ] [[package]] @@ -12099,6 +12219,12 @@ dependencies = [ "unicode-ident", ] +[[package]] +name = "sync_wrapper" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160" + [[package]] name = "synstructure" version = "0.12.6" @@ -12323,6 +12449,16 @@ dependencies = [ "windows-sys 0.48.0", ] +[[package]] +name = "tokio-io-timeout" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "30b74022ada614a1b4834de765f9bb43877f910cc8ce4be40e89042c9223a8bf" +dependencies = [ + "pin-project-lite 0.2.13", + "tokio", +] + [[package]] name = "tokio-macros" version = "2.2.0" @@ -12414,12 +12550,49 @@ dependencies = [ "winnow", ] +[[package]] +name = "tonic" +version = "0.9.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3082666a3a6433f7f511c7192923fa1fe07c69332d3c6a2e6bb040b569199d5a" +dependencies = [ + "async-trait", + "axum", + "base64 0.21.5", + "bytes", + "futures-core", + "futures-util", + "h2", + "http", + "http-body", + "hyper", + "hyper-timeout", + "percent-encoding", + "pin-project", + "prost", + "tokio", + "tokio-stream", + "tower", + "tower-layer", + "tower-service", + "tracing", +] + [[package]] name = "tower" version = "0.4.13" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b8fa9be0de6cf49e536ce1851f987bd21a43b771b09473c3549a6c853db37c1c" dependencies = [ + "futures-core", + "futures-util", + "indexmap 1.9.3", + "pin-project", + "pin-project-lite 
0.2.13", + "rand 0.8.5", + "slab", + "tokio", + "tokio-util", "tower-layer", "tower-service", "tracing", diff --git a/Cargo.toml b/Cargo.toml index 4b01ac32..623b54a8 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -2,6 +2,7 @@ resolver = "2" members = [ "pulsar", + "sdk/*" ] # The list of dependencies below (which can be both direct and indirect dependencies) are crates diff --git a/README.md b/README.md index a3a3cda0..d1702f9c 100644 --- a/README.md +++ b/README.md @@ -38,7 +38,7 @@ In some instances, you may want to move the farming process to the background. T ### Example with `tmux` ```sh -$ tmux -S farming +tmux -S farming ``` This will create a new `tmux` session using a socket file named `farming`. @@ -46,7 +46,7 @@ This will create a new `tmux` session using a socket file named `farming`. Once the tmux session is created, you can go ahead and run the farming process. ```sh -$ ./pulsar farm +./pulsar farm ``` Once it's running, you can detach the process by pressing `CTRL+b d` (read more about [detaching a sessions](https://linuxhint.com/detach-session-tmux/)) @@ -56,7 +56,7 @@ That's it, you should be back to your terminal, with _subspace farming_ running To re-attach to your session, use tmux: ```sh -$ tmux -S farming attach +tmux -S farming attach ``` If you ever want to delete/kill your farming session, enter the command: @@ -91,6 +91,17 @@ If you ever want to delete/kill your farming session, enter the command: screen -S farming -X quit ``` +## Binary + +### macOS + +Install using [homebrew](https://brew.sh/) package manager: + +```sh +brew tap subspace/homebrew-pulsar +brew install pulsar +``` + ## Developer ### Pre-requisites @@ -107,30 +118,11 @@ sudo apt-get install llvm clang cmake #### macOS -1. Install via Homebrew: - -```bash -brew install llvm@15 clang cmake -``` - -2. Add `llvm` to your `~/.zshrc` or `~/.bashrc`: - -```bash -export PATH="/opt/homebrew/opt/llvm@15/bin:$PATH" -``` - -3. Activate the changes: - -```bash -source ~/.zshrc -# or -source ~/.bashrc -``` - -4. Verify that `llvm` is installed: +Install via Homebrew: ```bash -llvm-config --version +brew tap subspace/homebrew-pulsar +brew install pulsar ``` ### Build from Source @@ -140,7 +132,7 @@ Ensure the [pre-requisites](#pre-requisites). And then run: ```sh -$ cargo build +cargo build ``` > Use `--release` flag for a release build and optimized binary - `./target/release/pulsar` @@ -152,7 +144,7 @@ $ cargo build After ensuring the [pre-requisites](#pre-requisites), just build using cargo: ```sh -$ cargo build --release +cargo build --release ``` This would generate an optimized binary. @@ -160,7 +152,7 @@ This would generate an optimized binary. And then, you can install the binary (optimized) to your system: ```sh -$ cargo install --path . +cargo install --path . ``` The binary gets added to `~/.cargo/bin`, which is included in the PATH environment variable by default during installation of Rust tools. So you can run it immediately from the shell. 
diff --git a/pulsar/Cargo.toml b/pulsar/Cargo.toml index 201a1eb9..57f8f70e 100644 --- a/pulsar/Cargo.toml +++ b/pulsar/Cargo.toml @@ -30,7 +30,7 @@ single-instance = "0.3.3" sp-core = { version = "21.0.0", git = "https://github.com/subspace/polkadot-sdk", rev = "c63a8b28a9fd26d42116b0dcef1f2a5cefb9cd1c", features = ["full_crypto"] } strum = "0.24.1" strum_macros = "0.24.3" -subspace-sdk = { git = "https://github.com/subspace/subspace-sdk", rev = "000c6c774f3dd995e783d6d78d1d59669540b454", default-features = false } +subspace-sdk = { path = "../sdk/subspace-sdk", default-features = false } thiserror = "1" tokio = { version = "1.34.0", features = ["macros", "rt-multi-thread", "tracing", "signal"] } toml = "0.7"
diff --git a/sdk/README.md b/sdk/README.md new file mode 100644 index 00000000..2f1eb126 --- /dev/null +++ b/sdk/README.md @@ -0,0 +1,49 @@ +# Subspace-SDK + + + +A library for easily running a local Subspace node and/or farmer. + +## Dependencies + +You'll need the [Rust toolchain](https://rustup.rs/) installed, as well as a few additional packages (Ubuntu example): +```bash +sudo apt-get install build-essential llvm protobuf-compiler +``` + +## Simplest example + +Start a node and a farmer, then wait for 10 blocks to be farmed. + +```rust +use futures::prelude::*; + +let node = subspace_sdk::Node::builder() + .force_authoring(true) + .role(subspace_sdk::node::Role::Authority) + // Starting a new chain + .build("node", subspace_sdk::chain_spec::dev_config().unwrap()) + .await + .unwrap(); + +let plots = [subspace_sdk::PlotDescription::new("plot", bytesize::ByteSize::mb(100)).unwrap()]; +let cache = subspace_sdk::farmer::CacheDescription::new("cache", bytesize::ByteSize::mb(10)).unwrap(); +let farmer = subspace_sdk::Farmer::builder() + .build(subspace_sdk::PublicKey::from([0; 32]), node.clone(), &plots, cache) + .await + .expect("Failed to init a farmer"); + +for plot in farmer.iter_plots().await { + let mut plotting_progress = plot.subscribe_initial_plotting_progress().await; + while plotting_progress.next().await.is_some() {} +} +tracing::info!("Initial plotting completed"); + +node.subscribe_new_blocks() + .await + .unwrap() + // Wait 10 blocks and exit + .take(10) + .for_each(|block| async move { tracing::info!(?block, "New block!") }) + .await; +```
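The README snippet above uses `.await` at the top level, so it only compiles inside an async context. A minimal sketch of wrapping the node part of the example in a runnable binary, assuming nothing beyond the `tokio` and `futures` dependencies this workspace already uses:

```rust
use futures::prelude::*;

#[tokio::main]
async fn main() {
    // Build the node exactly as in the README example above.
    let node = subspace_sdk::Node::builder()
        .force_authoring(true)
        .role(subspace_sdk::node::Role::Authority)
        .build("node", subspace_sdk::chain_spec::dev_config().unwrap())
        .await
        .unwrap();

    // Wait for 10 blocks, then exit.
    node.subscribe_new_blocks()
        .await
        .unwrap()
        .take(10)
        .for_each(|block| async move { tracing::info!(?block, "New block!") })
        .await;
}
```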
diff --git a/sdk/dsn/Cargo.toml b/sdk/dsn/Cargo.toml new file mode 100644 index 00000000..d151c65d --- /dev/null +++ b/sdk/dsn/Cargo.toml @@ -0,0 +1,24 @@ +[package] +name = "sdk-dsn" +version = "0.1.0" +edition = "2021" +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +anyhow = "1" +derivative = "2.2.0" +derive_builder = "0.12" +derive_more = "0.99" +futures = "0.3" +hex = "0.4.3" +parking_lot = "0.12" +prometheus-client = "0.22.0" +sc-client-api = { version = "4.0.0-dev", git = "https://github.com/subspace/polkadot-sdk", rev = "c63a8b28a9fd26d42116b0dcef1f2a5cefb9cd1c" } +sc-consensus-subspace = { git = "https://github.com/subspace/subspace", rev = "bd435100200b3dcce6d6f50534d52e3cd039ca8e" } +sdk-utils = { path = "../utils" } +serde = { version = "1", features = ["derive"] } +sp-blockchain = { version = "4.0.0-dev", git = "https://github.com/subspace/polkadot-sdk", rev = "c63a8b28a9fd26d42116b0dcef1f2a5cefb9cd1c" } +sp-runtime = { version = "24.0.0", git = "https://github.com/subspace/polkadot-sdk", rev = "c63a8b28a9fd26d42116b0dcef1f2a5cefb9cd1c" } +subspace-farmer = { git = "https://github.com/subspace/subspace", rev = "bd435100200b3dcce6d6f50534d52e3cd039ca8e", default-features = false } +subspace-networking = { git = "https://github.com/subspace/subspace", rev = "bd435100200b3dcce6d6f50534d52e3cd039ca8e" } +tracing = "0.1"
diff --git a/sdk/dsn/src/builder.rs b/sdk/dsn/src/builder.rs new file mode 100644 index 00000000..8a19cb31 --- /dev/null +++ b/sdk/dsn/src/builder.rs @@ -0,0 +1,356 @@ +use std::collections::HashSet; +use std::path::PathBuf; +use std::sync::{Arc, Weak}; + +use anyhow::Context; +use derivative::Derivative; +use derive_builder::Builder; +use derive_more::{Deref, DerefMut, Display, From}; +use futures::prelude::*; +use prometheus_client::registry::Registry; +use sc_consensus_subspace::archiver::SegmentHeadersStore; +use sdk_utils::{self, DestructorSet, Multiaddr, MultiaddrWithPeerId}; +use serde::{Deserialize, Serialize}; +use subspace_farmer::piece_cache::PieceCache as FarmerPieceCache; +use subspace_farmer::utils::readers_and_pieces::ReadersAndPieces; +use subspace_farmer::KNOWN_PEERS_CACHE_SIZE; +use subspace_networking::libp2p::metrics::Metrics; +use subspace_networking::utils::strip_peer_id; +use subspace_networking::{ KademliaMode, KnownPeersManager, KnownPeersManagerConfig, PeerInfo, PeerInfoProvider, PieceByIndexRequest, PieceByIndexRequestHandler, PieceByIndexResponse, SegmentHeaderBySegmentIndexesRequestHandler, SegmentHeaderRequest, SegmentHeaderResponse, }; + +use super::local_provider_record_utils::MaybeLocalRecordProvider; +use super::LocalRecordProvider; + +/// Wrapper with default value for listen address +#[derive( + Debug, Clone, Derivative, Deserialize, Serialize, PartialEq, Eq, From, Deref, DerefMut, +)] +#[derivative(Default)] +#[serde(transparent)] +pub struct ListenAddresses( + #[derivative(Default( + // TODO: get rid of it, once it won't be required by monorepo + value = "vec![\"/ip4/127.0.0.1/tcp/0\".parse().expect(\"Always valid\")]" + ))] + pub Vec<Multiaddr>, +); + +/// Wrapper with default value for number of incoming connections +#[derive( + Debug, Clone, Derivative, Deserialize, Serialize, PartialEq, Eq, From, Deref, DerefMut, Display, +)] +#[derivative(Default)] +#[serde(transparent)] +pub struct InConnections(#[derivative(Default(value = "300"))] pub u32); + +/// Wrapper with default value for number of outgoing connections +#[derive( + Debug, Clone, Derivative, Deserialize, Serialize, PartialEq, Eq, From, Deref, DerefMut, Display, +)] +#[derivative(Default)] +#[serde(transparent)] +pub struct OutConnections(#[derivative(Default(value = "150"))] pub u32); + +/// Wrapper with default value for number of target connections +#[derive( + Debug, Clone, Derivative, Deserialize, Serialize, PartialEq, Eq, From, Deref, DerefMut, Display, +)] +#[derivative(Default)] +#[serde(transparent)] +pub struct TargetConnections(#[derivative(Default(value = "15"))] pub u32); + +/// Wrapper with default value for number of pending incoming connections +#[derive( + Debug, Clone, Derivative, Deserialize, Serialize, PartialEq, Eq, From, Deref, DerefMut, Display, +)] +#[derivative(Default)] +#[serde(transparent)] +pub struct PendingInConnections(#[derivative(Default(value = "100"))] pub u32); + +/// Wrapper with default value for number of pending outgoing connections +#[derive( + Debug, Clone, Derivative, Deserialize, Serialize, PartialEq, Eq, From, Deref, DerefMut, Display, +)] +#[derivative(Default)] +#[serde(transparent)] +pub struct PendingOutConnections(#[derivative(Default(value = "150"))] pub u32);
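These serde-transparent wrappers exist so that an omitted config key deserializes to the intended fallback rather than a zero. A quick illustration of the behaviour the `#[derivative(Default(...))]` attributes encode (hypothetical test; `serde_json` is assumed as a dev-dependency):

```rust
#[test]
fn connection_limit_defaults() {
    // Defaults come from the derivative attributes above...
    assert_eq!(*InConnections::default(), 300);
    assert_eq!(*OutConnections::default(), 150);
    assert_eq!(*TargetConnections::default(), 15);
    // ...and #[serde(transparent)] makes each wrapper (de)serialize as a bare number.
    assert_eq!(serde_json::to_string(&InConnections::default()).unwrap(), "300");
}
```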
+/// Node DSN builder +#[derive(Debug, Clone, Derivative, Builder, Deserialize, Serialize, PartialEq)] +#[derivative(Default)] +#[builder(pattern = "immutable", build_fn(private, name = "_build"), name = "DsnBuilder")] +#[non_exhaustive] +pub struct Dsn { + /// Path to the provider storage database + #[builder(default, setter(into, strip_option))] + #[serde(default, skip_serializing_if = "sdk_utils::is_default")] + pub provider_storage_path: Option<PathBuf>, + /// Listen on some address for other nodes + #[builder(default, setter(into))] + #[serde(default, skip_serializing_if = "sdk_utils::is_default")] + pub listen_addresses: ListenAddresses, + /// Boot nodes + #[builder(default)] + #[serde(default, skip_serializing_if = "Vec::is_empty")] + pub boot_nodes: Vec<MultiaddrWithPeerId>, + /// Known external addresses + #[builder(setter(into), default)] + #[serde(default, skip_serializing_if = "sdk_utils::is_default")] + pub external_addresses: Vec<Multiaddr>, + /// Reserved nodes + #[builder(default)] + #[serde(default, skip_serializing_if = "sdk_utils::is_default")] + pub reserved_nodes: Vec<Multiaddr>, + /// Determines whether we allow keeping non-global (private, shared, + /// loopback..) addresses in Kademlia DHT. + #[builder(default)] + #[serde(default, skip_serializing_if = "sdk_utils::is_default")] + pub allow_non_global_addresses_in_dht: bool, + /// Defines max established incoming swarm connection limit. + #[builder(setter(into), default)] + #[serde(default, skip_serializing_if = "sdk_utils::is_default")] + pub in_connections: InConnections, + /// Defines max established outgoing swarm connection limit. + #[builder(setter(into), default)] + #[serde(default, skip_serializing_if = "sdk_utils::is_default")] + pub out_connections: OutConnections, + /// Pending incoming swarm connection limit. + #[builder(setter(into), default)] + #[serde(default, skip_serializing_if = "sdk_utils::is_default")] + pub pending_in_connections: PendingInConnections, + /// Pending outgoing swarm connection limit. + #[builder(setter(into), default)] + #[serde(default, skip_serializing_if = "sdk_utils::is_default")] + pub pending_out_connections: PendingOutConnections, + /// Defines target total (in and out) connection number for DSN that + /// should be maintained. + #[builder(setter(into), default)] + #[serde(default, skip_serializing_if = "sdk_utils::is_default")] + pub target_connections: TargetConnections, +} + +sdk_utils::generate_builder!(Dsn); + +impl DsnBuilder { + /// Dev chain configuration + pub fn dev() -> Self { + Self::new().allow_non_global_addresses_in_dht(true) + } + + /// Gemini 3g configuration + pub fn gemini_3g() -> Self { + Self::new().listen_addresses(vec![ + "/ip6/::/tcp/30433".parse().expect("hardcoded value is true"), + "/ip4/0.0.0.0/tcp/30433".parse().expect("hardcoded value is true"), + ]) + } + + /// Devnet configuration + pub fn devnet() -> Self { + Self::new().listen_addresses(vec![ + "/ip6/::/tcp/30433".parse().expect("hardcoded value is true"), + "/ip4/0.0.0.0/tcp/30433".parse().expect("hardcoded value is true"), + ]) + } +}
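For orientation, a sketch of driving this builder (the `configuration()` finalizer is an assumption about what `sdk_utils::generate_builder!` emits, mirroring the farmer `Builder::configuration` later in this diff):

```rust
// Start from the Gemini 3g preset and tighten the swarm limits; with the
// "immutable" builder pattern every setter returns a new builder.
let dsn: Dsn = DsnBuilder::gemini_3g()
    .in_connections(InConnections(500))
    .target_connections(TargetConnections(30))
    .configuration(); // assumed finalizer generated by generate_builder!(Dsn)
```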
+ +/// Options for DSN +pub struct DsnOptions<C, PieceByIndex, SegmentHeaderByIndexes> { + /// Client to aux storage for node piece cache + pub client: Arc<C>, + /// Path for dsn + pub base_path: PathBuf, + /// Keypair for networking + pub keypair: subspace_networking::libp2p::identity::Keypair, + /// Get piece by hash handler + pub get_piece_by_index: PieceByIndex, + /// Get segment header by segment indexes handler + pub get_segment_header_by_segment_indexes: SegmentHeaderByIndexes, + /// Segment header store + pub segment_header_store: SegmentHeadersStore<C>, + /// Is libp2p metrics enabled + pub is_metrics_enabled: bool, +} + +/// Shared Dsn structure between node and farmer +#[derive(Derivative)] +#[derivative(Debug)] +pub struct DsnShared { + /// Dsn node + pub node: subspace_networking::Node, + /// Farmer readers and pieces + pub farmer_readers_and_pieces: Arc<parking_lot::Mutex<Option<ReadersAndPieces>>>, + /// Farmer piece cache + pub farmer_piece_cache: Arc<parking_lot::RwLock<Option<FarmerPieceCache>>>, + _destructors: DestructorSet, +} + +impl Dsn { + /// Build dsn + pub fn build_dsn<B, C, PieceByIndex, F1, SegmentHeaderByIndexes>( + self, + options: DsnOptions<C, PieceByIndex, SegmentHeaderByIndexes>, + ) -> anyhow::Result<( + DsnShared, + subspace_networking::NodeRunner<LocalRecordProvider>, + Option<Registry>, + )> + where + B: sp_runtime::traits::Block, + C: sc_client_api::AuxStore + sp_blockchain::HeaderBackend<B> + Send + Sync + 'static, + PieceByIndex: Fn( + &PieceByIndexRequest, + Weak<parking_lot::Mutex<Option<ReadersAndPieces>>>, + Arc<parking_lot::RwLock<Option<FarmerPieceCache>>>, + ) -> F1 + + Send + + Sync + + 'static, + F1: Future<Output = Option<PieceByIndexResponse>> + Send + 'static, + SegmentHeaderByIndexes: Fn(&SegmentHeaderRequest, &SegmentHeadersStore<C>) -> Option<SegmentHeaderResponse> + + Send + + Sync + + 'static, + { + let DsnOptions { + client, + base_path, + keypair, + get_piece_by_index, + get_segment_header_by_segment_indexes, + segment_header_store, + is_metrics_enabled, + } = options; + let farmer_readers_and_pieces = Arc::new(parking_lot::Mutex::new(None)); + let protocol_version = hex::encode(client.info().genesis_hash); + let farmer_piece_cache = Arc::new(parking_lot::RwLock::new(None)); + let local_records_provider = MaybeLocalRecordProvider::new(farmer_piece_cache.clone()); + + let mut metrics_registry = Registry::default(); + let metrics = is_metrics_enabled.then(|| Metrics::new(&mut metrics_registry)); + + tracing::debug!(genesis_hash = protocol_version, "Setting DSN protocol version..."); + + let Self { + listen_addresses, + reserved_nodes, + allow_non_global_addresses_in_dht, + provider_storage_path: _, + in_connections: InConnections(max_established_incoming_connections), + out_connections: OutConnections(max_established_outgoing_connections), + target_connections: TargetConnections(target_connections), + pending_in_connections: PendingInConnections(max_pending_incoming_connections), + pending_out_connections: PendingOutConnections(max_pending_outgoing_connections), + boot_nodes, + external_addresses, + } = self; + + let bootstrap_nodes =
boot_nodes.into_iter().map(Into::into).collect::<Vec<_>>(); + + let listen_on = listen_addresses.0.into_iter().map(Into::into).collect(); + + let networking_parameters_registry = KnownPeersManager::new(KnownPeersManagerConfig { + path: Some(base_path.join("known_addresses.bin").into_boxed_path()), + ignore_peer_list: strip_peer_id(bootstrap_nodes.clone()) + .into_iter() + .map(|(peer_id, _)| peer_id) + .collect::<HashSet<_>>(), + cache_size: KNOWN_PEERS_CACHE_SIZE, + ..Default::default() + }) + .context("Failed to open known addresses database for DSN")? + .boxed(); + + let default_networking_config = subspace_networking::Config::new( + protocol_version, + keypair, + local_records_provider.clone(), + Some(PeerInfoProvider::new_farmer()), + ); + + let config = subspace_networking::Config { + listen_on, + allow_non_global_addresses_in_dht, + networking_parameters_registry, + request_response_protocols: vec![ + PieceByIndexRequestHandler::create({ + let weak_readers_and_pieces = Arc::downgrade(&farmer_readers_and_pieces); + let farmer_piece_cache = farmer_piece_cache.clone(); + move |_, req| { + let weak_readers_and_pieces = weak_readers_and_pieces.clone(); + let farmer_piece_cache = farmer_piece_cache.clone(); + + get_piece_by_index(req, weak_readers_and_pieces, farmer_piece_cache) + } + }), + SegmentHeaderBySegmentIndexesRequestHandler::create({ + let segment_header_store = segment_header_store.clone(); + move |_, req| { + futures::future::ready(get_segment_header_by_segment_indexes( + req, + &segment_header_store, + )) + } + }), + ], + reserved_peers: reserved_nodes.into_iter().map(Into::into).collect(), + max_established_incoming_connections, + max_established_outgoing_connections, + max_pending_incoming_connections, + max_pending_outgoing_connections, + bootstrap_addresses: bootstrap_nodes, + kademlia_mode: KademliaMode::Dynamic, + external_addresses: external_addresses.into_iter().map(Into::into).collect(), + // Proactively maintain permanent connections with farmers (least restrictive value + // taken from farmer) + special_connected_peers_handler: Some(Arc::new(PeerInfo::is_farmer)), + // Maintain proactive connections with all peers (least restrictive value taken from + // node) + general_connected_peers_handler: Some(Arc::new(|_| true)), + // Maintain some number of persistent connections (taken from farmer) + general_connected_peers_target: 0, + // Special peers (taken from farmer) + special_connected_peers_target: target_connections, + // Allow up to quarter of incoming connections to be maintained (taken from node) + general_connected_peers_limit: max_established_incoming_connections / 4, + // Allow to maintain some extra farmer connections beyond direct interest too (taken + // from farmer) + special_connected_peers_limit: target_connections + + max_established_incoming_connections / 4, + metrics, + ..default_networking_config + }; + + let (node, runner) = subspace_networking::construct(config)?; + + let mut destructors = DestructorSet::new_without_async("dsn-destructors"); + let on_new_listener = node.on_new_listener(Arc::new({ + let node = node.clone(); + + move |address| { + tracing::info!( + "DSN listening on {}", + address + .clone() + .with(subspace_networking::libp2p::multiaddr::Protocol::P2p(node.id())) + ); + } + })); + destructors.add_items_to_drop(on_new_listener)?; + + Ok(( + DsnShared { + node, + farmer_readers_and_pieces, + _destructors: destructors, + farmer_piece_cache, + }, + runner, + is_metrics_enabled.then_some(metrics_registry), + )) + } +} diff --git a/sdk/dsn/src/lib.rs
b/sdk/dsn/src/lib.rs new file mode 100644 index 00000000..b36fa8ec --- /dev/null +++ b/sdk/dsn/src/lib.rs @@ -0,0 +1,22 @@ +//! Crate with DSN shared between sdk farmer and sdk node + +#![warn( + missing_docs, + clippy::dbg_macro, + clippy::unwrap_used, + clippy::disallowed_types, + unused_features +)] +#![cfg_attr(not(test), warn(unused_crate_dependencies))] +#![feature(concat_idents, const_option)] + +mod builder; +mod local_provider_record_utils; + +pub use builder::*; +use subspace_farmer::piece_cache::PieceCache as FarmerPieceCache; +use tracing::warn; + +/// A record provider that uses farmer piece cache underneath +pub type LocalRecordProvider = + local_provider_record_utils::MaybeLocalRecordProvider<FarmerPieceCache>;
diff --git a/sdk/dsn/src/local_provider_record_utils.rs b/sdk/dsn/src/local_provider_record_utils.rs new file mode 100644 index 00000000..2a846af7 --- /dev/null +++ b/sdk/dsn/src/local_provider_record_utils.rs @@ -0,0 +1,31 @@ +use std::sync::Arc; + +use derivative::Derivative; +use parking_lot::RwLock; +use subspace_networking::libp2p::kad::{ProviderRecord, RecordKey}; +use subspace_networking::LocalRecordProvider; + +#[derive(Derivative)] +#[derivative(Debug)] +pub struct MaybeLocalRecordProvider<LC> { + #[derivative(Debug = "ignore")] + inner: Arc<RwLock<Option<LC>>>, +} + +impl<LC> Clone for MaybeLocalRecordProvider<LC> { + fn clone(&self) -> Self { + Self { inner: Arc::clone(&self.inner) } + } +} + +impl<LC> MaybeLocalRecordProvider<LC> { + pub fn new(inner: Arc<RwLock<Option<LC>>>) -> Self { + Self { inner } + } +} + +impl<LC: LocalRecordProvider> LocalRecordProvider for MaybeLocalRecordProvider<LC> { + fn record(&self, key: &RecordKey) -> Option<ProviderRecord> { + self.inner.read().as_ref().map(|v| v.record(key)).unwrap_or(None) + } +}
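The point of `MaybeLocalRecordProvider` is that DSN construction needs a `LocalRecordProvider` before the farmer's piece cache exists; the shared slot is filled in later. A sketch of that late-binding flow, using the names from the code above (`farmer_piece_cache` stands in for the value the farmer produces once built):

```rust
use std::sync::Arc;
use parking_lot::RwLock;

// 1. Create an empty slot and hand a provider over it to the DSN config.
let slot = Arc::new(RwLock::new(None));
let provider = MaybeLocalRecordProvider::new(Arc::clone(&slot));
// `provider` is what `build_dsn` passes to `subspace_networking::Config::new`;
// until the slot is filled, `provider.record(..)` simply returns None.

// 2. Later, once the farmer is up, it populates the slot:
*slot.write() = Some(farmer_piece_cache); // farmer_piece_cache: FarmerPieceCache
```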
"time"] } +tracing = "0.1" +tracing-futures = "0.2" + +[features] +default = ["numa"] +numa = [ + "subspace-farmer/numa", +] diff --git a/sdk/farmer/build.rs b/sdk/farmer/build.rs new file mode 100644 index 00000000..364bd6f5 --- /dev/null +++ b/sdk/farmer/build.rs @@ -0,0 +1,5 @@ +fn main() { + let output = std::process::Command::new("git").args(["rev-parse", "HEAD"]).output().unwrap(); + let git_hash = String::from_utf8(output.stdout).unwrap(); + println!("cargo:rustc-env=GIT_HASH={git_hash}"); +} diff --git a/sdk/farmer/src/lib.rs b/sdk/farmer/src/lib.rs new file mode 100644 index 00000000..1bf684e9 --- /dev/null +++ b/sdk/farmer/src/lib.rs @@ -0,0 +1,1112 @@ +//! This crate is related to abstract farmer implementation + +#![warn( + missing_docs, + clippy::dbg_macro, + clippy::unwrap_used, + clippy::disallowed_types, + unused_features +)] +#![cfg_attr(not(test), warn(unused_crate_dependencies))] +#![feature(const_option)] + +use std::collections::HashMap; +use std::io; +use std::num::{NonZeroU8, NonZeroUsize}; +use std::path::PathBuf; +use std::sync::Arc; + +use anyhow::{anyhow, Context}; +pub use builder::{Builder, Config}; +use derivative::Derivative; +use futures::prelude::*; +use futures::stream::FuturesUnordered; +use sdk_traits::Node; +use sdk_utils::{ByteSize, DestructorSet, PublicKey, TaskOutput}; +use serde::{Deserialize, Serialize}; +use subspace_core_primitives::crypto::kzg; +use subspace_core_primitives::{PieceIndex, Record, SectorIndex}; +use subspace_erasure_coding::ErasureCoding; +use subspace_farmer::piece_cache::PieceCache as FarmerPieceCache; +use subspace_farmer::single_disk_farm::{ + SingleDiskFarm, SingleDiskFarmError, SingleDiskFarmId, SingleDiskFarmInfo, + SingleDiskFarmOptions, SingleDiskFarmSummary, +}; +use subspace_farmer::thread_pool_manager::PlottingThreadPoolManager; +use subspace_farmer::utils::farmer_piece_getter::FarmerPieceGetter; +use subspace_farmer::utils::piece_validator::SegmentCommitmentPieceValidator; +use subspace_farmer::utils::readers_and_pieces::ReadersAndPieces; +use subspace_farmer::utils::{ + all_cpu_cores, create_plotting_thread_pool_manager, thread_pool_core_indices, +}; +use subspace_farmer::{Identity, KNOWN_PEERS_CACHE_SIZE}; +use subspace_farmer_components::plotting::PlottedSector; +use subspace_farmer_components::sector::{sector_size, SectorMetadataChecksummed}; +use subspace_networking::libp2p::kad::RecordKey; +use subspace_networking::utils::multihash::ToMultihash; +use subspace_networking::KnownPeersManager; +use subspace_rpc_primitives::{FarmerAppInfo, SolutionResponse}; +use tokio::sync::{mpsc, oneshot, watch, Mutex, Semaphore}; +use tracing::{debug, error, info, warn}; +use tracing_futures::Instrument; + +/// Description of the farm +#[derive(Debug, Clone, PartialEq, Eq, Deserialize, Serialize)] +#[non_exhaustive] +pub struct FarmDescription { + /// Path of the farm + pub directory: PathBuf, + /// Space which you want to pledge + pub space_pledged: ByteSize, +} + +impl FarmDescription { + /// Construct Farm description + pub fn new(directory: impl Into, space_pledged: ByteSize) -> Self { + Self { directory: directory.into(), space_pledged } + } + + /// Wipe all the data from the farm + pub async fn wipe(self) -> io::Result<()> { + tokio::fs::remove_dir_all(self.directory).await + } +} + +mod builder { + use std::num::{NonZeroU8, NonZeroUsize}; + + use derivative::Derivative; + use derive_builder::Builder; + use derive_more::{Deref, DerefMut, Display, From}; + use sdk_traits::Node; + use sdk_utils::{ByteSize, PublicKey}; + use 
+mod builder { + use std::num::{NonZeroU8, NonZeroUsize}; + + use derivative::Derivative; + use derive_builder::Builder; + use derive_more::{Deref, DerefMut, Display, From}; + use sdk_traits::Node; + use sdk_utils::{ByteSize, PublicKey}; + use serde::{Deserialize, Serialize}; + + use super::BuildError; + use crate::{FarmDescription, Farmer}; + + #[derive( + Debug, + Clone, + Derivative, + Deserialize, + Serialize, + PartialEq, + Eq, + From, + Deref, + DerefMut, + Display, + )] + #[derivative(Default)] + #[serde(transparent)] + pub struct MaxConcurrentFarms( + #[derivative(Default(value = "NonZeroUsize::new(10).expect(\"10 > 0\")"))] + pub(crate) NonZeroUsize, + ); + + #[derive( + Debug, + Clone, + Derivative, + Deserialize, + Serialize, + PartialEq, + Eq, + From, + Deref, + DerefMut, + Display, + )] + #[derivative(Default)] + #[serde(transparent)] + pub struct PieceCacheSize( + #[derivative(Default(value = "ByteSize::mib(10)"))] pub(crate) ByteSize, + ); + + #[derive( + Debug, + Clone, + Derivative, + Deserialize, + Serialize, + PartialEq, + Eq, + From, + Deref, + DerefMut, + Display, + )] + #[derivative(Default)] + #[serde(transparent)] + pub struct ProvidedKeysLimit( + #[derivative(Default(value = "NonZeroUsize::new(655360).expect(\"655360 > 0\")"))] + pub(crate) NonZeroUsize, + ); + + /// Technical type which stores all farmer configuration options + #[derive(Debug, Clone, Derivative, Builder, Serialize, Deserialize)] + #[derivative(Default)] + #[builder(pattern = "immutable", build_fn(private, name = "_build"), name = "Builder")] + #[non_exhaustive] + pub struct Config { + /// Number of farms that can be plotted concurrently, impacts RAM usage. + #[builder(default, setter(into))] + #[serde(default, skip_serializing_if = "sdk_utils::is_default")] + pub max_concurrent_farms: MaxConcurrentFarms, + /// Limit on the number of provided keys, impacts RAM usage. + #[builder(default, setter(into))] + #[serde(default, skip_serializing_if = "sdk_utils::is_default")] + pub provided_keys_limit: ProvidedKeysLimit, + /// Maximum number of pieces in single sector + #[builder(default)] + pub max_pieces_in_sector: Option<u16>, + /// Size of PER FARM thread pool used for farming (mostly for blocking + /// I/O, but also for some compute-intensive operations during + /// proving), defaults to number of logical CPUs + /// available on UMA system and number of logical CPUs in + /// first NUMA node on NUMA system. + #[builder(default)] + pub farming_thread_pool_size: Option<NonZeroUsize>, + /// Size of one thread pool used for plotting, defaults to number of + /// logical CPUs available on UMA system and number of logical + /// CPUs available in NUMA node on NUMA system. + /// + /// Number of thread pools is defined by `--sector-encoding-concurrency` + /// option, different thread pools might have different number + /// of threads if NUMA nodes do not have the same size. + /// + /// Threads will be pinned to corresponding CPU cores at creation. + #[builder(default)] + pub plotting_thread_pool_size: Option<NonZeroUsize>, + /// Defines how many sectors farmer will download concurrently during + /// the plotting process, defaults to `--sector-encoding-concurrency` + /// + 1 to download future sector ahead of time + #[builder(default)] + pub sector_downloading_concurrency: Option<NonZeroUsize>, + /// Defines how many sectors farmer will encode concurrently, defaults + /// to 1 on UMA system and number of NUMA nodes on NUMA system. + /// It is further restricted by `sector_downloading_concurrency` + /// and setting this option higher than + /// `sector_downloading_concurrency` will have no effect. + #[builder(default)] + pub sector_encoding_concurrency: Option<NonZeroUsize>, + /// Size of one thread pool used for replotting, typically smaller + /// than the plotting pool so farming is not affected as much; + /// defaults to half of the number of logical CPUs. + /// + /// Threads will be pinned to corresponding CPU cores at creation.
+ #[builder(default)] + pub replotting_thread_pool_size: Option<NonZeroUsize>, + } + + impl Builder { + /// Get configuration for saving on disk + pub fn configuration(&self) -> Config { + self._build().expect("Build is infallible") + } + + /// Open and start farmer + pub async fn build<N: Node, T: subspace_proof_of_space::Table>( + self, + reward_address: PublicKey, + node: &N, + farms: &[FarmDescription], + cache_percentage: NonZeroU8, + ) -> Result<Farmer<T>, BuildError> { + self.configuration().build(reward_address, node, farms, cache_percentage).await + } + } +} + +/// Error when farm creation fails +#[derive(Debug, thiserror::Error)] +pub enum SingleDiskFarmCreationError { + /// Insufficient disk while creating single disk farm + #[error("Unable to create farm as allocated space {} ({}) is not enough, minimum is ~{} (~{}, {} bytes to be exact)", bytesize::to_string(*.allocated_space, true), bytesize::to_string(*.allocated_space, false), bytesize::to_string(*.min_space, true), bytesize::to_string(*.min_space, false), *.min_space)] + InsufficientSpaceForFarm { + /// Minimum space required for farm + min_space: u64, + /// Allocated space for farm + allocated_space: u64, + }, + /// Other error while creating single disk farm + #[error("Single disk farm creation error: {0}")] + Other(#[from] SingleDiskFarmError), +} + +/// Build Error +#[derive(Debug, thiserror::Error)] +pub enum BuildError { + /// Failed to create single disk farm + #[error("Single disk farm creation error: {0}")] + SingleDiskFarmCreate(#[from] SingleDiskFarmCreationError), + /// No farms were supplied during building + #[error("Supply at least one farm")] + NoFarmsSupplied, + /// Failed to fetch data from the node + #[error("Failed to fetch data from node: {0}")] + RPCError(#[source] subspace_farmer::RpcClientError), + /// Failed to build thread pool + #[error("Failed to build thread pool: {0}")] + ThreadPoolError(#[from] rayon::ThreadPoolBuildError), + /// Other error + #[error("{0}")] + Other(#[from] anyhow::Error), +} + +#[async_trait::async_trait] +impl<T: subspace_proof_of_space::Table> sdk_traits::Farmer for Farmer<T> { + type Table = T; + + async fn get_piece_by_index( + piece_index: PieceIndex, + piece_cache: &FarmerPieceCache, + weak_readers_and_pieces: &std::sync::Weak<parking_lot::Mutex<Option<ReadersAndPieces>>>, + ) -> Option<subspace_core_primitives::Piece> { + use tracing::debug; + + if let Some(piece) = + piece_cache.get_piece(RecordKey::from(piece_index.to_multihash())).await + { + return Some(piece); + } + + let weak_readers_and_pieces = weak_readers_and_pieces.clone(); + + debug!(?piece_index, "No piece in the cache.
Trying archival storage..."); + + let readers_and_pieces = match weak_readers_and_pieces.upgrade() { + Some(readers_and_pieces) => readers_and_pieces, + None => { + debug!("Readers and pieces are already dropped"); + return None; + } + }; + let read_piece = match readers_and_pieces.lock().as_ref() { + Some(readers_and_pieces) => readers_and_pieces.read_piece(&piece_index), + None => { + debug!(?piece_index, "Readers and pieces are not initialized yet"); + return None; + } + }; + + match read_piece { + Some(fut) => fut.in_current_span().await, + None => None, + } + } +} + +const SEGMENT_COMMITMENTS_CACHE_SIZE: NonZeroUsize = + NonZeroUsize::new(1_000_000).expect("Not zero; qed"); + +async fn create_readers_and_pieces( + single_disk_farms: &[SingleDiskFarm], +) -> anyhow::Result<ReadersAndPieces> { + // Store piece readers so we can reference them later + let readers = single_disk_farms.iter().map(SingleDiskFarm::piece_reader).collect(); + let mut readers_and_pieces = ReadersAndPieces::new(readers); + + tracing::debug!("Collecting already plotted pieces"); + + let mut plotted_sectors_iters = futures::future::join_all( + single_disk_farms.iter().map(|single_disk_farm| single_disk_farm.plotted_sectors()), + ) + .await; + + plotted_sectors_iters.drain(..).enumerate().try_for_each( + |(disk_farm_index, plotted_sectors_iter)| { + let disk_farm_index = disk_farm_index.try_into().map_err(|_error| { + anyhow!( + "More than 256 farms are not supported, consider running multiple farmer \ + instances" + ) + })?; + + (0 as SectorIndex..).zip(plotted_sectors_iter).for_each( + |(sector_index, plotted_sector_result)| match plotted_sector_result { + Ok(plotted_sector) => { + readers_and_pieces.add_sector(disk_farm_index, &plotted_sector); + } + Err(error) => { + error!( + %error, + %disk_farm_index, + %sector_index, + "Failed reading plotted sector on startup, skipping" + ); + } + }, + ); + + Ok::<_, anyhow::Error>(()) + }, + )?; + + tracing::debug!("Finished collecting already plotted pieces"); + + Ok(readers_and_pieces) +} + +#[allow(clippy::too_many_arguments)] +fn handler_on_sector_plotted( + plotted_sector: &PlottedSector, + maybe_old_plotted_sector: &Option<PlottedSector>, + disk_farm_index: usize, + readers_and_pieces: Arc<parking_lot::Mutex<Option<ReadersAndPieces>>>, +) { + let disk_farm_index = disk_farm_index + .try_into() + .expect("More than 256 farms are not supported, this is checked above already; qed"); + + { + let mut readers_and_pieces = readers_and_pieces.lock(); + let readers_and_pieces = + readers_and_pieces.as_mut().expect("Initial value was populated before; qed"); + + if let Some(old_plotted_sector) = maybe_old_plotted_sector { + readers_and_pieces.delete_sector(disk_farm_index, old_plotted_sector); + } + readers_and_pieces.add_sector(disk_farm_index, plotted_sector); + } +} + +impl Config { + /// Open and start farmer + pub async fn build<N: Node, T: subspace_proof_of_space::Table>( + self, + reward_address: PublicKey, + node: &N, + farms: &[FarmDescription], + cache_percentage: NonZeroU8, + ) -> Result<Farmer<T>, BuildError> { + if farms.is_empty() { + return Err(BuildError::NoFarmsSupplied); + } + + let mut destructors = DestructorSet::new("farmer-destructors"); + + let Self { + max_concurrent_farms: _, + provided_keys_limit: _, + max_pieces_in_sector, + farming_thread_pool_size, + plotting_thread_pool_size, + replotting_thread_pool_size, + sector_downloading_concurrency, + sector_encoding_concurrency, + } = self; + + let mut single_disk_farms = Vec::with_capacity(farms.len()); + let mut farm_info = HashMap::with_capacity(farms.len()); + + let readers_and_pieces = Arc::clone(&node.dsn().farmer_readers_and_pieces);
+ + let node_name = node.name().to_owned(); + + let peer_id = node.dsn().node.id(); + + let (farmer_piece_cache, farmer_piece_cache_worker) = + FarmerPieceCache::new(node.rpc().clone(), peer_id); + + let kzg = kzg::Kzg::new(kzg::embedded_kzg_settings()); + let erasure_coding = ErasureCoding::new( + NonZeroUsize::new(Record::NUM_S_BUCKETS.next_power_of_two().ilog2() as usize).expect( + "Number of buckets >= 1, therefore next power of 2 >= 2, therefore ilog2 >= 1", + ), + ) + .map_err(|error| anyhow::anyhow!("Failed to create erasure coding for farm: {error}"))?; + + let piece_provider = subspace_networking::utils::piece_provider::PieceProvider::new( + node.dsn().node.clone(), + Some(SegmentCommitmentPieceValidator::new( + node.dsn().node.clone(), + node.rpc().clone(), + kzg.clone(), + // TODO: Consider introducing and using global in-memory segment commitments cache + parking_lot::Mutex::new(lru::LruCache::new(SEGMENT_COMMITMENTS_CACHE_SIZE)), + )), + ); + let farmer_piece_getter = Arc::new(FarmerPieceGetter::new( + node.dsn().node.clone(), + piece_provider, + farmer_piece_cache.clone(), + node.rpc().clone(), + readers_and_pieces.clone(), + )); + + let (piece_cache_worker_drop_sender, piece_cache_worker_drop_receiver) = + oneshot::channel::<()>(); + let farmer_piece_cache_worker_join_handle = sdk_utils::task_spawn_blocking( + format!("sdk-farmer-{node_name}-pieces-cache-worker"), + { + let handle = tokio::runtime::Handle::current(); + let piece_getter = farmer_piece_getter.clone(); + + move || { + handle.block_on(future::select( + Box::pin({ + let piece_getter = piece_getter.clone(); + farmer_piece_cache_worker.run(piece_getter) + }), + piece_cache_worker_drop_receiver, + )); + } + }, + ); + + destructors.add_async_destructor({ + async move { + let _ = piece_cache_worker_drop_sender.send(()); + farmer_piece_cache_worker_join_handle.await.expect( + "awaiting worker should not fail except panic by the worker itself; qed", + ); + } + })?; + + let farmer_app_info = subspace_farmer::NodeClient::farmer_app_info(node.rpc()) + .await + .expect("Node is always reachable"); + + let max_pieces_in_sector = match max_pieces_in_sector { + Some(m) => m, + None => farmer_app_info.protocol_info.max_pieces_in_sector, + }; + + let mut plotting_delay_senders = Vec::with_capacity(farms.len()); + + let plotting_thread_pool_core_indices = + thread_pool_core_indices(plotting_thread_pool_size, sector_encoding_concurrency); + let replotting_thread_pool_core_indices = { + let mut replotting_thread_pool_core_indices = + thread_pool_core_indices(replotting_thread_pool_size, sector_encoding_concurrency); + if replotting_thread_pool_size.is_none() { + // The default behavior is to use all CPU cores, but for replotting we just want + // half + replotting_thread_pool_core_indices + .iter_mut() + .for_each(|set| set.truncate(set.cpu_cores().len() / 2)); + } + replotting_thread_pool_core_indices + }; + + let downloading_semaphore = Arc::new(Semaphore::new( + sector_downloading_concurrency + .map(|sector_downloading_concurrency| sector_downloading_concurrency.get()) + .unwrap_or(plotting_thread_pool_core_indices.len() + 1), + )); + + let all_cpu_cores = all_cpu_cores(); + let plotting_thread_pool_manager = create_plotting_thread_pool_manager( + plotting_thread_pool_core_indices.into_iter().zip(replotting_thread_pool_core_indices), + )?; + let farming_thread_pool_size = farming_thread_pool_size + .map(|farming_thread_pool_size| farming_thread_pool_size.get()) + .unwrap_or_else(|| { + all_cpu_cores + .first() + .expect("Not 
empty according to function description; qed") + .cpu_cores() + .len() + }); + + if all_cpu_cores.len() > 1 { + info!(numa_nodes = %all_cpu_cores.len(), "NUMA system detected"); + + if all_cpu_cores.len() > farms.len() { + warn!( + numa_nodes = %all_cpu_cores.len(), + farms_count = %farms.len(), + "Too few disk farms, CPU will not be utilized fully during plotting, same number of farms as NUMA \ + nodes or more is recommended" + ); + } + } + + // TODO: Remove code or environment variable once identified whether it helps or + // not + if std::env::var("NUMA_ALLOCATOR").is_ok() && all_cpu_cores.len() > 1 { + unsafe { + libmimalloc_sys::mi_option_set( + libmimalloc_sys::mi_option_use_numa_nodes, + all_cpu_cores.len() as std::ffi::c_long, + ); + } + } + + for (disk_farm_idx, description) in farms.iter().enumerate() { + let (plotting_delay_sender, plotting_delay_receiver) = + futures::channel::oneshot::channel(); + plotting_delay_senders.push(plotting_delay_sender); + + let (farm, single_disk_farm) = Farm::new(FarmOptions { + disk_farm_idx, + cache_percentage, + reward_address, + node, + max_pieces_in_sector, + piece_getter: Arc::clone(&farmer_piece_getter), + description, + kzg: kzg.clone(), + erasure_coding: erasure_coding.clone(), + farming_thread_pool_size, + plotting_delay: Some(plotting_delay_receiver), + downloading_semaphore: Arc::clone(&downloading_semaphore), + plotting_thread_pool_manager: plotting_thread_pool_manager.clone(), + }) + .await?; + + farm_info.insert(farm.directory.clone(), farm); + single_disk_farms.push(single_disk_farm); + } + + *node.dsn().farmer_piece_cache.write() = Some(farmer_piece_cache.clone()); + destructors.add_sync_destructor({ + let piece_cache = Arc::clone(&node.dsn().farmer_piece_cache); + move || { + piece_cache.write().take(); + } + })?; + + let cache_acknowledgement_receiver = farmer_piece_cache + .replace_backing_caches( + single_disk_farms + .iter() + .map(|single_disk_farm| single_disk_farm.piece_cache()) + .collect(), + ) + .await; + drop(farmer_piece_cache); + + let (plotting_delay_task_drop_sender, plotting_delay_task_drop_receiver) = + oneshot::channel::<()>(); + let plotting_delay_task_join_handle = sdk_utils::task_spawn_blocking( + format!("sdk-farmer-{node_name}-plotting-delay-task"), + { + let handle = tokio::runtime::Handle::current(); + + move || { + handle.block_on(future::select( + Box::pin(async { + if cache_acknowledgement_receiver.await.is_ok() { + for plotting_delay_sender in plotting_delay_senders { + // Doesn't matter if receiver is gone + let _ = plotting_delay_sender.send(()); + } + } + }), + plotting_delay_task_drop_receiver, + )); + } + }, + ); + + destructors.add_async_destructor({ + async move { + let _ = plotting_delay_task_drop_sender.send(()); + plotting_delay_task_join_handle.await.expect( + "awaiting worker should not fail except panic by the worker itself; qed", + ); + } + })?; + + let readers_and_pieces_instance = create_readers_and_pieces(&single_disk_farms).await?; + readers_and_pieces.lock().replace(readers_and_pieces_instance); + destructors.add_sync_destructor({ + let farmer_reader_and_pieces = node.dsn().farmer_readers_and_pieces.clone(); + move || { + farmer_reader_and_pieces.lock().take(); + } + })?; + + let mut sector_plotting_handler_ids = vec![]; + for (disk_farm_index, single_disk_farm) in single_disk_farms.iter().enumerate() { + let readers_and_pieces = Arc::clone(&readers_and_pieces); + let span = tracing::info_span!("farm", %disk_farm_index); + + // Collect newly plotted pieces + // TODO: Once we have 
replotting, this will have to be updated + sector_plotting_handler_ids.push(single_disk_farm.on_sector_plotted(Arc::new( + move |(plotted_sector, maybe_old_plotted_sector)| { + let _span_guard = span.enter(); + handler_on_sector_plotted( + plotted_sector, + maybe_old_plotted_sector, + disk_farm_index, + readers_and_pieces.clone(), + ) + }, + ))); + } + + let mut single_disk_farms_stream = + single_disk_farms.into_iter().map(SingleDiskFarm::run).collect::<FuturesUnordered<_>>(); + + let (farm_driver_drop_sender, mut farm_driver_drop_receiver) = oneshot::channel::<()>(); + let (farm_driver_result_sender, farm_driver_result_receiver) = + mpsc::channel::<_>(u8::MAX as usize + 1); + + let farm_driver_join_handle = + sdk_utils::task_spawn_blocking(format!("sdk-farmer-{node_name}-farms-driver"), { + let handle = tokio::runtime::Handle::current(); + + move || { + use future::Either::*; + + loop { + let result = handle.block_on(future::select( + single_disk_farms_stream.next(), + &mut farm_driver_drop_receiver, + )); + + match result { + Left((maybe_result, _)) => { + let send_result = match maybe_result { + None => farm_driver_result_sender + .try_send(Ok(TaskOutput::Value(None))), + Some(result) => match result { + Ok(single_disk_farm_id) => farm_driver_result_sender + .try_send(Ok(TaskOutput::Value(Some( + single_disk_farm_id, + )))), + Err(e) => farm_driver_result_sender.try_send(Err(e)), + }, + }; + + // Receiver is closed which would mean we are shutting down + if send_result.is_err() { + break; + } + } + Right((_, _)) => { + warn!("Received drop signal for farm driver, exiting..."); + let _ = + farm_driver_result_sender.try_send(Ok(TaskOutput::Cancelled( + "Received drop signal for farm driver".into(), + ))); + break; + } + }; + } + } + }); + + destructors.add_async_destructor({ + async move { + let _ = farm_driver_drop_sender.send(()); + farm_driver_join_handle.await.expect("joining should not fail; qed"); + } + })?; + + for handler_id in sector_plotting_handler_ids.drain(..) { + destructors.add_items_to_drop(handler_id)?; + } + + tracing::debug!("Started farmer"); + + Ok(Farmer { + reward_address, + farm_info, + result_receiver: Some(farm_driver_result_receiver), + node_name, + app_info: subspace_farmer::NodeClient::farmer_app_info(node.rpc()) + .await + .expect("Node is always reachable"), + _destructors: destructors, + }) + } +} + +type ResultReceiver = mpsc::Receiver<anyhow::Result<TaskOutput<Option<SingleDiskFarmId>, String>>>; + +/// Farmer structure +#[derive(Derivative)] +#[derivative(Debug)] +#[must_use = "Farmer should be closed"] +pub struct Farmer<T: subspace_proof_of_space::Table> { + reward_address: PublicKey, + farm_info: HashMap<PathBuf, Farm<T>>, + result_receiver: Option<ResultReceiver>, + node_name: String, + app_info: FarmerAppInfo, + _destructors: DestructorSet, +} + +/// Info about some farm +#[derive(Debug)] +#[non_exhaustive] +// TODO: Should it be versioned?
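Together with `builder::Builder` above, the intended lifecycle is roughly as follows (a sketch, not lifted from this diff: `PosTable`, `reward_address`, `node` and `farms` are stand-in names, and `NonZeroU8::new(1)` means a 1% piece cache):

```rust
let farmer: Farmer<PosTable> = Farmer::builder()
    .build(reward_address, &node, &farms, NonZeroU8::new(1).expect("1 > 0"))
    .await?;

for farm in farmer.iter_farms().await {
    println!("{:?} -> {:?}", farm.directory(), farm.allocated_space());
}

// Farmer is #[must_use]; close() tears down the driver and destructors cleanly.
farmer.close().await?;
```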
+pub struct FarmInfo { + /// ID of the farm + pub id: SingleDiskFarmId, + /// Genesis hash of the chain used for farm creation + pub genesis_hash: [u8; 32], + /// Public key of identity used for farm creation + pub public_key: PublicKey, + /// How much space in bytes is allocated for this farm + pub allocated_space: ByteSize, + /// How many pieces are in sector + pub pieces_in_sector: u16, +} + +impl From<SingleDiskFarmInfo> for FarmInfo { + fn from(info: SingleDiskFarmInfo) -> Self { + let SingleDiskFarmInfo::V0 { + id, + genesis_hash, + public_key, + allocated_space, + pieces_in_sector, + } = info; + Self { + id, + genesis_hash, + public_key: PublicKey(public_key), + allocated_space: ByteSize::b(allocated_space), + pieces_in_sector, + } + } +} + +/// Farmer info +#[derive(Debug)] +#[non_exhaustive] +pub struct Info { + /// Version of the farmer + pub version: String, + /// Reward address of our farmer + pub reward_address: PublicKey, + // TODO: add dsn peers info + // pub dsn_peers: u64, + /// Info about each farm + pub farms_info: HashMap<PathBuf, FarmInfo>, + /// Sector size in bytes + pub sector_size: u64, +} + +/// Initial plotting progress +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub struct InitialPlottingProgress { + /// Number of sectors from which we started plotting + pub starting_sector: u64, + /// Current number of sectors + pub current_sector: u64, + /// Total number of sectors on disk + pub total_sectors: u64, +} + +/// Progress data received from sender, used to monitor plotting progress +pub type ProgressData = Option<(PlottedSector, Option<PlottedSector>)>; + +/// Farm structure +#[derive(Debug)] +pub struct Farm<T: subspace_proof_of_space::Table> { + directory: PathBuf, + progress: watch::Receiver<ProgressData>, + solutions: watch::Receiver<Option<SolutionResponse>>, + initial_plotting_progress: Arc<Mutex<InitialPlottingProgress>>, + allocated_space: u64, + _destructors: DestructorSet, + _table: std::marker::PhantomData<T>, +} + +#[pin_project::pin_project] +struct InitialPlottingProgressStreamInner<S> { + last_initial_plotting_progress: InitialPlottingProgress, + #[pin] + stream: S, +} + +impl<S> Stream for InitialPlottingProgressStreamInner<S> +where + S: Stream<Item = InitialPlottingProgress>, +{ + type Item = InitialPlottingProgress; + + fn poll_next( + self: std::pin::Pin<&mut Self>, + cx: &mut std::task::Context<'_>, + ) -> std::task::Poll<Option<Self::Item>> { + let this = self.project(); + match this.stream.poll_next(cx) { + result @ std::task::Poll::Ready(Some(progress)) => { + *this.last_initial_plotting_progress = progress; + result + } + result => result, + } + } + + fn size_hint(&self) -> (usize, Option<usize>) { + let left = self.last_initial_plotting_progress.total_sectors + - self.last_initial_plotting_progress.current_sector; + (left as usize, Some(left as usize)) + } +} + +/// Initial plotting progress stream +#[pin_project::pin_project] +pub struct InitialPlottingProgressStream { + #[pin] + boxed_stream: + std::pin::Pin<Box<dyn Stream<Item = InitialPlottingProgress> + Send + Sync + Unpin>>, +} + +impl Stream for InitialPlottingProgressStream { + type Item = InitialPlottingProgress; + + fn poll_next( + self: std::pin::Pin<&mut Self>, + cx: &mut std::task::Context<'_>, + ) -> std::task::Poll<Option<Self::Item>> { + self.project().boxed_stream.poll_next(cx) + } + + fn size_hint(&self) -> (usize, Option<usize>) { + self.boxed_stream.size_hint() + } +} + +struct FarmOptions<'a, PG, N: sdk_traits::Node> { + pub disk_farm_idx: usize, + pub cache_percentage: NonZeroU8, + pub reward_address: PublicKey, + pub node: &'a N, + pub piece_getter: PG, + pub description: &'a FarmDescription, + pub kzg: kzg::Kzg, + pub erasure_coding: ErasureCoding, + pub max_pieces_in_sector: u16, + pub farming_thread_pool_size: usize, + pub plotting_delay: Option<futures::channel::oneshot::Receiver<()>>,
+ pub downloading_semaphore: Arc<Semaphore>, + pub plotting_thread_pool_manager: PlottingThreadPoolManager, +} + +impl<T: subspace_proof_of_space::Table> Farm<T> { + async fn new( + FarmOptions { + disk_farm_idx, + cache_percentage, + reward_address, + node, + piece_getter, + description, + kzg, + erasure_coding, + max_pieces_in_sector, + farming_thread_pool_size, + plotting_delay, + downloading_semaphore, + plotting_thread_pool_manager, + }: FarmOptions< + '_, + impl subspace_farmer_components::plotting::PieceGetter + Clone + Send + Sync + 'static, + impl sdk_traits::Node, + >, + ) -> Result<(Self, SingleDiskFarm), BuildError> { + let directory = description.directory.clone(); + let allocated_space = description.space_pledged.as_u64(); + let farmer_app_info = subspace_farmer::NodeClient::farmer_app_info(node.rpc()) + .await + .expect("Node is always reachable"); + + let description = SingleDiskFarmOptions { + allocated_space, + directory: directory.clone(), + farmer_app_info, + max_pieces_in_sector, + reward_address: *reward_address, + node_client: node.rpc().clone(), + kzg, + erasure_coding, + piece_getter, + cache_percentage, + downloading_semaphore, + farm_during_initial_plotting: false, + farming_thread_pool_size, + plotting_thread_pool_manager, + plotting_delay, + }; + let single_disk_farm_fut = SingleDiskFarm::new::<_, _, T>(description, disk_farm_idx); + let single_disk_farm = match single_disk_farm_fut.await { + Ok(single_disk_farm) => single_disk_farm, + Err(SingleDiskFarmError::InsufficientAllocatedSpace { min_space, allocated_space }) => { + return Err(BuildError::SingleDiskFarmCreate( + SingleDiskFarmCreationError::InsufficientSpaceForFarm { + min_space, + allocated_space, + }, + )); + } + Err(error) => { + return Err(BuildError::SingleDiskFarmCreate(SingleDiskFarmCreationError::Other( + error, + ))); + } + }; + let mut destructors = DestructorSet::new_without_async("farm-destructors"); + + let progress = { + let (sender, receiver) = watch::channel::<ProgressData>(None); + destructors.add_items_to_drop(single_disk_farm.on_sector_plotted(Arc::new( + move |sector| { + let _ = sender.send(Some(sector.clone())); + }, + )))?; + receiver + }; + let solutions = { + let (sender, receiver) = watch::channel::<Option<SolutionResponse>>(None); + destructors.add_items_to_drop(single_disk_farm.on_solution(Arc::new( + move |solution| { + let _ = sender.send(Some(solution.clone())); + }, + )))?; + receiver + }; + + // TODO: This calculation is directly imported from the monorepo and relies on + // internal calculation of farm. Remove it once we have public function.
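// Worked example with illustrative numbers (not taken from this diff): for
// allocated_space = 100 GiB, cache_percentage = 1 and roughly 1 GiB per sector
// (sector_size(max_pieces_in_sector) + metadata), the calculation below gives
// potentially_plottable_space ≈ (100 GiB - ~2 MiB) / 100 * 99 ≈ 99 GiB and
// hence a target_sector_count of ~98 sectors after integer division.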
+        let fixed_space_usage = 2 * 1024 * 1024
+            + Identity::file_size() as u64
+            + KnownPeersManager::file_size(KNOWN_PEERS_CACHE_SIZE) as u64;
+        // Calculate how many sectors can fit
+        let target_sector_count = {
+            let potentially_plottable_space = allocated_space.saturating_sub(fixed_space_usage)
+                / 100
+                * (100 - u64::from(cache_percentage.get()));
+            // Do the rounding to make sure we have exactly as much space as fits whole
+            // number of sectors
+            potentially_plottable_space
+                / (sector_size(max_pieces_in_sector) + SectorMetadataChecksummed::encoded_size())
+                    as u64
+        };
+
+        Ok((
+            Self {
+                directory: directory.clone(),
+                allocated_space,
+                progress,
+                solutions,
+                initial_plotting_progress: Arc::new(Mutex::new(InitialPlottingProgress {
+                    starting_sector: u64::try_from(single_disk_farm.plotted_sectors_count().await)
+                        .expect("Sector count is less than u64::MAX"),
+                    current_sector: u64::try_from(single_disk_farm.plotted_sectors_count().await)
+                        .expect("Sector count is less than u64::MAX"),
+                    total_sectors: target_sector_count,
+                })),
+                _destructors: destructors,
+                _table: Default::default(),
+            },
+            single_disk_farm,
+        ))
+    }
+
+    /// Farm location
+    pub fn directory(&self) -> &PathBuf {
+        &self.directory
+    }
+
+    /// Farm size
+    pub fn allocated_space(&self) -> ByteSize {
+        ByteSize::b(self.allocated_space)
+    }
+
+    /// Returns a stream of initial plotting progress which ends once we
+    /// finish plotting
+    pub async fn subscribe_initial_plotting_progress(&self) -> InitialPlottingProgressStream {
+        let initial = *self.initial_plotting_progress.lock().await;
+        if initial.current_sector == initial.total_sectors {
+            return InitialPlottingProgressStream {
+                boxed_stream: Box::pin(futures::stream::iter(None)),
+            };
+        }
+
+        let stream = tokio_stream::wrappers::WatchStream::new(self.progress.clone())
+            .filter_map({
+                let initial_plotting_progress = Arc::clone(&self.initial_plotting_progress);
+                move |_| {
+                    let initial_plotting_progress = Arc::clone(&initial_plotting_progress);
+                    async move {
+                        let mut guard = initial_plotting_progress.lock().await;
+                        let plotting_progress = *guard;
+                        guard.current_sector += 1;
+                        Some(plotting_progress)
+                    }
+                }
+            })
+            .take_while(|InitialPlottingProgress { current_sector, total_sectors, .. }| {
+                futures::future::ready(current_sector < total_sectors)
+            });
+        let last_initial_plotting_progress = *self.initial_plotting_progress.lock().await;
+
+        InitialPlottingProgressStream {
+            boxed_stream: Box::pin(Box::pin(InitialPlottingProgressStreamInner {
+                stream,
+                last_initial_plotting_progress,
+            })),
+        }
+    }
+
+    /// New solution subscription
+    pub async fn subscribe_new_solutions(
+        &self,
+    ) -> impl Stream<Item = SolutionResponse> + Send + Sync + Unpin {
+        tokio_stream::wrappers::WatchStream::new(self.solutions.clone())
+            .filter_map(futures::future::ready)
+    }
+}
+
+impl<T: Table> Farmer<T> {
+    /// Farmer builder
+    pub fn builder() -> Builder {
+        Builder::default()
+    }
+
+    /// Gets farm info
+    pub async fn get_info(&self) -> anyhow::Result<Info> {
+        let farms_info = tokio::task::spawn_blocking({
+            let dirs = self.farm_info.keys().cloned().collect::<Vec<_>>();
+            || dirs.into_iter().map(SingleDiskFarm::collect_summary).collect::<Vec<_>>()
+        })
+        .await?
+        .into_iter()
+        .map(|summary| match summary {
+            SingleDiskFarmSummary::Found { info, directory } => Ok((directory, info.into())),
+            SingleDiskFarmSummary::NotFound { directory } =>
+                Err(anyhow::anyhow!("Didn't find a farm at `{directory:?}`")),
+            SingleDiskFarmSummary::Error { directory, error } =>
+                Err(error).context(format!("Failed to get farm summary at `{directory:?}`")),
+        })
+        .collect::<anyhow::Result<_>>()?;
+
+        Ok(Info {
+            farms_info,
+            version: format!("{}-{}", env!("CARGO_PKG_VERSION"), env!("GIT_HASH")),
+            reward_address: self.reward_address,
+            sector_size: subspace_farmer_components::sector::sector_size(
+                self.app_info.protocol_info.max_pieces_in_sector,
+            ) as _,
+        })
+    }
+
+    /// Iterate over farms
+    pub async fn iter_farms(&'_ self) -> impl Iterator<Item = &'_ Farm<T>> + '_ {
+        self.farm_info.values()
+    }
+
+    /// Stops farming, closes farms, and sends signal to the node
+    pub async fn close(mut self) -> anyhow::Result<()> {
+        self._destructors.async_drop().await?;
+        let mut result_receiver = self.result_receiver.take().expect("Handle is always there");
+        result_receiver.close();
+        while let Some(task_result) = result_receiver.recv().await {
+            let output = task_result?;
+            match output {
+                TaskOutput::Value(_) => {}
+                TaskOutput::Cancelled(reason) => {
+                    warn!("Farm driver was cancelled due to reason: {:?}", reason);
+                }
+            }
+        }
+
+        Ok(())
+    }
+}
diff --git a/sdk/node/Cargo.toml b/sdk/node/Cargo.toml
new file mode 100644
index 00000000..65e303e9
--- /dev/null
+++ b/sdk/node/Cargo.toml
@@ -0,0 +1,67 @@
+[package]
+name = "sdk-node"
+version = "0.1.0"
+edition = "2021"
+# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
+
+[dependencies]
+anyhow = "1"
+backoff = "0.4"
+cross-domain-message-gossip = { git = "https://github.com/subspace/subspace", rev = "bd435100200b3dcce6d6f50534d52e3cd039ca8e" }
+derivative = "2.2.0"
+derive_builder = "0.12"
+derive_more = "0.99"
+domain-client-message-relayer = { git = "https://github.com/subspace/subspace", rev = "bd435100200b3dcce6d6f50534d52e3cd039ca8e" }
+domain-client-operator = { git = "https://github.com/subspace/subspace", rev = "bd435100200b3dcce6d6f50534d52e3cd039ca8e" }
+domain-eth-service = { git = "https://github.com/subspace/subspace", rev = "bd435100200b3dcce6d6f50534d52e3cd039ca8e" }
+domain-runtime-primitives = { git = "https://github.com/subspace/subspace", rev = "bd435100200b3dcce6d6f50534d52e3cd039ca8e" }
+domain-service = { git = "https://github.com/subspace/subspace", rev = "bd435100200b3dcce6d6f50534d52e3cd039ca8e" }
+evm-domain-runtime = { git = "https://github.com/subspace/subspace", rev = "bd435100200b3dcce6d6f50534d52e3cd039ca8e" }
+fp-evm = { version = "3.0.0-dev", git = "https://github.com/subspace/frontier", rev = "37ee45323120b21adc1d69ae7348bd0f7282eeae" }
+frame-system = { version = "4.0.0-dev", git = "https://github.com/subspace/polkadot-sdk", rev = "c63a8b28a9fd26d42116b0dcef1f2a5cefb9cd1c" }
+futures = "0.3"
+hex-literal = "0.4"
+pallet-rewards = { git = "https://github.com/subspace/subspace", rev = "bd435100200b3dcce6d6f50534d52e3cd039ca8e" }
+pallet-subspace = { git = "https://github.com/subspace/subspace", rev = "bd435100200b3dcce6d6f50534d52e3cd039ca8e" }
+parity-scale-codec = "3.6.3"
+parking_lot = "0.12"
+pin-project = "1"
+sc-client-api = { version = "4.0.0-dev", git = "https://github.com/subspace/polkadot-sdk", rev = "c63a8b28a9fd26d42116b0dcef1f2a5cefb9cd1c" }
+sc-consensus-slots = { version = "0.10.0-dev", git = "https://github.com/subspace/polkadot-sdk", rev =
"c63a8b28a9fd26d42116b0dcef1f2a5cefb9cd1c" } +sc-consensus-subspace = { git = "https://github.com/subspace/subspace", rev = "bd435100200b3dcce6d6f50534d52e3cd039ca8e" } +sc-executor = { version = "0.10.0-dev", git = "https://github.com/subspace/polkadot-sdk", rev = "c63a8b28a9fd26d42116b0dcef1f2a5cefb9cd1c" } +sc-network = { version = "0.10.0-dev", git = "https://github.com/subspace/polkadot-sdk", rev = "c63a8b28a9fd26d42116b0dcef1f2a5cefb9cd1c" } +sc-network-sync = { version = "0.10.0-dev", git = "https://github.com/subspace/polkadot-sdk", rev = "c63a8b28a9fd26d42116b0dcef1f2a5cefb9cd1c" } +sc-rpc-api = { version = "0.10.0-dev", git = "https://github.com/subspace/polkadot-sdk", rev = "c63a8b28a9fd26d42116b0dcef1f2a5cefb9cd1c" } +sc-service = { version = "0.10.0-dev", git = "https://github.com/subspace/polkadot-sdk", rev = "c63a8b28a9fd26d42116b0dcef1f2a5cefb9cd1c", default-features = false } +sc-storage-monitor = { version = "0.1.0", git = "https://github.com/subspace/polkadot-sdk", rev = "c63a8b28a9fd26d42116b0dcef1f2a5cefb9cd1c", default-features = false } +sc-subspace-chain-specs = { git = "https://github.com/subspace/subspace", rev = "bd435100200b3dcce6d6f50534d52e3cd039ca8e" } +sc-telemetry = { version = "4.0.0-dev", git = "https://github.com/subspace/polkadot-sdk", rev = "c63a8b28a9fd26d42116b0dcef1f2a5cefb9cd1c" } +sc-transaction-pool-api = { version = "4.0.0-dev", git = "https://github.com/subspace/polkadot-sdk", rev = "c63a8b28a9fd26d42116b0dcef1f2a5cefb9cd1c" } +sc-utils = { version = "4.0.0-dev", git = "https://github.com/subspace/polkadot-sdk", rev = "c63a8b28a9fd26d42116b0dcef1f2a5cefb9cd1c" } +sdk-dsn = { path = "../dsn" } +sdk-substrate = { path = "../substrate" } +sdk-traits = { path = "../traits" } +sdk-utils = { path = "../utils" } +serde = { version = "1", features = ["derive"] } +serde_json = "1" +sp-blockchain = { version = "4.0.0-dev", git = "https://github.com/subspace/polkadot-sdk", rev = "c63a8b28a9fd26d42116b0dcef1f2a5cefb9cd1c" } +sp-consensus = { version = "0.10.0-dev", git = "https://github.com/subspace/polkadot-sdk", rev = "c63a8b28a9fd26d42116b0dcef1f2a5cefb9cd1c" } +sp-consensus-subspace = { git = "https://github.com/subspace/subspace", rev = "bd435100200b3dcce6d6f50534d52e3cd039ca8e" } +sp-core = { version = "21.0.0", git = "https://github.com/subspace/polkadot-sdk", rev = "c63a8b28a9fd26d42116b0dcef1f2a5cefb9cd1c" } +sp-domains = { git = "https://github.com/subspace/subspace", rev = "bd435100200b3dcce6d6f50534d52e3cd039ca8e" } +sp-domains-fraud-proof = { git = "https://github.com/subspace/subspace", rev = "bd435100200b3dcce6d6f50534d52e3cd039ca8e" } +sp-messenger = { git = "https://github.com/subspace/subspace", rev = "bd435100200b3dcce6d6f50534d52e3cd039ca8e" } +sp-runtime = { version = "24.0.0", git = "https://github.com/subspace/polkadot-sdk", rev = "c63a8b28a9fd26d42116b0dcef1f2a5cefb9cd1c" } +sp-version = { version = "22.0.0", git = "https://github.com/subspace/polkadot-sdk", rev = "c63a8b28a9fd26d42116b0dcef1f2a5cefb9cd1c" } +subspace-core-primitives = { git = "https://github.com/subspace/subspace", rev = "bd435100200b3dcce6d6f50534d52e3cd039ca8e" } +subspace-farmer = { git = "https://github.com/subspace/subspace", rev = "bd435100200b3dcce6d6f50534d52e3cd039ca8e", default-features = false } +subspace-farmer-components = { git = "https://github.com/subspace/subspace", rev = "bd435100200b3dcce6d6f50534d52e3cd039ca8e" } +subspace-networking = { git = "https://github.com/subspace/subspace", rev = "bd435100200b3dcce6d6f50534d52e3cd039ca8e" } 
+subspace-rpc-primitives = { git = "https://github.com/subspace/subspace", rev = "bd435100200b3dcce6d6f50534d52e3cd039ca8e" }
+subspace-runtime = { git = "https://github.com/subspace/subspace", rev = "bd435100200b3dcce6d6f50534d52e3cd039ca8e" }
+subspace-runtime-primitives = { git = "https://github.com/subspace/subspace", rev = "bd435100200b3dcce6d6f50534d52e3cd039ca8e" }
+subspace-service = { git = "https://github.com/subspace/subspace", rev = "bd435100200b3dcce6d6f50534d52e3cd039ca8e" }
+tokio = { version = "1.34.0", features = ["fs", "rt", "tracing", "macros", "parking_lot", "rt-multi-thread", "signal"] }
+tokio-stream = { version = "0.1", features = ["sync", "time"] }
+tracing = "0.1"
diff --git a/sdk/node/src/builder.rs b/sdk/node/src/builder.rs
new file mode 100644
index 00000000..cfbe3cbc
--- /dev/null
+++ b/sdk/node/src/builder.rs
@@ -0,0 +1,169 @@
+use std::collections::HashSet;
+use std::num::NonZeroUsize;
+use std::path::Path;
+
+use derivative::Derivative;
+use derive_builder::Builder;
+use derive_more::{Deref, DerefMut, Display, From};
+use sc_service::BlocksPruning;
+use sdk_dsn::{Dsn, DsnBuilder};
+use sdk_substrate::{
+    Base, BaseBuilder, NetworkBuilder, OffchainWorkerBuilder, PruningMode, Role, RpcBuilder,
+    StorageMonitor,
+};
+use sdk_utils::ByteSize;
+use serde::{Deserialize, Serialize};
+
+use super::{ChainSpec, Farmer, Node};
+use crate::domains::builder::DomainConfig;
+
+/// Wrapper with default value for piece cache size
+#[derive(
+    Debug, Clone, Derivative, Deserialize, Serialize, PartialEq, Eq, From, Deref, DerefMut, Display,
+)]
+#[derivative(Default)]
+#[serde(transparent)]
+/// Size of cache of pieces that node produces
+/// TODO: Set it to 1 GB once DSN is fixed
+pub struct PieceCacheSize(#[derivative(Default(value = "ByteSize::gib(3)"))] pub(crate) ByteSize);
+
+/// Wrapper with default value for segment publish concurrent jobs
+#[derive(
+    Debug, Clone, Derivative, Deserialize, Serialize, PartialEq, Eq, From, Deref, DerefMut, Display,
+)]
+#[derivative(Default)]
+#[serde(transparent)]
+pub struct SegmentPublishConcurrency(
+    #[derivative(Default(value = "NonZeroUsize::new(10).expect(\"10 > 0\")"))]
+    pub(crate) NonZeroUsize,
+);
+
+/// Node builder
+#[derive(Debug, Clone, Derivative, Builder, Deserialize, Serialize, PartialEq)]
+#[derivative(Default(bound = ""))]
+#[builder(pattern = "owned", build_fn(private, name = "_build"), name = "Builder")]
+#[non_exhaustive]
+pub struct Config<F: Farmer> {
+    /// Max number of segments that can be published concurrently, impacts
+    /// RAM usage and network bandwidth.
+    #[builder(setter(into), default)]
+    #[serde(default, skip_serializing_if = "sdk_utils::is_default")]
+    pub segment_publish_concurrency: SegmentPublishConcurrency,
+    /// Should we sync blocks from the DSN?
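+    /// (When enabled, the node fetches archived history segments from the
+    /// distributed storage network in addition to regular Substrate sync.)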
+    #[builder(default)]
+    #[serde(default, skip_serializing_if = "sdk_utils::is_default")]
+    pub sync_from_dsn: bool,
+    #[doc(hidden)]
+    #[builder(
+        setter(into, strip_option),
+        field(type = "BaseBuilder", build = "self.base.build()")
+    )]
+    #[serde(flatten, skip_serializing_if = "sdk_utils::is_default")]
+    pub base: Base,
+    /// DSN settings
+    #[builder(setter(into), default)]
+    #[serde(default, skip_serializing_if = "sdk_utils::is_default")]
+    pub dsn: Dsn,
+    /// Storage monitor settings
+    #[builder(setter(into), default)]
+    #[serde(default, skip_serializing_if = "sdk_utils::is_default")]
+    pub storage_monitor: Option<StorageMonitor>,
+    /// Enables subspace block relayer
+    #[builder(default)]
+    #[serde(default, skip_serializing_if = "sdk_utils::is_default")]
+    pub enable_subspace_block_relay: bool,
+    #[builder(setter(skip), default)]
+    #[serde(skip, default)]
+    _farmer: std::marker::PhantomData<F>,
+    /// Optional domain configuration
+    #[builder(setter(into), default)]
+    #[serde(default, skip_serializing_if = "sdk_utils::is_default")]
+    pub domain: Option<DomainConfig>,
+    /// Flag indicating if the node is authority for Proof of time consensus
+    #[builder(setter(into), default)]
+    #[serde(default, skip_serializing_if = "sdk_utils::is_default")]
+    pub is_timekeeper: bool,
+    /// CPU cores that timekeeper can use
+    #[builder(setter(into), default)]
+    #[serde(default, skip_serializing_if = "sdk_utils::is_default")]
+    pub timekeeper_cpu_cores: HashSet<usize>,
+    /// Proof of time entropy
+    #[builder(setter(into), default)]
+    #[serde(default, skip_serializing_if = "sdk_utils::is_default")]
+    pub pot_external_entropy: Option<Vec<u8>>,
+}
+
+impl<F: Farmer> Config<F> {
+    /// Dev configuration
+    pub fn dev() -> Builder<F> {
+        Builder::dev()
+    }
+
+    /// Gemini 3g configuration
+    pub fn gemini_3g() -> Builder<F> {
+        Builder::gemini_3g()
+    }
+
+    /// Devnet configuration
+    pub fn devnet() -> Builder<F> {
+        Builder::devnet()
+    }
+}
+
+impl<F: Farmer> Builder<F> {
+    /// Dev chain configuration
+    pub fn dev() -> Self {
+        Self::new()
+            .is_timekeeper(true)
+            .force_authoring(true)
+            .network(NetworkBuilder::dev())
+            .dsn(DsnBuilder::dev())
+            .rpc(RpcBuilder::dev())
+            .offchain_worker(OffchainWorkerBuilder::dev())
+    }
+
+    /// Gemini 3g configuration
+    pub fn gemini_3g() -> Self {
+        Self::new()
+            .network(NetworkBuilder::gemini_3g())
+            .dsn(DsnBuilder::gemini_3g())
+            .rpc(RpcBuilder::gemini_3g())
+            .offchain_worker(OffchainWorkerBuilder::gemini_3g())
+            .role(Role::Authority)
+            .state_pruning(PruningMode::ArchiveAll)
+            .blocks_pruning(BlocksPruning::Some(256))
+    }
+
+    /// Devnet chain configuration
+    pub fn devnet() -> Self {
+        Self::new()
+            .network(NetworkBuilder::devnet())
+            .dsn(DsnBuilder::devnet())
+            .rpc(RpcBuilder::devnet())
+            .offchain_worker(OffchainWorkerBuilder::devnet())
+            .role(Role::Authority)
+            .state_pruning(PruningMode::ArchiveAll)
+            .blocks_pruning(BlocksPruning::Some(256))
+    }
+
+    /// Get configuration for saving on disk
+    pub fn configuration(self) -> Config<F> {
+        self._build().expect("Build is infallible")
+    }
+
+    /// New builder
+    pub fn new() -> Self {
+        Self::default()
+    }
+
+    /// Start a node with supplied parameters
+    pub async fn build(
+        self,
+        directory: impl AsRef<Path>,
+        chain_spec: ChainSpec,
+    ) -> anyhow::Result<Node<F>> {
+        self.configuration().build(directory, chain_spec).await
+    }
+}
+
+sdk_substrate::derive_base!( @ Base => Builder);
diff --git a/sdk/node/src/chain_spec.rs b/sdk/node/src/chain_spec.rs
new file mode 100644
index 00000000..9f6140b9
--- /dev/null
+++ b/sdk/node/src/chain_spec.rs
@@ -0,0 +1,500 @@
+//! Subspace chain configurations.
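+//!
+//! Pre-baked specs (`gemini_3g`, `devnet_config`) are deserialized from JSON bundled
+//! with `sc-subspace-chain-specs`, while the `*_compiled` variants plus `dev_config`
+//! and `local_config` rebuild genesis from the runtime WASM. A minimal sketch of how a
+//! spec is typically consumed through the builder in this crate (hypothetical usage,
+//! assuming a `Farmer` implementation is in scope):
+//!
+//! ```ignore
+//! let chain_spec = crate::chain_spec::dev_config();
+//! let node = crate::Builder::dev().build("node-dir", chain_spec).await?;
+//! ```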
+ +use std::collections::BTreeSet; +use std::marker::PhantomData; +use std::num::NonZeroU32; + +use hex_literal::hex; +use parity_scale_codec::Encode; +use sc_service::{ChainType, NoExtension}; +use sc_subspace_chain_specs::{SerializableChainSpec, DEVNET_CHAIN_SPEC, GEMINI_3G_CHAIN_SPEC}; +use sc_telemetry::TelemetryEndpoints; +use sdk_utils::chain_spec as utils; +use sdk_utils::chain_spec::{chain_spec_properties, get_public_key_from_seed}; +use sp_consensus_subspace::FarmerPublicKey; +use sp_core::crypto::{Ss58Codec, UncheckedFrom}; +use sp_domains::storage::RawGenesis; +use sp_domains::{OperatorAllowList, OperatorPublicKey, RuntimeType}; +use sp_runtime::{BuildStorage, Percent}; +use subspace_core_primitives::PotKey; +use subspace_runtime::{ + AllowAuthoringBy, BalancesConfig, DomainsConfig, MaxDomainBlockSize, MaxDomainBlockWeight, + RuntimeConfigsConfig, RuntimeGenesisConfig, SubspaceConfig, SudoConfig, SystemConfig, + VestingConfig, MILLISECS_PER_BLOCK, WASM_BINARY, +}; +use subspace_runtime_primitives::{AccountId, Balance, BlockNumber, SSC}; + +use crate::domains::evm_chain_spec; +use crate::domains::evm_chain_spec::SpecId; + +const SUBSPACE_TELEMETRY_URL: &str = "wss://telemetry.subspace.network/submit/"; + +/// List of accounts which should receive token grants, amounts are specified in +/// SSC. +const TOKEN_GRANTS: &[(&str, u128)] = &[ + ("5Dns1SVEeDqnbSm2fVUqHJPCvQFXHVsgiw28uMBwmuaoKFYi", 3_000_000), + ("5DxtHHQL9JGapWCQARYUAWj4yDcwuhg9Hsk5AjhEzuzonVyE", 1_500_000), + ("5EHhw9xuQNdwieUkNoucq2YcateoMVJQdN8EZtmRy3roQkVK", 133_333), + ("5C5qYYCQBnanGNPGwgmv6jiR2MxNPrGnWYLPFEyV1Xdy2P3x", 178_889), + ("5GBWVfJ253YWVPHzWDTos1nzYZpa9TemP7FpQT9RnxaFN6Sz", 350_000), + ("5F9tEPid88uAuGbjpyegwkrGdkXXtaQ9sGSWEnYrfVCUCsen", 111_111), + ("5DkJFCv3cTBsH5y1eFT94DXMxQ3EmVzYojEA88o56mmTKnMp", 244_444), + ("5G23o1yxWgVNQJuL4Y9UaCftAFvLuMPCRe7BCARxCohjoHc9", 311_111), + ("5GhHwuJoK1b7uUg5oi8qUXxWHdfgzv6P5CQSdJ3ffrnPRgKM", 317_378), + ("5EqBwtqrCV427xCtTsxnb9X2Qay39pYmKNk9wD9Kd62jLS97", 300_000), + ("5D9pNnGCiZ9UqhBQn5n71WFVaRLvZ7znsMvcZ7PHno4zsiYa", 600_000), + ("5DXfPcXUcP4BG8LBSkJDrfFNApxjWySR6ARfgh3v27hdYr5S", 430_000), + ("5CXSdDJgzRTj54f9raHN2Z5BNPSMa2ETjqCTUmpaw3ECmwm4", 330_000), + ("5DqKxL7bQregQmUfFgzTMfRKY4DSvA1KgHuurZWYmxYSCmjY", 200_000), + ("5CfixiS93yTwHQbzzfn8P2tMxhKXdTx7Jam9htsD7XtiMFtn", 27_800), + ("5FZe9YzXeEXe7sK5xLR8yCmbU8bPJDTZpNpNbToKvSJBUiEo", 18_067), + ("5FZwEgsvZz1vpeH7UsskmNmTpbfXvAcojjgVfShgbRqgC1nx", 27_800), +]; + +/// Additional subspace specific genesis parameters. 
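+/// These flags gate rewards, storage access, balance transfers and domains at
+/// genesis; `confirmation_depth_k` sets how far behind the tip a block must be
+/// before it is considered final for archiving purposes.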
+pub struct GenesisParams { + enable_rewards: bool, + enable_storage_access: bool, + allow_authoring_by: AllowAuthoringBy, + pot_slot_iterations: NonZeroU32, + enable_domains: bool, + enable_balance_transfers: bool, + confirmation_depth_k: u32, +} + +struct GenesisDomainParams { + domain_name: String, + operator_allow_list: OperatorAllowList, + operator_signing_key: OperatorPublicKey, +} + +/// Chain spec type for the subspace +pub type ChainSpec = SerializableChainSpec; + +/// Gemini 3g chain spec +pub fn gemini_3g() -> ChainSpec { + ChainSpec::from_json_bytes(GEMINI_3G_CHAIN_SPEC.as_bytes()).expect("Always valid") +} + +/// Gemini 3g compiled chain spec +pub fn gemini_3g_compiled() -> ChainSpec { + ChainSpec::from_genesis( + // Name + "Subspace Gemini 3g", + // ID + "subspace_gemini_3g", + ChainType::Custom("Subspace Gemini 3g".to_string()), + || { + let sudo_account = + AccountId::from_ss58check("5DNwQTHfARgKoa2NdiUM51ZUow7ve5xG9S2yYdSbVQcnYxBA") + .expect("Wrong root account address"); + + let mut balances = vec![(sudo_account.clone(), 1_000 * SSC)]; + let vesting_schedules = TOKEN_GRANTS + .iter() + .flat_map(|&(account_address, amount)| { + let account_id = AccountId::from_ss58check(account_address) + .expect("Wrong vesting account address"); + let amount: Balance = amount * SSC; + + // TODO: Adjust start block to real value before mainnet launch + let start_block = 100_000_000; + let one_month_in_blocks = + u32::try_from(3600 * 24 * 30 * MILLISECS_PER_BLOCK / 1000) + .expect("One month of blocks always fits in u32; qed"); + + // Add balance so it can be locked + balances.push((account_id.clone(), amount)); + + [ + // 1/4 of tokens are released after 1 year. + (account_id.clone(), start_block, one_month_in_blocks * 12, 1, amount / 4), + // 1/48 of tokens are released every month after that for 3 more years. 
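+                        // (Together the two schedules release the full grant:
+                        // amount / 4 at the one-year cliff, then 36 * amount / 48
+                        // = 3 * amount / 4 over the following three years.)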
+ ( + account_id, + start_block + one_month_in_blocks * 12, + one_month_in_blocks, + 36, + amount / 48, + ), + ] + }) + .collect::>(); + subspace_genesis_config( + SpecId::Gemini, + WASM_BINARY.expect("Wasm binary must be built for Gemini"), + sudo_account.clone(), + balances, + vesting_schedules, + GenesisParams { + enable_rewards: false, + enable_storage_access: false, + allow_authoring_by: AllowAuthoringBy::RootFarmer( + FarmerPublicKey::unchecked_from(hex_literal::hex!( + "8aecbcf0b404590ddddc01ebacb205a562d12fdb5c2aa6a4035c1a20f23c9515" + )), + ), + // TODO: Adjust once we bench PoT on faster hardware + // About 1s on 6.0 GHz Raptor Lake CPU (14900K) + pot_slot_iterations: NonZeroU32::new(200_032_000).expect("Not zero; qed"), + enable_domains: true, + enable_balance_transfers: true, + confirmation_depth_k: 100, // TODO: Proper value here + }, + GenesisDomainParams { + domain_name: "nova".to_owned(), + operator_allow_list: OperatorAllowList::Operators(BTreeSet::from_iter(vec![ + sudo_account, + ])), + operator_signing_key: OperatorPublicKey::unchecked_from(hex!( + "aa3b05b4d649666723e099cf3bafc2f2c04160ebe0e16ddc82f72d6ed97c4b6b" + )), + }, + ) + }, + // Bootnodes + vec![], + // Telemetry + Some( + TelemetryEndpoints::new(vec![(SUBSPACE_TELEMETRY_URL.into(), 1)]) + .expect("Telemetry value is valid"), + ), + // Protocol ID + Some("subspace-gemini-3g"), + None, + // Properties + Some({ + let mut properties = chain_spec_properties(); + properties.insert( + "potExternalEntropy".to_string(), + serde_json::to_value(None::).expect("Serialization is not infallible; qed"), + ); + properties + }), + // Extensions + NoExtension::None, + ) +} + +/// Dev net raw configuration +pub fn devnet_config() -> ChainSpec { + ChainSpec::from_json_bytes(DEVNET_CHAIN_SPEC.as_bytes()).expect("Always valid") +} + +/// Dev net compiled configuration +pub fn devnet_config_compiled() -> ChainSpec { + ChainSpec::from_genesis( + // Name + "Subspace Dev network", + // ID + "subspace_devnet", + ChainType::Custom("Testnet".to_string()), + || { + let sudo_account = + AccountId::from_ss58check("5CXTmJEusve5ixyJufqHThmy4qUrrm6FyLCR7QfE4bbyMTNC") + .expect("Wrong root account address"); + + let mut balances = vec![(sudo_account.clone(), 1_000 * SSC)]; + let vesting_schedules = TOKEN_GRANTS + .iter() + .flat_map(|&(account_address, amount)| { + let account_id = AccountId::from_ss58check(account_address) + .expect("Wrong vesting account address"); + let amount: Balance = amount * SSC; + + // TODO: Adjust start block to real value before mainnet launch + let start_block = 100_000_000; + let one_month_in_blocks = + u32::try_from(3600 * 24 * 30 * MILLISECS_PER_BLOCK / 1000) + .expect("One month of blocks always fits in u32; qed"); + + // Add balance so it can be locked + balances.push((account_id.clone(), amount)); + + [ + // 1/4 of tokens are released after 1 year. + (account_id.clone(), start_block, one_month_in_blocks * 12, 1, amount / 4), + // 1/48 of tokens are released every month after that for 3 more years. 
+ ( + account_id, + start_block + one_month_in_blocks * 12, + one_month_in_blocks, + 36, + amount / 48, + ), + ] + }) + .collect::>(); + subspace_genesis_config( + evm_chain_spec::SpecId::DevNet, + WASM_BINARY.expect("Wasm binary must be built for Gemini"), + sudo_account, + balances, + vesting_schedules, + GenesisParams { + enable_rewards: false, + enable_storage_access: false, + allow_authoring_by: AllowAuthoringBy::FirstFarmer, + pot_slot_iterations: NonZeroU32::new(150_000_000).expect("Not zero; qed"), + enable_domains: true, + enable_balance_transfers: true, + confirmation_depth_k: 100, // TODO: Proper value here + }, + GenesisDomainParams { + domain_name: "evm-domain".to_owned(), + operator_allow_list: OperatorAllowList::Anyone, + operator_signing_key: OperatorPublicKey::unchecked_from(hex!( + "aa3b05b4d649666723e099cf3bafc2f2c04160ebe0e16ddc82f72d6ed97c4b6b" + )), + }, + ) + }, + // Bootnodes + vec![], + // Telemetry + Some( + TelemetryEndpoints::new(vec![(SUBSPACE_TELEMETRY_URL.into(), 1)]) + .expect("Telemetry value is valid"), + ), + // Protocol ID + Some("subspace-devnet"), + None, + // Properties + Some({ + let mut properties = chain_spec_properties(); + properties.insert( + "potExternalEntropy".to_string(), + serde_json::to_value(None::).expect("Serialization is not infallible; qed"), + ); + properties + }), + // Extensions + None, + ) +} + +/// New dev chain spec +pub fn dev_config() -> ChainSpec { + let wasm_binary = WASM_BINARY.expect("Development wasm not available"); + + ChainSpec::from_genesis( + // Name + "Subspace development", + // ID + "subspace_dev", + ChainType::Development, + || { + subspace_genesis_config( + evm_chain_spec::SpecId::Dev, + wasm_binary, + // Sudo account + utils::get_account_id_from_seed("Alice"), + // Pre-funded accounts + vec![ + (utils::get_account_id_from_seed("Alice"), 1_000 * SSC), + (utils::get_account_id_from_seed("Bob"), 1_000 * SSC), + (utils::get_account_id_from_seed("Alice//stash"), 1_000 * SSC), + (utils::get_account_id_from_seed("Bob//stash"), 1_000 * SSC), + ], + vec![], + GenesisParams { + enable_rewards: false, + enable_balance_transfers: true, + enable_storage_access: false, + allow_authoring_by: AllowAuthoringBy::Anyone, + pot_slot_iterations: NonZeroU32::new(100_000_000).expect("Not zero; qed"), + enable_domains: true, + confirmation_depth_k: 100, + }, + GenesisDomainParams { + domain_name: "evm-domain".to_owned(), + operator_allow_list: OperatorAllowList::Anyone, + operator_signing_key: get_public_key_from_seed::("Alice"), + }, + ) + }, + // Bootnodes + vec![], + // Telemetry + None, + // Protocol ID + None, + None, + // Properties + Some({ + let mut properties = chain_spec_properties(); + properties.insert( + "potExternalEntropy".to_string(), + serde_json::to_value(None::).expect("Serialization is not infallible; qed"), + ); + properties + }), + // Extensions + None, + ) +} + +/// New local chain spec +pub fn local_config() -> ChainSpec { + let wasm_binary = WASM_BINARY.expect("Development wasm not available"); + + ChainSpec::from_genesis( + // Name + "Subspace local", + // ID + "subspace_local", + ChainType::Local, + || { + subspace_genesis_config( + evm_chain_spec::SpecId::Local, + wasm_binary, + // Sudo account + utils::get_account_id_from_seed("Alice"), + // Pre-funded accounts + vec![ + (utils::get_account_id_from_seed("Alice"), 1_000 * SSC), + (utils::get_account_id_from_seed("Bob"), 1_000 * SSC), + (utils::get_account_id_from_seed("Charlie"), 1_000 * SSC), + (utils::get_account_id_from_seed("Dave"), 1_000 * SSC), + 
(utils::get_account_id_from_seed("Eve"), 1_000 * SSC), + (utils::get_account_id_from_seed("Ferdie"), 1_000 * SSC), + (utils::get_account_id_from_seed("Alice//stash"), 1_000 * SSC), + (utils::get_account_id_from_seed("Bob//stash"), 1_000 * SSC), + (utils::get_account_id_from_seed("Charlie//stash"), 1_000 * SSC), + (utils::get_account_id_from_seed("Dave//stash"), 1_000 * SSC), + (utils::get_account_id_from_seed("Eve//stash"), 1_000 * SSC), + (utils::get_account_id_from_seed("Ferdie//stash"), 1_000 * SSC), + ], + vec![], + GenesisParams { + enable_rewards: false, + enable_balance_transfers: true, + enable_storage_access: false, + allow_authoring_by: AllowAuthoringBy::Anyone, + pot_slot_iterations: NonZeroU32::new(100_000_000).expect("Not zero; qed"), + enable_domains: true, + confirmation_depth_k: 1, + }, + GenesisDomainParams { + domain_name: "evm-domain".to_owned(), + operator_allow_list: OperatorAllowList::Anyone, + operator_signing_key: get_public_key_from_seed::("Alice"), + }, + ) + }, + // Bootnodes + vec![], + // Telemetry + None, + // Protocol ID + None, + None, + // Properties + Some({ + let mut properties = chain_spec_properties(); + properties.insert( + "potExternalEntropy".to_string(), + serde_json::to_value(None::).expect("Serialization is not infallible; qed"), + ); + properties + }), + // Extensions + None, + ) +} + +/// Configure initial storage state for FRAME modules. +fn subspace_genesis_config( + evm_domain_spec_id: evm_chain_spec::SpecId, + wasm_binary: &[u8], + sudo_account: AccountId, + balances: Vec<(AccountId, Balance)>, + // who, start, period, period_count, per_period + vesting: Vec<(AccountId, BlockNumber, BlockNumber, u32, Balance)>, + genesis_params: GenesisParams, + genesis_domain_params: GenesisDomainParams, +) -> RuntimeGenesisConfig { + let GenesisParams { + enable_rewards, + enable_storage_access, + allow_authoring_by, + pot_slot_iterations, + enable_domains, + enable_balance_transfers, + confirmation_depth_k, + } = genesis_params; + + let domain_genesis_config = evm_chain_spec::get_testnet_genesis_by_spec_id(evm_domain_spec_id); + + let raw_genesis_storage = { + let storage = domain_genesis_config + .build_storage() + .expect("Failed to build genesis storage from genesis runtime config"); + let raw_genesis = RawGenesis::from_storage(storage); + raw_genesis.encode() + }; + + RuntimeGenesisConfig { + domains: DomainsConfig { + genesis_domain: Some(sp_domains::GenesisDomain { + runtime_name: "evm".into(), + runtime_type: RuntimeType::Evm, + runtime_version: evm_domain_runtime::VERSION, + + // Domain config, mainly for placeholder the concrete value TBD + raw_genesis_storage, + owner_account_id: sudo_account.clone(), + domain_name: genesis_domain_params.domain_name, + max_block_size: MaxDomainBlockSize::get(), + max_block_weight: MaxDomainBlockWeight::get(), + bundle_slot_probability: (1, 1), + target_bundles_per_block: 10, + operator_allow_list: genesis_domain_params.operator_allow_list, + signing_key: genesis_domain_params.operator_signing_key, + nomination_tax: Percent::from_percent(5), + minimum_nominator_stake: 100 * SSC, + }), + }, + system: SystemConfig { + // Add Wasm runtime to storage. + code: wasm_binary.to_vec(), + ..Default::default() + }, + balances: BalancesConfig { balances }, + transaction_payment: Default::default(), + sudo: SudoConfig { + // Assign network admin rights. 
+ key: Some(sudo_account), + }, + subspace: SubspaceConfig { + enable_rewards, + enable_storage_access, + allow_authoring_by, + pot_slot_iterations, + phantom: PhantomData, + }, + vesting: VestingConfig { vesting }, + runtime_configs: RuntimeConfigsConfig { + enable_domains, + enable_balance_transfers, + confirmation_depth_k, + }, + } +} + +#[cfg(test)] +mod tests { + #![allow(clippy::unwrap_used)] + + use super::*; + + #[test] + fn test_chain_specs() { + gemini_3g_compiled(); + gemini_3g(); + devnet_config_compiled(); + devnet_config(); + dev_config(); + local_config(); + } +} diff --git a/sdk/node/src/domains/builder.rs b/sdk/node/src/domains/builder.rs new file mode 100644 index 00000000..be4d8ed4 --- /dev/null +++ b/sdk/node/src/domains/builder.rs @@ -0,0 +1,462 @@ +use std::path::Path; +use std::sync::Arc; + +use anyhow::{anyhow, Result}; +use cross_domain_message_gossip::{ChainTxPoolMsg, Message}; +use derivative::Derivative; +use derive_builder::Builder; +use domain_client_operator::{BootstrapResult, Bootstrapper}; +use domain_runtime_primitives::opaque::Block as DomainBlock; +use futures::future; +use futures::future::Either::{Left, Right}; +use sc_consensus_subspace::block_import::BlockImportingNotification; +use sc_consensus_subspace::notification::SubspaceNotificationStream; +use sc_consensus_subspace::slot_worker::NewSlotNotification; +use sc_network::NetworkService; +use sc_transaction_pool_api::OffchainTransactionPoolFactory; +use sc_utils::mpsc::{TracingUnboundedReceiver, TracingUnboundedSender}; +use sdk_substrate::{Base, BaseBuilder}; +use sdk_utils::{DestructorSet, MultiaddrWithPeerId, TaskOutput}; +use serde::{Deserialize, Serialize}; +use sp_core::H256; +use sp_domains::{DomainId, OperatorId}; +use sp_runtime::traits::Block as BlockT; +use subspace_runtime::RuntimeApi as CRuntimeApi; +use subspace_runtime_primitives::opaque::Block as CBlock; +use subspace_service::transaction_pool::FullPool; +use subspace_service::FullClient as CFullClient; +use tokio::sync::{oneshot, RwLock}; + +use crate::domains::domain::{Domain, DomainBuildingProgress}; +use crate::domains::domain_instance_starter::DomainInstanceStarter; +use crate::domains::evm_chain_spec; +use crate::ExecutorDispatch as CExecutorDispatch; + +/// Link to the consensus node +pub struct ConsensusNodeLink { + /// Consensus client + pub consensus_client: Arc>, + /// Consensus network + pub consensus_network: Arc>, + /// Block import notification stream for consensus chain + pub block_importing_notification_stream: + SubspaceNotificationStream>, + /// New slot notification stream for consensus chain + pub new_slot_notification_stream: SubspaceNotificationStream, + /// Reference to the consensus node's network sync service + pub consensus_sync_service: Arc>, + /// Consensus tx pool + pub consensus_transaction_pool: Arc< + FullPool< + CFullClient, + CBlock, + ::Header, + >, + >, + /// Cross domain message gossip worker's message sink + pub gossip_message_sink: TracingUnboundedSender, + /// Cross domain message receiver for the domain + pub domain_message_receiver: TracingUnboundedReceiver, + /// Domain boot node property read from chain-spec + pub chain_spec_domains_bootstrap_nodes: Vec, +} + +/// Domain node configuration +#[derive(Debug, Clone, Derivative, Builder, Deserialize, Serialize, PartialEq)] +#[builder(pattern = "owned", build_fn(private, name = "_build"))] +#[non_exhaustive] +pub struct DomainConfig { + /// Chain ID of domain node (must be same as the consensus node's chain id) + #[builder(setter(into), 
default)] + #[serde(default, skip_serializing_if = "sdk_utils::is_default")] + pub chain_id: String, + + /// Uniquely identifies a domain + #[builder(setter(into), default)] + #[serde(default, skip_serializing_if = "sdk_utils::is_default")] + pub domain_id: DomainId, + + /// Operator Id + #[builder(setter(into), default)] + #[serde(default, skip_serializing_if = "sdk_utils::is_default")] + pub maybe_operator_id: Option, + + /// Additional arguments to pass to domain instance starter + #[builder(setter(into), default)] + #[serde(default, skip_serializing_if = "sdk_utils::is_default")] + pub additional_args: Vec, + + #[doc(hidden)] + #[builder( + setter(into, strip_option), + field(type = "BaseBuilder", build = "self.base.build()") + )] + #[serde(flatten, skip_serializing_if = "sdk_utils::is_default")] + pub base: Base, +} + +impl Default for DomainConfig { + fn default() -> Self { + DomainConfig { + chain_id: "".to_string(), + domain_id: Default::default(), + maybe_operator_id: None, + additional_args: vec![], + base: Default::default(), + } + } +} + +sdk_substrate::derive_base!(@ Base => DomainConfigBuilder); + +impl DomainConfig { + /// Dev configuraiton + pub fn dev() -> DomainConfigBuilder { + DomainConfigBuilder::dev() + } + + /// Gemini 3g configuraiton + pub fn gemini_3g() -> DomainConfigBuilder { + DomainConfigBuilder::gemini_3g() + } + + /// Devnet configuraiton + pub fn devnet() -> DomainConfigBuilder { + DomainConfigBuilder::devnet() + } +} + +impl DomainConfigBuilder { + /// New builder + pub fn new() -> Self { + Self::default() + } + + /// Dev chain configuration + pub fn dev() -> Self { + Self::new().chain_id("dev").domain_id(DomainId::new(0)).dev_key_seed("//Alice") + } + + /// Gemini 3g configuration + pub fn gemini_3g() -> Self { + Self::new().chain_id("gemini-3g").domain_id(DomainId::new(0)) + } + + /// Devnet chain configuration + pub fn devnet() -> Self { + Self::new().chain_id("devnet").domain_id(DomainId::new(0)) + } + + /// Get configuration for saving on disk + pub fn configuration(self) -> DomainConfig { + self._build().expect("Build is infallible") + } + + /// Build a domain node + pub async fn build( + self, + directory: impl AsRef + Send + 'static, + consensus_node_link: ConsensusNodeLink, + ) -> Result { + self.configuration().build(directory, consensus_node_link).await + } +} + +impl DomainConfig { + /// Build a domain node + pub async fn build( + self, + directory: impl AsRef + Send + 'static, + consensus_node_link: ConsensusNodeLink, + ) -> Result { + let ConsensusNodeLink { + consensus_client, + consensus_network, + block_importing_notification_stream, + new_slot_notification_stream, + consensus_sync_service, + consensus_transaction_pool, + gossip_message_sink, + domain_message_receiver, + chain_spec_domains_bootstrap_nodes, + } = consensus_node_link; + let printable_domain_id: u32 = self.domain_id.into(); + let mut destructor_set = + DestructorSet::new(format!("domain-{}-worker-destructor", printable_domain_id)); + let shared_rpc_handler = Arc::new(RwLock::new(None)); + let shared_progress_data = Arc::new(RwLock::new(DomainBuildingProgress::Default)); + + let (bootstrapping_result_sender, bootstrapping_result_receiver) = oneshot::channel(); + let (bootstrapping_worker_drop_sender, bootstrapping_worker_drop_receiver) = + oneshot::channel(); + let domain_bootstrapper_join_handle = sdk_utils::task_spawn( + format!("domain/domain-{}/bootstrapping", printable_domain_id), + { + let consensus_client = consensus_client.clone(); + let shared_progress_data = 
shared_progress_data.clone(); + async move { + *shared_progress_data.write().await = DomainBuildingProgress::BuildingStarted; + let bootstrapper = + Bootstrapper::::new(consensus_client.clone()); + match future::select( + Box::pin(bootstrapper.fetch_domain_bootstrap_info(self.domain_id)), + bootstrapping_worker_drop_receiver, + ) + .await + { + Left((result, _)) => { + let result = result + .map_err(|bootstrapping_error| { + anyhow!( + "Error while bootstrapping the domain:{} : {:?}", + printable_domain_id, + bootstrapping_error + ) + }) + .map(TaskOutput::Value); + let _ = bootstrapping_result_sender.send(result); + } + Right(_) => { + tracing::info!( + "Received drop signal while bootstrapping the domain with \ + domain_id: {:?}. exiting...", + printable_domain_id + ); + let _ = bootstrapping_result_sender.send(Ok(TaskOutput::Cancelled( + format!( + "received cancellation signal while bootstrapping the domain: \ + {}.", + printable_domain_id + ), + ))); + } + }; + } + }, + ); + + destructor_set.add_async_destructor({ + async move { + let _ = bootstrapping_worker_drop_sender.send(()); + domain_bootstrapper_join_handle.await.expect( + "If joining is failing; that means the future being joined panicked, so we \ + need to propagate it; qed.", + ); + } + })?; + + let (domain_runner_result_sender, domain_runner_result_receiver) = oneshot::channel(); + let (domain_runner_drop_sender, mut domain_runner_drop_receiver) = oneshot::channel(); + let domain_runner_join_handle = + sdk_utils::task_spawn(format!("domain/domain-{}/running", printable_domain_id), { + let shared_rpc_handler = shared_rpc_handler.clone(); + let shared_progress_data = shared_progress_data.clone(); + async move { + let bootstrap_result = match future::select( + bootstrapping_result_receiver, + &mut domain_runner_drop_receiver, + ) + .await + { + Left((wrapped_result, _)) => match wrapped_result { + Ok(result) => match result { + Ok(boostrap_task_output) => match boostrap_task_output { + TaskOutput::Value(bootstrap_result) => bootstrap_result, + TaskOutput::Cancelled(reason) => { + tracing::info!( + "Bootstrapping task was cancelled for reason: {:?} \ + for domain_id: {:?}. exiting...", + reason, + printable_domain_id + ); + let _ = domain_runner_result_sender.send(Ok( + TaskOutput::Cancelled(format!( + "Bootstrapping task was cancelled for reason: \ + {:?} for domain_id: {:?}. exiting...", + reason, printable_domain_id + )), + )); + return; + } + }, + Err(bootstrap_error) => { + let _ = domain_runner_result_sender.send(Err(anyhow!( + "received an error from domain bootstrapper for domain \ + id: {} error: {}", + printable_domain_id, + bootstrap_error + ))); + return; + } + }, + Err(recv_err) => { + let _ = domain_runner_result_sender.send(Err(anyhow!( + "unable to receive message from domain bootstrapper for \ + domain id: {} due to an error: {}", + printable_domain_id, + recv_err + ))); + return; + } + }, + Right(_) => { + tracing::info!( + "Received drop signal while bootstrapping the domain with \ + domain_id: {:?}. 
exiting...", + self.domain_id + ); + let _ = domain_runner_result_sender.send(Ok(TaskOutput::Cancelled( + format!( + "received cancellation signal while waiting for bootstrapping \ + result for domain: {}.", + printable_domain_id + ), + ))); + return; + } + }; + + *shared_progress_data.write().await = DomainBuildingProgress::Bootstrapped; + + let BootstrapResult { + domain_instance_data, + domain_created_at, + imported_block_notification_stream, + } = bootstrap_result; + + let runtime_type = domain_instance_data.runtime_type.clone(); + + let domain_spec_result = evm_chain_spec::create_domain_spec( + self.chain_id.as_str(), + domain_instance_data.raw_genesis, + ); + + let domain_spec = match domain_spec_result { + Ok(domain_spec) => domain_spec, + Err(domain_spec_creation_error) => { + let _ = domain_runner_result_sender.send(Err(anyhow!( + "Error while creating domain spec for the domain: {} Error: {:?}", + printable_domain_id, + domain_spec_creation_error + ))); + return; + } + }; + + let domains_directory = + directory.as_ref().join(format!("domain-{}", printable_domain_id)); + let mut service_config = + self.base.configuration(domains_directory, domain_spec).await; + + if service_config.network.boot_nodes.is_empty() { + service_config.network.boot_nodes = chain_spec_domains_bootstrap_nodes + .clone() + .into_iter() + .map(Into::into) + .collect::>(); + } + + let domain_starter = DomainInstanceStarter { + service_config, + consensus_network, + maybe_operator_id: self.maybe_operator_id, + domain_id: self.domain_id, + runtime_type, + additional_arguments: self.additional_args.clone(), + consensus_client, + block_importing_notification_stream, + new_slot_notification_stream, + consensus_sync_service, + consensus_offchain_tx_pool_factory: OffchainTransactionPoolFactory::new( + consensus_transaction_pool.clone(), + ), + gossip_message_sink, + domain_message_receiver, + }; + + *shared_progress_data.write().await = DomainBuildingProgress::PreparingToStart; + + let maybe_start_data = domain_starter + .prepare_for_start(domain_created_at, imported_block_notification_stream) + .await; + let (rpc_handler, domain_start_handle) = match maybe_start_data { + Ok(start_data) => start_data, + Err(start_error) => { + let _ = domain_runner_result_sender.send(Err(anyhow!( + "Error while preparing to start domain for the domain id: {} \ + Error: {:?}", + printable_domain_id, + start_error + ))); + return; + } + }; + + let shared_rpc_handler = shared_rpc_handler.clone(); + shared_rpc_handler.write().await.replace(rpc_handler); + + *shared_progress_data.write().await = DomainBuildingProgress::Starting; + + match future::select(domain_start_handle, &mut domain_runner_drop_receiver) + .await + { + Left((wrapped_result, _)) => match wrapped_result { + Ok(result) => match result { + Ok(_) => { + let _ = + domain_runner_result_sender.send(Ok(TaskOutput::Value(()))); + } + Err(run_error) => { + let _ = domain_runner_result_sender.send(Err(anyhow!( + "received an error while trying to run the domain id: {} \ + error: {}", + printable_domain_id, + run_error + ))); + } + }, + Err(join_error) => { + let _ = domain_runner_result_sender.send(Err(anyhow!( + "unable to join domain runner for domain id: {} due to an \ + error: {}", + printable_domain_id, + join_error + ))); + } + }, + Right(_) => { + tracing::info!( + "Received drop signal while running the domain with domain_id: \ + {:?}. 
exiting...", + self.domain_id + ); + let _ = domain_runner_result_sender.send(Ok(TaskOutput::Cancelled( + format!( + "Received cancellation signal while waiting for domain runner \ + for domain: {}.", + printable_domain_id + ), + ))); + } + }; + } + }); + + destructor_set.add_async_destructor({ + async move { + let _ = domain_runner_drop_sender.send(()); + domain_runner_join_handle.await.expect( + "If joining is failing; that means the future being joined panicked, so we \ + need to propagate it; qed.", + ); + } + })?; + + Ok(Domain { + _destructors: destructor_set, + rpc_handlers: shared_rpc_handler, + domain_runner_result_receiver, + current_building_progress: shared_progress_data, + }) + } +} diff --git a/sdk/node/src/domains/domain.rs b/sdk/node/src/domains/domain.rs new file mode 100644 index 00000000..46b5b5ed --- /dev/null +++ b/sdk/node/src/domains/domain.rs @@ -0,0 +1,49 @@ +use std::sync::Arc; + +use derivative::Derivative; +use sc_service::RpcHandlers; +use sdk_utils::{DestructorSet, TaskOutput}; + +/// Progress of Domain +#[derive(Derivative)] +#[derivative(Debug)] +pub enum DomainBuildingProgress { + Default, + BuildingStarted, + Bootstrapped, + PreparingToStart, + Starting, +} + +/// Domain structure +#[derive(Derivative)] +#[derivative(Debug)] +#[must_use = "Domain should be closed"] +pub struct Domain { + #[doc(hidden)] + pub _destructors: DestructorSet, + /// Rpc Handlers for Domain node + #[derivative(Debug = "ignore")] + pub rpc_handlers: Arc>>, + /// Domain building progress tracker + pub current_building_progress: Arc>, + /// Oneshot channel to receive result of domain runner + #[derivative(Debug = "ignore")] + pub domain_runner_result_receiver: + tokio::sync::oneshot::Receiver>>, +} + +impl Domain { + /// Shuts down domain node + pub async fn close(self) -> anyhow::Result<()> { + self._destructors.async_drop().await?; + let output = self.domain_runner_result_receiver.await??; + match output { + TaskOutput::Value(_) => Ok(()), + TaskOutput::Cancelled(reason) => { + tracing::warn!("Domain runner task was cancelled due to reason: {}", reason); + Ok(()) + } + } + } +} diff --git a/sdk/node/src/domains/domain_instance_starter.rs b/sdk/node/src/domains/domain_instance_starter.rs new file mode 100644 index 00000000..5546df01 --- /dev/null +++ b/sdk/node/src/domains/domain_instance_starter.rs @@ -0,0 +1,162 @@ +use std::sync::Arc; + +use cross_domain_message_gossip::ChainTxPoolMsg; +use domain_client_operator::OperatorStreams; +use domain_eth_service::provider::EthProvider; +use domain_eth_service::DefaultEthConfig; +use domain_runtime_primitives::opaque::Block as DomainBlock; +use domain_service::{FullBackend, FullClient}; +use futures::StreamExt; +use sc_client_api::ImportNotifications; +use sc_consensus_subspace::block_import::BlockImportingNotification; +use sc_consensus_subspace::notification::SubspaceNotificationStream; +use sc_consensus_subspace::slot_worker::NewSlotNotification; +use sc_network::NetworkService; +use sc_service::{BasePath, Configuration, RpcHandlers}; +use sc_transaction_pool_api::OffchainTransactionPoolFactory; +use sc_utils::mpsc::{TracingUnboundedReceiver, TracingUnboundedSender}; +use sp_core::H256; +use sp_domains::{DomainId, OperatorId, RuntimeType}; +use sp_runtime::traits::NumberFor; +use subspace_runtime::RuntimeApi as CRuntimeApi; +use subspace_runtime_primitives::opaque::Block as CBlock; +use subspace_service::FullClient as CFullClient; +use tokio::task::JoinHandle; + +use 
crate::domains::evm_domain_executor_dispatch::EVMDomainExecutorDispatch; +use crate::domains::utils::AccountId20; +use crate::ExecutorDispatch as CExecutorDispatch; + +/// `DomainInstanceStarter` used to start a domain instance node based on the +/// given bootstrap result +pub struct DomainInstanceStarter { + pub service_config: Configuration, + pub maybe_operator_id: Option, + pub domain_id: DomainId, + pub runtime_type: RuntimeType, + pub additional_arguments: Vec, + pub consensus_client: Arc>, + pub consensus_network: Arc>, + pub block_importing_notification_stream: + SubspaceNotificationStream>, + pub new_slot_notification_stream: SubspaceNotificationStream, + pub consensus_sync_service: Arc>, + pub consensus_offchain_tx_pool_factory: OffchainTransactionPoolFactory, + pub domain_message_receiver: TracingUnboundedReceiver, + pub gossip_message_sink: TracingUnboundedSender, +} + +impl DomainInstanceStarter { + pub async fn prepare_for_start( + self, + domain_created_at: NumberFor, + imported_block_notification_stream: ImportNotifications, + ) -> anyhow::Result<(RpcHandlers, JoinHandle>)> { + let DomainInstanceStarter { + domain_id, + consensus_network, + maybe_operator_id, + runtime_type, + mut additional_arguments, + service_config, + consensus_client, + block_importing_notification_stream, + new_slot_notification_stream, + consensus_sync_service, + consensus_offchain_tx_pool_factory, + domain_message_receiver, + gossip_message_sink, + } = self; + + let block_importing_notification_stream = || { + block_importing_notification_stream.subscribe().then( + |block_importing_notification| async move { + ( + block_importing_notification.block_number, + block_importing_notification.acknowledgement_sender, + ) + }, + ) + }; + + let new_slot_notification_stream = || { + new_slot_notification_stream.subscribe().then(|slot_notification| async move { + ( + slot_notification.new_slot_info.slot, + slot_notification.new_slot_info.global_randomness, + ) + }) + }; + + let operator_streams = OperatorStreams { + // TODO: proper value + consensus_block_import_throttling_buffer_size: 10, + block_importing_notification_stream: block_importing_notification_stream(), + imported_block_notification_stream, + new_slot_notification_stream: new_slot_notification_stream(), + _phantom: Default::default(), + acknowledgement_sender_stream: futures::stream::empty(), + }; + + match runtime_type { + RuntimeType::Evm => { + let eth_provider = EthProvider::< + evm_domain_runtime::TransactionConverter, + DefaultEthConfig< + FullClient< + DomainBlock, + evm_domain_runtime::RuntimeApi, + EVMDomainExecutorDispatch, + >, + FullBackend, + >, + >::new( + Some(BasePath::new(service_config.base_path.path())), + additional_arguments.drain(..), + ); + + let domain_params = domain_service::DomainParams { + domain_id, + domain_config: service_config, + domain_created_at, + maybe_operator_id, + consensus_client, + consensus_network, + consensus_offchain_tx_pool_factory, + consensus_network_sync_oracle: consensus_sync_service.clone(), + operator_streams, + gossip_message_sink, + domain_message_receiver, + provider: eth_provider, + skip_empty_bundle_production: true, + }; + + let mut domain_node = domain_service::new_full::< + _, + _, + _, + _, + _, + _, + evm_domain_runtime::RuntimeApi, + EVMDomainExecutorDispatch, + AccountId20, + _, + _, + >(domain_params) + .await + .map_err(anyhow::Error::new)?; + + let domain_start_join_handle = sdk_utils::task_spawn( + format!("domain-{}/start-domain", >::into(domain_id)), + async move { + 
domain_node.network_starter.start_network(); + domain_node.task_manager.future().await.map_err(anyhow::Error::new) + }, + ); + + Ok((domain_node.rpc_handlers.clone(), domain_start_join_handle)) + } + } + } +} diff --git a/sdk/node/src/domains/domain_node.rs b/sdk/node/src/domains/domain_node.rs new file mode 100644 index 00000000..c2accd42 --- /dev/null +++ b/sdk/node/src/domains/domain_node.rs @@ -0,0 +1,11 @@ +use derivative::Derivative; +use sdk_utils::DestructorSet; +use tokio::sync::oneshot; + +#[derive(Derivative)] +#[derivative(Debug)] +#[must_use = "Domain node should be closed"] +pub struct DomainNode { + pub domain_worker_result_receiver: oneshot::Receiver>, + pub _destructors: DestructorSet, +} diff --git a/sdk/node/src/domains/evm_chain_spec.rs b/sdk/node/src/domains/evm_chain_spec.rs new file mode 100644 index 00000000..f445a89e --- /dev/null +++ b/sdk/node/src/domains/evm_chain_spec.rs @@ -0,0 +1,238 @@ +//! System domain chain specs + +use std::str::FromStr; + +use evm_domain_runtime::{ + AccountId, BalancesConfig, EVMChainIdConfig, EVMConfig, Precompiles, RuntimeGenesisConfig, + SudoConfig, SystemConfig, WASM_BINARY, +}; +use hex_literal::hex; +use sc_service::{ChainSpec as _, ChainType}; +use sc_subspace_chain_specs::ExecutionChainSpec; +use sdk_utils::chain_spec::chain_spec_properties; +use sp_domains::storage::RawGenesis; +use subspace_runtime_primitives::SSC; + +/// Chain spec type for the system domain +pub type ChainSpec = ExecutionChainSpec; + +pub enum SpecId { + Dev, + Gemini, + DevNet, + Local, +} + +pub fn create_domain_spec(chain_id: &str, raw_genesis: RawGenesis) -> Result { + // The value of the `RuntimeGenesisConfig` doesn't matter since it will be + // overwritten later + let constructor = RuntimeGenesisConfig::default; + let mut chain_spec = match chain_id { + "dev" => development_config(constructor), + "gemini-3g" => gemini_3g_config(constructor), + "devnet" => devnet_config(constructor), + "" | "local" => local_testnet_config(constructor), + path => ChainSpec::from_json_file(std::path::PathBuf::from(path))?, + }; + + chain_spec.set_storage(raw_genesis.into_storage()); + + Ok(chain_spec) +} + +/// Development keys that will be injected automatically on polkadotjs apps +fn get_dev_accounts() -> Vec { + vec![ + // Alith key + AccountId::from(hex!("f24FF3a9CF04c71Dbc94D0b566f7A27B94566cac")), + // Baltathar key + AccountId::from(hex!("3Cd0A705a2DC65e5b1E1205896BaA2be8A07c6e0")), + // Charleth key + AccountId::from(hex!("798d4Ba9baf0064Ec19eB4F0a1a45785ae9D6DFc")), + // Dorothy + AccountId::from(hex!("773539d4Ac0e786233D90A233654ccEE26a613D9")), + ] +} + +pub fn get_testnet_genesis_by_spec_id(spec_id: SpecId) -> RuntimeGenesisConfig { + match spec_id { + SpecId::Dev => { + let accounts = get_dev_accounts(); + testnet_genesis( + accounts.clone(), + // Alith is Sudo + Some(accounts[0]), + ) + } + SpecId::Gemini => { + let sudo_account = AccountId::from_str("f31e60022e290708c17d6997c34de6a30d09438f") + .expect("Invalid Sudo account"); + testnet_genesis( + vec![ + // Sudo account + sudo_account, + ], + Some(sudo_account), + ) + } + SpecId::DevNet => { + let sudo_account = AccountId::from_str("b66a91845249464309fad766fd0ece8144547736") + .expect("Invalid Sudo account"); + testnet_genesis( + vec![ + // Sudo account + sudo_account, + ], + Some(sudo_account), + ) + } + SpecId::Local => { + let accounts = get_dev_accounts(); + testnet_genesis( + accounts.clone(), + // Alith is sudo + Some(accounts[0]), + ) + } + } +} + +/// Development config +pub fn development_config 
RuntimeGenesisConfig + 'static + Send + Sync>( + constructor: F, +) -> ChainSpec { + ChainSpec::from_genesis( + // Name + "Development", + // ID + "evm_domain_dev", + ChainType::Development, + constructor, + vec![], + None, + None, + None, + Some(chain_spec_properties()), + None, + ) +} + +/// Local config +pub fn local_testnet_config RuntimeGenesisConfig + 'static + Send + Sync>( + constructor: F, +) -> ChainSpec { + ChainSpec::from_genesis( + // Name + "Local Testnet", + // ID + "evm_domain_local_testnet", + ChainType::Local, + constructor, + // Bootnodes + vec![], + // Telemetry + None, + // Protocol ID + Some("evm-local"), + None, + // Properties + Some(chain_spec_properties()), + // Extensions + None, + ) +} + +/// Gemini 3g config +pub fn gemini_3g_config RuntimeGenesisConfig + 'static + Send + Sync>( + constructor: F, +) -> ChainSpec { + ChainSpec::from_genesis( + // Name + "Subspace Gemini 3g EVM Domain", + // ID + "subspace_gemini_3g_evm_domain", + ChainType::Live, + constructor, + // Bootnodes + vec![], + // Telemetry + None, + // Protocol ID + Some("subspace-gemini-3g-evm-domain"), + None, + // Properties + Some(chain_spec_properties()), + // Extensions + None, + ) +} + +pub fn devnet_config RuntimeGenesisConfig + 'static + Send + Sync>( + constructor: F, +) -> ChainSpec { + ChainSpec::from_genesis( + // Name + "Subspace Devnet EVM Domain", + // ID + "subspace_devnet_evm_domain", + ChainType::Custom("Testnet".to_string()), + constructor, + // Bootnodes + vec![], + // Telemetry + None, + // Protocol ID + Some("subspace-devnet-evm-domain"), + None, + // Properties + Some(chain_spec_properties()), + // Extensions + None, + ) +} + +fn testnet_genesis( + endowed_accounts: Vec, + maybe_sudo_account: Option, +) -> RuntimeGenesisConfig { + // This is the simplest bytecode to revert without returning any data. + // We will pre-deploy it under all of our precompiles to ensure they can be + // called from within contracts. + // (PUSH1 0x00 PUSH1 0x00 REVERT) + let revert_bytecode = vec![0x60, 0x00, 0x60, 0x00, 0xFD]; + + RuntimeGenesisConfig { + system: SystemConfig { + code: WASM_BINARY.expect("WASM binary was not build, please build it!").to_vec(), + ..Default::default() + }, + sudo: SudoConfig { key: maybe_sudo_account }, + transaction_payment: Default::default(), + balances: BalancesConfig { + balances: endowed_accounts.iter().cloned().map(|k| (k, 1_000_000 * SSC)).collect(), + }, + // this is set to default and chain_id will be set into genesis during the domain + // instantiation on Consensus runtime. + evm_chain_id: EVMChainIdConfig::default(), + evm: EVMConfig { + // We need _some_ code inserted at the precompile address so that + // the evm will actually call the address. + accounts: Precompiles::used_addresses() + .into_iter() + .map(|addr| { + ( + addr, + fp_evm::GenesisAccount { + nonce: Default::default(), + balance: Default::default(), + storage: Default::default(), + code: revert_bytecode.clone(), + }, + ) + }) + .collect(), + ..Default::default() + }, + ..Default::default() + } +} diff --git a/sdk/node/src/domains/evm_domain_executor_dispatch.rs b/sdk/node/src/domains/evm_domain_executor_dispatch.rs new file mode 100644 index 00000000..4255e213 --- /dev/null +++ b/sdk/node/src/domains/evm_domain_executor_dispatch.rs @@ -0,0 +1,19 @@ +use sc_executor::NativeExecutionDispatch; + +/// EVM domain executor instance. 
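+/// Native executor glue for the EVM domain runtime: `dispatch` routes runtime-API
+/// calls into `evm_domain_runtime`, and benchmarking host functions are only wired
+/// in when the `runtime-benchmarks` feature is enabled.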
diff --git a/sdk/node/src/domains/evm_domain_executor_dispatch.rs b/sdk/node/src/domains/evm_domain_executor_dispatch.rs
new file mode 100644
index 00000000..4255e213
--- /dev/null
+++ b/sdk/node/src/domains/evm_domain_executor_dispatch.rs
@@ -0,0 +1,19 @@
+use sc_executor::NativeExecutionDispatch;
+
+/// EVM domain executor instance.
+pub struct EVMDomainExecutorDispatch;
+
+impl NativeExecutionDispatch for EVMDomainExecutorDispatch {
+    #[cfg(feature = "runtime-benchmarks")]
+    type ExtendHostFunctions = frame_benchmarking::benchmarking::HostFunctions;
+    #[cfg(not(feature = "runtime-benchmarks"))]
+    type ExtendHostFunctions = ();
+
+    fn dispatch(method: &str, data: &[u8]) -> Option<Vec<u8>> {
+        evm_domain_runtime::api::dispatch(method, data)
+    }
+
+    fn native_version() -> sc_executor::NativeVersion {
+        evm_domain_runtime::native_version()
+    }
+}
diff --git a/sdk/node/src/domains/mod.rs b/sdk/node/src/domains/mod.rs
new file mode 100644
index 00000000..d8a19018
--- /dev/null
+++ b/sdk/node/src/domains/mod.rs
@@ -0,0 +1,7 @@
+pub mod builder;
+pub mod domain;
+pub mod domain_instance_starter;
+pub mod domain_node;
+pub mod evm_chain_spec;
+pub mod evm_domain_executor_dispatch;
+pub mod utils;
diff --git a/sdk/node/src/domains/utils.rs b/sdk/node/src/domains/utils.rs
new file mode 100644
index 00000000..6078ee89
--- /dev/null
+++ b/sdk/node/src/domains/utils.rs
@@ -0,0 +1,14 @@
+pub use evm_domain_runtime::AccountId as AccountId20;
+use sp_core::crypto::AccountId32;
+use sp_core::{ByteArray, H160};
+use sp_runtime::traits::Convert;
+
+pub struct AccountId32ToAccountId20Converter;
+
+impl Convert<AccountId32, AccountId20> for AccountId32ToAccountId20Converter {
+    fn convert(acc: AccountId32) -> AccountId20 {
+        // Using the full hex key, truncating to the first 20 bytes (the first 40 hex
+        // chars)
+        H160::from_slice(&acc.as_slice()[0..20]).into()
+    }
+}
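+
+// Example (editor's sketch, not part of the original patch): truncation keeps
+// only the first 20 bytes of the 32-byte key, so:
+//
+//     let acc = AccountId32::new([0xab; 32]);
+//     let evm: AccountId20 = AccountId32ToAccountId20Converter::convert(acc);
+//     assert_eq!(evm, AccountId20::from(H160::from_slice(&[0xab; 20])));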
diff --git a/sdk/node/src/lib.rs b/sdk/node/src/lib.rs
new file mode 100644
index 00000000..7efc10aa
--- /dev/null
+++ b/sdk/node/src/lib.rs
@@ -0,0 +1,926 @@
+//! Crate with subspace node
+
+#![warn(
+    missing_docs,
+    clippy::dbg_macro,
+    clippy::unwrap_used,
+    clippy::disallowed_types,
+    unused_features
+)]
+#![cfg_attr(not(test), warn(unused_crate_dependencies))]
+#![feature(concat_idents)]
+
+use std::io;
+use std::path::Path;
+use std::sync::Arc;
+use std::time::Duration;
+
+use anyhow::Context;
+use cross_domain_message_gossip::GossipWorkerBuilder;
+use derivative::Derivative;
+use frame_system::pallet_prelude::BlockNumberFor;
+use futures::{FutureExt, Stream, StreamExt};
+use sc_consensus_subspace::archiver::SegmentHeadersStore;
+use sc_network::network_state::NetworkState;
+use sc_network::{NetworkService, NetworkStateInfo, SyncState};
+use sc_rpc_api::state::StateApiClient;
+use sc_service::Configuration;
+use sc_utils::mpsc::tracing_unbounded;
+use sdk_dsn::{DsnOptions, DsnShared};
+use sdk_traits::Farmer;
+use sdk_utils::{DestructorSet, MultiaddrWithPeerId, PublicKey, TaskOutput};
+use sp_consensus::SyncOracle;
+use sp_consensus_subspace::digests::PreDigest;
+use sp_core::traits::SpawnEssentialNamed;
+use sp_messenger::messages::ChainId;
+use sp_runtime::DigestItem;
+use subspace_core_primitives::{HistorySize, SegmentIndex};
+use subspace_farmer::node_client::NodeClient;
+use subspace_farmer::piece_cache::PieceCache as FarmerPieceCache;
+use subspace_farmer_components::FarmerProtocolInfo;
+use subspace_networking::{
+    PieceByIndexRequest, PieceByIndexResponse, SegmentHeaderRequest, SegmentHeaderResponse,
+};
+use subspace_rpc_primitives::MAX_SEGMENT_HEADERS_PER_REQUEST;
+use subspace_runtime::RuntimeApi;
+use subspace_runtime_primitives::opaque::{Block as OpaqueBlock, Header};
+use subspace_service::SubspaceConfiguration;
+use tokio::sync::oneshot;
+
+mod builder;
+pub mod chain_spec;
+mod domains;
+
+pub use builder::*;
+pub use domains::builder::{DomainConfig, DomainConfigBuilder};
+pub use domains::domain::Domain;
+pub use subspace_runtime::RuntimeEvent as Event;
+use tracing::Instrument;
+
+use crate::domains::builder::ConsensusNodeLink;
+
+/// Events from subspace pallet
+pub type SubspaceEvent = pallet_subspace::Event<subspace_runtime::Runtime>;
+
+/// Events from rewards pallet
+pub type RewardsEvent = pallet_rewards::Event<subspace_runtime::Runtime>;
+
+const SEGMENT_HEADERS_NUMBER_LIMIT: u64 = MAX_SEGMENT_HEADERS_PER_REQUEST as u64;
+
+fn pot_external_entropy(
+    consensus_chain_config: &Configuration,
+    config_pot_external_entropy: Option<Vec<u8>>,
+) -> Result<Vec<u8>, sc_service::Error> {
+    let maybe_chain_spec_pot_external_entropy = consensus_chain_config
+        .chain_spec
+        .properties()
+        .get("potExternalEntropy")
+        .map(|d| serde_json::from_value(d.clone()))
+        .transpose()
+        .map_err(|error| {
+            sc_service::Error::Other(format!("Failed to decode PoT initial key: {error:?}"))
+        })?
+        .flatten();
+    if maybe_chain_spec_pot_external_entropy.is_some()
+        && config_pot_external_entropy.is_some()
+        && maybe_chain_spec_pot_external_entropy != config_pot_external_entropy
+    {
+        tracing::warn!(
+            "--pot-external-entropy CLI argument was ignored due to chain spec having a different \
+             explicit value"
+        );
+    }
+    Ok(maybe_chain_spec_pot_external_entropy.or(config_pot_external_entropy).unwrap_or_default())
+}
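+
+// Example (editor's sketch, not part of the original patch): if the chain spec
+// pins `potExternalEntropy`, it wins and the CLI value is ignored (with a
+// warning); otherwise the CLI value (or empty entropy) is used:
+//
+//     let entropy = pot_external_entropy(&config, Some(b"from-cli".to_vec()))?;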
+
+impl Config {
+    /// Start a node with supplied parameters
+    pub async fn build<F: Farmer + 'static>(
+        self,
+        directory: impl AsRef<Path>,
+        chain_spec: ChainSpec,
+    ) -> anyhow::Result<Node<F>> {
+        let Self {
+            base,
+            mut dsn,
+            sync_from_dsn,
+            storage_monitor,
+            enable_subspace_block_relay,
+            is_timekeeper,
+            timekeeper_cpu_cores,
+            pot_external_entropy: config_pot_external_entropy,
+            ..
+        } = self;
+
+        let base = base.configuration(directory.as_ref(), chain_spec.clone()).await;
+        let name = base.network.node_name.clone();
+        let database_source = base.database.clone();
+
+        let partial_components =
+            subspace_service::new_partial::<F::Table, RuntimeApi, ExecutorDispatch>(
+                &base,
+                &pot_external_entropy(&base, config_pot_external_entropy)
+                    .context("Failed to get proof of time external entropy")?,
+            )
+            .context("Failed to build a partial subspace node")?;
+
+        let (subspace_networking, dsn, mut runner) = {
+            let keypair = {
+                let keypair = base
+                    .network
+                    .node_key
+                    .clone()
+                    .into_keypair()
+                    .context("Failed to convert network keypair")?
+                    .to_protobuf_encoding()
+                    .context("Failed to convert network keypair")?;
+
+                subspace_networking::libp2p::identity::Keypair::from_protobuf_encoding(&keypair)
+                    .expect("Address is correct")
+            };
+
+            let chain_spec_boot_nodes = base
+                .chain_spec
+                .properties()
+                .get("dsnBootstrapNodes")
+                .cloned()
+                .map(serde_json::from_value::<Vec<MultiaddrWithPeerId>>)
+                .transpose()
+                .context("Failed to decode DSN bootstrap nodes")?
+                .unwrap_or_default();
+
+            tracing::trace!("Subspace networking starting.");
+
+            dsn.boot_nodes.extend(chain_spec_boot_nodes);
+            let bootstrap_nodes =
+                dsn.boot_nodes.clone().into_iter().map(Into::into).collect::<Vec<_>>();
+
+            let segment_header_store = partial_components.other.segment_headers_store.clone();
+
+            let is_metrics_enabled = base.prometheus_config.is_some();
+
+            let (dsn, runner, metrics_registry) = dsn.build_dsn(DsnOptions {
+                client: partial_components.client.clone(),
+                keypair,
+                base_path: directory.as_ref().to_path_buf(),
+                get_piece_by_index: get_piece_by_index::<F>,
+                get_segment_header_by_segment_indexes,
+                segment_header_store,
+                is_metrics_enabled,
+            })?;
+
+            tracing::debug!("Subspace networking initialized: Node ID is {}", dsn.node.id());
+
+            (
+                subspace_service::SubspaceNetworking::Reuse {
+                    node: dsn.node.clone(),
+                    bootstrap_nodes,
+                    metrics_registry,
+                },
+                dsn,
+                runner,
+            )
+        };
+
+        let chain_spec_domains_bootstrap_nodes_map: serde_json::map::Map<
+            String,
+            serde_json::Value,
+        > = base
+            .chain_spec
+            .properties()
+            .get("domainsBootstrapNodes")
+            .map(|d| serde_json::from_value(d.clone()))
+            .transpose()
+            .map_err(|error| {
+                sc_service::Error::Other(format!(
+                    "Failed to decode Domains bootstrap nodes: {error:?}"
+                ))
+            })?
+            .unwrap_or_default();
+
+        let consensus_state_pruning_mode = base.state_pruning.clone().unwrap_or_default();
+
+        // Default values are used for many of the parameters
+        let configuration = SubspaceConfiguration {
+            base,
+            force_new_slot_notifications: false,
+            subspace_networking,
+            sync_from_dsn,
+            enable_subspace_block_relay,
+            is_timekeeper,
+            timekeeper_cpu_cores,
+        };
+
+        let node_runner_future = subspace_farmer::utils::run_future_in_dedicated_thread(
+            move || async move {
+                runner.run().await;
+                tracing::error!("Exited from node runner future");
+            },
+            format!("sdk-networking-{name}"),
+        )
+        .context("Failed to run node runner future")?;
+
+        let slot_proportion = sc_consensus_slots::SlotProportion::new(3f32 / 4f32);
+        let full_client = subspace_service::new_full::<F::Table, RuntimeApi, ExecutorDispatch>(
+            configuration,
+            partial_components,
+            true,
+            slot_proportion,
+        )
+        .await
+        .context("Failed to build a full subspace node")?;
+
+        let NewFull {
+            mut task_manager,
+            client,
+            rpc_handlers,
+            network_starter,
+            sync_service,
+            network_service,
+
+            backend: _,
+            select_chain: _,
+            reward_signing_notification_stream: _,
+            archived_segment_notification_stream: _,
+            transaction_pool,
+            block_importing_notification_stream,
+            new_slot_notification_stream,
+        } = full_client;
+
+        if let Some(storage_monitor) = storage_monitor {
+            sc_storage_monitor::StorageMonitorService::try_spawn(
+                storage_monitor.into(),
+                database_source,
+                &task_manager.spawn_essential_handle(),
+            )
+            .context("Failed to start storage monitor")?;
+        }
+
+        let mut destructors = DestructorSet::new("node-destructors");
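+
+        // Editor's note (sketch): `DestructorSet` collects async teardown
+        // callbacks; `Node::close()` later awaits `async_drop()` on it, which
+        // is what actually signals the task manager registered below to stop.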
+
+        let mut maybe_domain = None;
+        if let Some(domain_config) = self.domain {
+            let base_directory = directory.as_ref().to_owned().clone();
+
+            let chain_spec_domains_bootstrap_nodes = chain_spec_domains_bootstrap_nodes_map
+                .get(&format!("{}", domain_config.domain_id))
+                .map(|d| serde_json::from_value(d.clone()))
+                .transpose()
+                .map_err(|error| {
+                    sc_service::Error::Other(format!(
+                        "Failed to decode Domain: {} bootstrap nodes: {error:?}",
+                        domain_config.domain_id
+                    ))
+                })?
+                .unwrap_or_default();
+
+            let mut xdm_gossip_worker_builder = GossipWorkerBuilder::new();
+
+            let relayer_worker =
+                domain_client_message_relayer::worker::relay_consensus_chain_messages(
+                    client.clone(),
+                    consensus_state_pruning_mode,
+                    sync_service.clone(),
+                    xdm_gossip_worker_builder.gossip_msg_sink(),
+                );
+
+            task_manager.spawn_essential_handle().spawn_essential_blocking(
+                "consensus-chain-relayer",
+                None,
+                Box::pin(relayer_worker),
+            );
+
+            let (consensus_msg_sink, consensus_msg_receiver) =
+                tracing_unbounded("consensus_message_channel", 100);
+
+            // Start cross domain message listener for Consensus chain to receive messages
+            // from domains in the network
+            let consensus_listener =
+                cross_domain_message_gossip::start_cross_chain_message_listener(
+                    ChainId::Consensus,
+                    client.clone(),
+                    transaction_pool.clone(),
+                    network_service.clone(),
+                    consensus_msg_receiver,
+                );
+
+            task_manager.spawn_essential_handle().spawn_essential_blocking(
+                "consensus-message-listener",
+                None,
+                Box::pin(consensus_listener),
+            );
+
+            xdm_gossip_worker_builder
+                .push_chain_tx_pool_sink(ChainId::Consensus, consensus_msg_sink);
+
+            let (domain_message_sink, domain_message_receiver) =
+                tracing_unbounded("domain_message_channel", 100);
+
+            xdm_gossip_worker_builder.push_chain_tx_pool_sink(
+                ChainId::Domain(domain_config.domain_id),
+                domain_message_sink,
+            );
+
+            let domain = domain_config
+                .build(
+                    base_directory,
+                    ConsensusNodeLink {
+                        consensus_network: network_service.clone(),
+                        consensus_client: client.clone(),
+                        block_importing_notification_stream: block_importing_notification_stream
+                            .clone(),
+                        new_slot_notification_stream: new_slot_notification_stream.clone(),
+                        consensus_sync_service: sync_service.clone(),
+                        consensus_transaction_pool: transaction_pool.clone(),
+                        gossip_message_sink: xdm_gossip_worker_builder.gossip_msg_sink(),
+                        domain_message_receiver,
+                        chain_spec_domains_bootstrap_nodes,
+                    },
+                )
+                .await?;
+
+            let cross_domain_message_gossip_worker = xdm_gossip_worker_builder
+                .build::<OpaqueBlock, _, _>(network_service.clone(), sync_service.clone());
+
+            task_manager.spawn_essential_handle().spawn_essential_blocking(
+                "cross-domain-gossip-message-worker",
+                None,
+                Box::pin(cross_domain_message_gossip_worker.run()),
+            );
+
+            maybe_domain = Some(domain);
+        }
+
+        let (task_manager_drop_sender, task_manager_drop_receiver) = oneshot::channel();
+        let (task_manager_result_sender, task_manager_result_receiver) = oneshot::channel();
+        let task_manager_join_handle = sdk_utils::task_spawn(
+            format!("sdk-node-{name}-task-manager"),
+            {
+                async move {
+                    futures::select! {
+                        _ = task_manager_drop_receiver.fuse() => {
+                            let _ = task_manager_result_sender.send(Ok(TaskOutput::Cancelled("received drop signal for task manager".into())));
+                        },
+                        result = task_manager.future().fuse() => {
+                            let _ = task_manager_result_sender.send(result.map_err(anyhow::Error::new).map(TaskOutput::Value));
+                        }
+                        _ = node_runner_future.fuse() => {
+                            let _ = task_manager_result_sender.send(Ok(TaskOutput::Value(())));
+                        }
+                    }
+                }
+            },
+        );
+
+        destructors.add_async_destructor({
+            async move {
+                let _ = task_manager_drop_sender.send(());
+                task_manager_join_handle.await.expect("joining should not fail; qed");
+            }
+        })?;
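+
+        // Editor's note (sketch): the `select!` above resolves either when
+        // `close()` fires the drop signal (surfaced as `TaskOutput::Cancelled`)
+        // or when the task manager / node runner future exits on its own
+        // (surfaced as `TaskOutput::Value`).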
+
+        let rpc_handle = sdk_utils::Rpc::new(&rpc_handlers);
+        network_starter.start_network();
+
+        // Disable proper exit for now. Because the RPC server loses its waker
+        // and can't exit in background.
+        //
+        // drop_collection.defer(move || {
+        //     const BUSY_WAIT_INTERVAL: Duration = Duration::from_millis(100);
+        //
+        //     // Busy wait till backend exits
+        //     // TODO: is it the only way to check that substrate node exited?
+        //     while Arc::strong_count(&backend) != 1 {
+        //         std::thread::sleep(BUSY_WAIT_INTERVAL);
+        //     }
+        // });
+
+        tracing::debug!("Started node");
+
+        Ok(Node {
+            client,
+            network_service,
+            sync_service,
+            name,
+            rpc_handle,
+            dsn,
+            _destructors: destructors,
+            _farmer: Default::default(),
+            task_manager_result_receiver,
+            maybe_domain,
+        })
+    }
+}
+
+/// Executor dispatch for subspace runtime
+pub struct ExecutorDispatch;
+
+impl sc_executor::NativeExecutionDispatch for ExecutorDispatch {
+    // /// Only enable the benchmarking host functions when we actually want to
+    // /// benchmark.
+    // #[cfg(feature = "runtime-benchmarks")]
+    // type ExtendHostFunctions = (
+    //     frame_benchmarking::benchmarking::HostFunctions,
+    //     sp_consensus_subspace::consensus::HostFunctions,
+    // );
+    // /// Otherwise we only use the default Substrate host functions.
+    // #[cfg(not(feature = "runtime-benchmarks"))]
+    type ExtendHostFunctions =
+        (sp_consensus_subspace::consensus::HostFunctions, sp_domains_fraud_proof::HostFunctions);
+
+    fn dispatch(method: &str, data: &[u8]) -> Option<Vec<u8>> {
+        subspace_runtime::api::dispatch(method, data)
+    }
+
+    fn native_version() -> sc_executor::NativeVersion {
+        subspace_runtime::native_version()
+    }
+}
+
+/// Chain spec for subspace node
+pub type ChainSpec = chain_spec::ChainSpec;
+pub(crate) type FullClient = subspace_service::FullClient<RuntimeApi, ExecutorDispatch>;
+pub(crate) type NewFull = subspace_service::NewFull<Arc<FullClient>>;
+
+/// Node structure
+#[derive(Derivative)]
+#[derivative(Debug)]
+#[must_use = "Node should be closed"]
+pub struct Node<F: Farmer> {
+    #[derivative(Debug = "ignore")]
+    client: Arc<FullClient>,
+    #[derivative(Debug = "ignore")]
+    sync_service: Arc<sc_network_sync::SyncingService<OpaqueBlock>>,
+    #[derivative(Debug = "ignore")]
+    network_service: Arc<NetworkService<OpaqueBlock, Hash>>,
+    rpc_handle: sdk_utils::Rpc,
+    name: String,
+    dsn: DsnShared,
+    #[derivative(Debug = "ignore")]
+    _destructors: DestructorSet,
+    #[derivative(Debug = "ignore")]
+    _farmer: std::marker::PhantomData<F>,
+    #[derivative(Debug = "ignore")]
+    task_manager_result_receiver: oneshot::Receiver<anyhow::Result<TaskOutput<(), String>>>,
+    #[derivative(Debug = "ignore")]
+    maybe_domain: Option<Domain>,
+}
+
+impl<F: Farmer> sdk_traits::Node for Node<F> {
+    type Client = FullClient;
+    type Rpc = sdk_utils::Rpc;
+    type Table = F::Table;
+
+    fn name(&self) -> &str {
+        &self.name
+    }
+
+    fn dsn(&self) -> &DsnShared {
+        &self.dsn
+    }
+
+    fn rpc(&self) -> &Self::Rpc {
+        &self.rpc_handle
+    }
+}
+
+/// Hash type
+pub type Hash = <Header as sp_runtime::traits::Header>::Hash;
+/// Block number
+pub type BlockNumber = BlockNumberFor<subspace_runtime::Runtime>;
+
+/// Chain info
+#[derive(Debug, Clone)]
+#[non_exhaustive]
+pub struct ChainInfo {
+    /// Genesis hash of chain
+    pub genesis_hash: Hash,
+}
+
+/// Node state info
+#[derive(Debug, Clone)]
+#[non_exhaustive]
+pub struct Info {
+    /// Chain info
+    pub chain: ChainInfo,
+    /// Best block hash and number
+    pub best_block: (Hash, BlockNumber),
+    /// Finalized block hash and number
+    pub finalized_block: (Hash, BlockNumber),
+    /// Block gap which we need to sync
+    pub block_gap: Option<std::ops::Range<BlockNumber>>,
+    /// Runtime version
+    pub version: sp_version::RuntimeVersion,
+    /// Node telemetry name
+    pub name: String,
+    /// Number of peers connected to our node
+    pub connected_peers: u64,
+    /// Number of nodes that we know of but that we're not connected to
+    pub not_connected_peers: u64,
+    /// Total number of pieces stored on chain
+    pub history_size: HistorySize,
+}
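+
+// Example (editor's sketch, not part of the original patch): a typical readout
+// of this struct:
+//
+//     let info = node.get_info().await?;
+//     println!("best: #{} ({})", info.best_block.1, info.best_block.0);
+//     println!("peers: {} connected", info.connected_peers);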
+
+/// New block notification
+#[derive(Debug, Clone)]
+#[non_exhaustive]
+pub struct BlockHeader {
+    /// Block hash
+    pub hash: Hash,
+    /// Block number
+    pub number: BlockNumber,
+    /// Parent block hash
+    pub parent_hash: Hash,
+    /// Block state root
+    pub state_root: Hash,
+    /// Extrinsics root
+    pub extrinsics_root: Hash,
+    /// Block pre digest
+    pub pre_digest: Option<PreDigest<PublicKey, PublicKey>>,
+}
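+
+// Editor's note (sketch): `pre_digest` is `None` for headers that carry no
+// Subspace pre-runtime digest (e.g. genesis); the `From<Header>` impl below
+// decodes it from the header's digest log.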
+
+impl From<Header> for BlockHeader {
+    fn from(header: Header) -> Self {
+        let hash = header.hash();
+        let Header { number, parent_hash, state_root, extrinsics_root, digest } = header;
+        let pre_digest = digest
+            .log(|it| if let DigestItem::PreRuntime(_, digest) = it { Some(digest) } else { None })
+            .map(|pre_digest| {
+                parity_scale_codec::Decode::decode(&mut pre_digest.as_ref())
+                    .expect("Pre digest is always scale encoded")
+            });
+        Self { hash, number, parent_hash, state_root, extrinsics_root, pre_digest }
+    }
+}
+
+/// Syncing status
+#[derive(Clone, Copy, Debug)]
+pub enum SyncStatus {
+    /// Importing some block
+    Importing,
+    /// Downloading some block
+    Downloading,
+}
+
+/// Current syncing progress
+#[derive(Clone, Copy, Debug)]
+pub struct SyncingProgress {
+    /// Blocks imported so far
+    pub at: BlockNumber,
+    /// Total number of blocks to sync
+    pub target: BlockNumber,
+    /// Current syncing status
+    pub status: SyncStatus,
+}
+
+#[pin_project::pin_project]
+struct SyncingProgressStream<S> {
+    #[pin]
+    inner: S,
+    at: BlockNumber,
+    target: BlockNumber,
+}
+
+impl<S: Stream<Item = Result<SyncingProgress, anyhow::Error>>> Stream for SyncingProgressStream<S> {
+    type Item = Result<SyncingProgress, anyhow::Error>;
+
+    fn poll_next(
+        self: std::pin::Pin<&mut Self>,
+        cx: &mut std::task::Context<'_>,
+    ) -> std::task::Poll<Option<Self::Item>> {
+        let this = self.project();
+        let next = this.inner.poll_next(cx);
+        if let std::task::Poll::Ready(Some(Ok(SyncingProgress { at, target, .. }))) = next {
+            *this.at = at;
+            *this.target = target;
+        }
+        next
+    }
+
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        (self.at as _, Some(self.target as _))
+    }
+}
+
+impl<F: Farmer> Node<F> {
+    /// New node builder
+    pub fn builder() -> Builder {
+        Builder::new()
+    }
+
+    /// Development configuration
+    pub fn dev() -> Builder {
+        Builder::dev()
+    }
+
+    /// Gemini 3g configuration
+    pub fn gemini_3g() -> Builder {
+        Builder::gemini_3g()
+    }
+
+    /// Devnet configuration
+    pub fn devnet() -> Builder {
+        Builder::devnet()
+    }
+
+    /// Get listening addresses of the node
+    pub async fn listen_addresses(&self) -> anyhow::Result<Vec<MultiaddrWithPeerId>> {
+        let peer_id = self.network_service.local_peer_id();
+        self.network_service
+            .network_state()
+            .await
+            .map(|state| {
+                state
+                    .listened_addresses
+                    .into_iter()
+                    .map(|multiaddr| MultiaddrWithPeerId::new(multiaddr, peer_id))
+                    .collect()
+            })
+            .map_err(|()| anyhow::anyhow!("Network worker exited"))
+    }
+
+    /// Get listening addresses of the DSN node
+    pub async fn dsn_listen_addresses(&self) -> anyhow::Result<Vec<MultiaddrWithPeerId>> {
+        let peer_id =
+            self.dsn.node.id().to_string().parse().expect("Conversion between 2 libp2p versions");
+        Ok(self
+            .dsn
+            .node
+            .listeners()
+            .into_iter()
+            .map(|multiaddr| MultiaddrWithPeerId::new(multiaddr, peer_id))
+            .collect())
+    }
+
+    /// Subscribe for node syncing progress
+    pub async fn subscribe_syncing_progress(
+        &self,
+    ) -> anyhow::Result<
+        impl Stream<Item = Result<SyncingProgress, anyhow::Error>> + Send + Unpin + 'static,
+    > {
+        const CHECK_SYNCED_EVERY: Duration = Duration::from_millis(100);
+        let check_offline_backoff = backoff::ExponentialBackoffBuilder::new()
+            .with_max_elapsed_time(Some(Duration::from_secs(60)))
+            .build();
+        let check_synced_backoff = backoff::ExponentialBackoffBuilder::new()
+            .with_initial_interval(Duration::from_secs(1))
+            .with_max_elapsed_time(Some(Duration::from_secs(10 * 60)))
+            .build();
+
+        backoff::future::retry(check_offline_backoff, || {
+            futures::future::ready(if self.sync_service.is_offline() {
+                Err(backoff::Error::transient(()))
+            } else {
+                Ok(())
+            })
+        })
+        .await
+        .map_err(|_| anyhow::anyhow!("Failed to connect to the network"))?;
+
+        let (sender, receiver) = tokio::sync::mpsc::channel(10);
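+
+        // Editor's note (sketch): at this point the node is known to be
+        // online; the retry below classifies the sync state and gives up after
+        // ~10 minutes of `Idle`/`Pending`, which is treated as already synced.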
+        let inner = tokio_stream::wrappers::ReceiverStream::new(receiver);
+
+        let result = backoff::future::retry(check_synced_backoff.clone(), || {
+            self.sync_service.status().map(|result| match result.map(|status| status.state) {
+                Ok(SyncState::Importing { target }) => Ok((target, SyncStatus::Importing)),
+                Ok(SyncState::Downloading { target }) => Ok((target, SyncStatus::Downloading)),
+                _ if self.sync_service.is_offline() =>
+                    Err(backoff::Error::transient(Some(anyhow::anyhow!("Node went offline")))),
+                Err(()) => Err(backoff::Error::transient(Some(anyhow::anyhow!(
+                    "Failed to fetch networking status"
+                )))),
+                Ok(SyncState::Idle | SyncState::Pending) => Err(backoff::Error::transient(None)),
+            })
+        })
+        .await;
+
+        let (target, status) = match result {
+            Ok(result) => result,
+            Err(Some(err)) => return Err(err),
+            // We are idle for quite some time
+            Err(None) => return Ok(SyncingProgressStream { inner, at: 0, target: 0 }),
+        };
+
+        let at = self.client.chain_info().best_number;
+        sender
+            .send(Ok(SyncingProgress { target, at, status }))
+            .await
+            .expect("We are holding receiver, so it will never panic");
+
+        tokio::spawn({
+            let sync = Arc::clone(&self.sync_service);
+            let client = Arc::clone(&self.client);
+            async move {
+                loop {
+                    tokio::time::sleep(CHECK_SYNCED_EVERY).await;
+
+                    let result = backoff::future::retry(check_synced_backoff.clone(), || {
+                        sync.status().map(|result| match result.map(|status| status.state) {
+                            Ok(SyncState::Importing { target }) =>
+                                Ok(Ok((target, SyncStatus::Importing))),
+                            Ok(SyncState::Downloading { target }) =>
+                                Ok(Ok((target, SyncStatus::Downloading))),
+                            Err(()) =>
+                                Ok(Err(anyhow::anyhow!("Failed to fetch networking status"))),
+                            Ok(SyncState::Idle | SyncState::Pending) =>
+                                Err(backoff::Error::transient(())),
+                        })
+                    })
+                    .await;
+                    let Ok(result) = result else { break };
+
+                    if sender
+                        .send(result.map(|(target, status)| SyncingProgress {
+                            target,
+                            at: client.chain_info().best_number,
+                            status,
+                        }))
+                        .await
+                        .is_err()
+                    {
+                        break;
+                    }
+                }
+            }
+        });
+
+        Ok(SyncingProgressStream { inner, at, target })
+    }
+
+    /// Wait until the node finishes syncing
+    pub async fn sync(&self) -> anyhow::Result<()> {
+        self.subscribe_syncing_progress().await?.for_each(|_| async move {}).await;
+        Ok(())
+    }
+
+    /// Leaves the network and gracefully shuts down
+    pub async fn close(self) -> anyhow::Result<()> {
+        if let Some(domain) = self.maybe_domain {
+            domain.close().await?;
+        }
+        self._destructors.async_drop().await?;
+        let output = self.task_manager_result_receiver.await??;
+        match output {
+            TaskOutput::Value(_) => {}
+            TaskOutput::Cancelled(reason) => {
+                tracing::warn!("node task manager was cancelled due to reason: {}", reason);
+            }
+        }
+        Ok(())
+    }
+
+    /// Tells if the node was closed
+    pub async fn is_closed(&self) -> bool {
+        self._destructors.already_ran()
+    }
+
+    /// Runs `.close()` and also wipes node's state
+    pub async fn wipe(path: impl AsRef<Path>) -> io::Result<()> {
+        tokio::fs::remove_dir_all(path).await
+    }
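+
+    // Example (editor's sketch, not part of the original patch): a graceful
+    // shutdown that also removes the node's on-disk state:
+    //
+    //     node.close().await?;
+    //     Node::wipe("node").await?;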
+
+    /// Get node info
+    pub async fn get_info(&self) -> anyhow::Result<Info> {
+        let NetworkState { connected_peers, not_connected_peers, .. } = self
+            .network_service
+            .network_state()
+            .await
+            .map_err(|()| anyhow::anyhow!("Failed to fetch node info: node already exited"))?;
+        let sp_blockchain::Info {
+            best_hash,
+            best_number,
+            genesis_hash,
+            finalized_hash,
+            finalized_number,
+            block_gap,
+            ..
+        } = self.client.chain_info();
+        let version = self.rpc_handle.runtime_version(Some(best_hash)).await?;
+        let FarmerProtocolInfo { history_size, .. } =
+            self.rpc_handle.farmer_app_info().await.map_err(anyhow::Error::msg)?.protocol_info;
+        Ok(Info {
+            chain: ChainInfo { genesis_hash },
+            best_block: (best_hash, best_number),
+            finalized_block: (finalized_hash, finalized_number),
+            block_gap: block_gap.map(|(from, to)| from..to),
+            version,
+            name: self.name.clone(),
+            connected_peers: connected_peers.len() as u64,
+            not_connected_peers: not_connected_peers.len() as u64,
+            history_size,
+        })
+    }
+
+    /// Get block hash by block number
+    pub fn block_hash(&self, number: BlockNumber) -> anyhow::Result<Option<Hash>> {
+        use sc_client_api::client::BlockBackend;
+
+        self.client.block_hash(number).context("Failed to get primary node block hash by number")
+    }
+
+    /// Get block header by hash
+    pub fn block_header(&self, hash: Hash) -> anyhow::Result<Option<BlockHeader>> {
+        self.client
+            .header(hash)
+            .context("Failed to get primary node block header by hash")
+            .map(|opt| opt.map(Into::into))
+    }
+
+    /// Subscribe to new heads imported
+    pub async fn subscribe_new_heads(
+        &self,
+    ) -> anyhow::Result<impl Stream<Item = BlockHeader> + Send + Sync + Unpin + 'static> {
+        Ok(self
+            .rpc_handle
+            .subscribe_new_heads::<Header>()
+            .await
+            .context("Failed to subscribe to new blocks")?
+            .map(Into::into))
+    }
+
+    /// Subscribe to finalized heads
+    pub async fn subscribe_finalized_heads(
+        &self,
+    ) -> anyhow::Result<impl Stream<Item = BlockHeader> + Send + Sync + Unpin + 'static> {
+        Ok(self
+            .rpc_handle
+            .subscribe_finalized_heads::<Header>()
+            .await
+            .context("Failed to subscribe to finalized blocks")?
+            .map(Into::into))
+    }
+
+    /// Get events at some block or at tip of the chain
+    pub async fn get_events(&self, block: Option<Hash>) -> anyhow::Result<Vec<Event>> {
+        Ok(self
+            .rpc_handle
+            .get_events::<subspace_runtime::Runtime>(block)
+            .await?
+            .into_iter()
+            .map(|event_record| event_record.event)
+            .collect())
+    }
+}
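+
+// Example (editor's sketch, not part of the original patch): scanning the
+// chain tip for reward events:
+//
+//     for event in node.get_events(None).await? {
+//         if let Event::Rewards(reward_event) = event {
+//             tracing::info!(?reward_event, "reward event");
+//         }
+//     }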
+
+fn get_segment_header_by_segment_indexes(
+    req: &SegmentHeaderRequest,
+    segment_headers_store: &SegmentHeadersStore<FullClient>,
+) -> Option<SegmentHeaderResponse> {
+    let segment_indexes = match req {
+        SegmentHeaderRequest::SegmentIndexes { segment_indexes } => segment_indexes.clone(),
+        SegmentHeaderRequest::LastSegmentHeaders { segment_header_number } => {
+            let mut segment_headers_limit = *segment_header_number;
+            if *segment_header_number > SEGMENT_HEADERS_NUMBER_LIMIT {
+                tracing::debug!(%segment_header_number, "Segment header number exceeded the limit.");
+
+                segment_headers_limit = SEGMENT_HEADERS_NUMBER_LIMIT;
+            }
+
+            // Currently segment_headers_store.max_segment_index returns None if only the
+            // genesis block is archived. To maintain parity with the monorepo
+            // implementation we are returning SegmentIndex::ZERO in that case.
+            let max_segment_index =
+                segment_headers_store.max_segment_index().unwrap_or(SegmentIndex::ZERO);
+            (SegmentIndex::ZERO..=max_segment_index)
+                .rev()
+                .take(segment_headers_limit as usize)
+                .collect::<Vec<_>>()
+        }
+    };
+
+    let maybe_segment_headers = segment_indexes
+        .iter()
+        .map(|segment_index| segment_headers_store.get_segment_header(*segment_index))
+        .collect::<Option<Vec<_>>>();
+
+    match maybe_segment_headers {
+        Some(segment_headers) => Some(SegmentHeaderResponse { segment_headers }),
+        None => {
+            tracing::error!("Segment header collection contained empty segment headers.");
+            None
+        }
+    }
+}
+
+fn get_piece_by_index<F: Farmer>(
+    &PieceByIndexRequest { piece_index }: &PieceByIndexRequest,
+    weak_readers_and_pieces: std::sync::Weak<
+        parking_lot::Mutex<Option<subspace_farmer::utils::readers_and_pieces::ReadersAndPieces>>,
+    >,
+    farmer_piece_cache: Arc<parking_lot::RwLock<Option<FarmerPieceCache>>>,
+) -> impl std::future::Future<Output = Option<PieceByIndexResponse>> {
+    async move {
+        // Have to clone because the RAII guard is not `Send`; no impact on
+        // behaviour/performance as `FarmerPieceCache` uses `Arc` and
+        // `mpsc::Sender` underneath.
+        let maybe_farmer_piece_cache = farmer_piece_cache.read().clone();
+        if let Some(farmer_piece_cache) = maybe_farmer_piece_cache {
+            let piece =
+                F::get_piece_by_index(piece_index, &farmer_piece_cache, &weak_readers_and_pieces)
+                    .await;
+            Some(PieceByIndexResponse { piece })
+        } else {
+            None
+        }
+    }
+    .in_current_span()
+}
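+
+// Editor's note (sketch): returning `None` above means no response is sent;
+// until the farmer piece cache is initialized, piece-by-index requests from
+// DSN peers are dropped rather than answered with an empty
+// `PieceByIndexResponse`.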
diff --git a/sdk/subspace-sdk/Cargo.toml b/sdk/subspace-sdk/Cargo.toml
new file mode 100644
index 00000000..e7eb6f8b
--- /dev/null
+++ b/sdk/subspace-sdk/Cargo.toml
@@ -0,0 +1,40 @@
+[package]
+name = "subspace-sdk"
+version = "0.1.0"
+edition = "2021"
+
+[dependencies]
+sdk-dsn = { path = "../dsn" }
+sdk-farmer = { path = "../farmer", default-features = false }
+sdk-node = { path = "../node" }
+sdk-substrate = { path = "../substrate" }
+sdk-utils = { path = "../utils" }
+static_assertions = "1.1.0"
+
+subspace-proof-of-space = { git = "https://github.com/subspace/subspace", rev = "bd435100200b3dcce6d6f50534d52e3cd039ca8e" }
+
+[dev-dependencies]
+anyhow = "1"
+clap = { version = "4", features = ["derive"] }
+derive_builder = "0.12"
+derive_more = "0.99"
+fdlimit = "0.2"
+futures = "0.3"
+mimalloc = { version = "*", default-features = false }
+serde_json = "1"
+subspace-farmer-components = { git = "https://github.com/subspace/subspace", rev = "bd435100200b3dcce6d6f50534d52e3cd039ca8e" }
+tempfile = "3"
+tokio = { version = "1.34.0", features = ["rt-multi-thread", "macros"] }
+tracing = "0.1"
+tracing-futures = "0.2"
+tracing-subscriber = "0.3"
+
+
+[target.'cfg(tokio_unstable)'.dev-dependencies]
+console-subscriber = "0.1"
+
+[features]
+default = ["numa"]
+numa = [
+    "sdk-farmer/numa",
+]
diff --git a/sdk/subspace-sdk/examples/complete.rs b/sdk/subspace-sdk/examples/complete.rs
new file mode 100644
index 00000000..5c39b86d
--- /dev/null
+++ b/sdk/subspace-sdk/examples/complete.rs
@@ -0,0 +1,84 @@
+use std::num::NonZeroU8;
+
+use futures::StreamExt;
+use subspace_sdk::node::NetworkBuilder;
+use subspace_sdk::{chain_spec, node, ByteSize, FarmDescription, Farmer, Node, PublicKey};
+
+#[tokio::main]
+async fn main() {
+    let plots = [FarmDescription::new("plot", ByteSize::gb(10))];
+    let node: Node = Node::builder()
+        .blocks_pruning(node::BlocksPruning::Some(1000))
+        .state_pruning(node::PruningMode::ArchiveCanonical)
+        .network(NetworkBuilder::new().name("i1i1"))
+        .build("node", chain_spec::dev_config())
+        .await
+        .expect("Failed to init a node");
+
+    node.sync().await.unwrap();
+
+    let reward_address = PublicKey::from([0; 32]);
+    let farmer: Farmer = Farmer::builder()
+        // .ws_rpc("127.0.0.1:9955".parse().unwrap())
+        // .listen_on("/ip4/0.0.0.0/tcp/40333".parse().unwrap())
+        .build(
+            reward_address,
+            &node,
+            &plots,
+            NonZeroU8::new(1).expect("Static value should not fail; qed"),
+        )
+        .await
+        .expect("Failed to init a farmer");
+
+    tokio::spawn({
+        let mut solutions =
+            farmer.iter_farms().await.next().unwrap().subscribe_new_solutions().await;
+        async move {
+            while let Some(solution) = solutions.next().await {
+                eprintln!("Found solution: {solution:?}");
+            }
+        }
+    });
+    tokio::spawn({
+        let mut new_blocks = node.subscribe_new_heads().await.unwrap();
+        async move {
+            while let Some(block) = new_blocks.next().await {
+                eprintln!("New block: {block:?}");
+            }
+        }
+    });
+
+    dbg!(node.get_info().await.unwrap());
+    dbg!(farmer.get_info().await.unwrap());
+
+    farmer.close().await.unwrap();
+    node.close().await.unwrap();
+
+    // Restarting
+    let node = Node::builder()
+        .blocks_pruning(node::BlocksPruning::Some(1000))
+        .state_pruning(node::PruningMode::ArchiveCanonical)
+        .build("node", chain_spec::dev_config())
+        .await
+        .expect("Failed to init a node");
+    node.sync().await.unwrap();
+
+    let farmer = Farmer::builder()
+        .build(
+            reward_address,
+            &node,
+            &[FarmDescription::new("plot", ByteSize::gb(10))],
+            NonZeroU8::new(1).expect("Static value should not fail; qed"),
+        )
+        .await
+        .expect("Failed to init a farmer");
+
+    farmer.close().await.unwrap();
+    node.close().await.unwrap();
+
+    // Delete everything
+    for plot in plots {
+        plot.wipe().await.unwrap();
+    }
+    Node::wipe("node").await.unwrap();
+}
diff --git a/sdk/subspace-sdk/examples/mini-farmer.rs b/sdk/subspace-sdk/examples/mini-farmer.rs
new file mode 100644
index 00000000..c2418311
--- /dev/null
+++ b/sdk/subspace-sdk/examples/mini-farmer.rs
@@ -0,0 +1,200 @@
+use std::num::NonZeroU8;
+use std::path::PathBuf;
+
+use anyhow::Context;
+use clap::{Parser, ValueEnum};
+use futures::prelude::*;
+use subspace_sdk::node::{self, Event, Node, RewardsEvent, SubspaceEvent};
+use subspace_sdk::{ByteSize, FarmDescription, Farmer, PublicKey};
+use tracing_subscriber::prelude::*;
+
+#[global_allocator]
+static GLOBAL: mimalloc::MiMalloc = mimalloc::MiMalloc;
+
+#[derive(ValueEnum, Debug, Clone)]
+enum Chain {
+    Gemini3g,
+    Devnet,
+    Dev,
+}
+
+/// Mini farmer
+#[derive(Parser, Debug)]
+#[command(author, version, about)]
+pub struct Args {
+    /// Set the chain
+    #[arg(value_enum)]
+    chain: Chain,
+    #[cfg(feature = "executor")]
+    /// Run executor with specified domain
+    #[arg(short, long)]
+    executor: bool,
+    /// Address for farming rewards
+    #[arg(short, long)]
+    reward_address: PublicKey,
+    /// Path for all data
+    #[arg(short, long)]
+    base_path: Option<PathBuf>,
+    /// Size of the plot
+    #[arg(short, long)]
+    plot_size: ByteSize,
+    /// Cache size
+    #[arg(short, long, default_value_t = ByteSize::gib(1))]
+    cache_size: ByteSize,
+}
+
+#[tokio::main]
+async fn main() -> anyhow::Result<()> {
+    fdlimit::raise_fd_limit();
+
+    #[cfg(tokio_unstable)]
+    let registry = tracing_subscriber::registry().with(console_subscriber::spawn());
+    #[cfg(not(tokio_unstable))]
+    let registry = tracing_subscriber::registry();
+
+    registry
+        .with(tracing_subscriber::fmt::layer())
+        .with(
+            tracing_subscriber::EnvFilter::from_default_env()
+                .add_directive("info".parse().unwrap()),
+        )
+        .init();
+
+    let Args {
+        chain,
+        #[cfg(feature = "executor")]
+        executor,
+        reward_address,
+        base_path,
+        plot_size,
+        cache_size: _,
+    } = Args::parse();
+    let (base_path, _tmp_dir) = base_path.map(|x| (x, None)).unwrap_or_else(|| {
+        let tmp = tempfile::tempdir().expect("Failed to create temporary directory");
+        (tmp.as_ref().to_owned(), Some(tmp))
+    });
+
+    let node_dir = base_path.join("node");
+    let node = match chain {
+        Chain::Gemini3g => Node::gemini_3g().dsn(
+            subspace_sdk::node::DsnBuilder::gemini_3g()
+                .provider_storage_path(node_dir.join("provider_storage")),
+        ),
+        Chain::Devnet => Node::devnet().dsn(
+            subspace_sdk::node::DsnBuilder::devnet()
+                .provider_storage_path(node_dir.join("provider_storage")),
+        ),
+        Chain::Dev => Node::dev().dsn(
+            subspace_sdk::node::DsnBuilder::dev()
+                .provider_storage_path(node_dir.join("provider_storage")),
+        ),
+    }
+    .role(node::Role::Authority);
+
+    #[cfg(feature = "executor")]
+    let node = if executor {
+        node.system_domain(
+            node::domains::ConfigBuilder::new()
+                .rpc(subspace_sdk::node::RpcBuilder::new().addr("127.0.0.1:9990".parse().unwrap()))
+                .role(node::Role::Authority),
+        )
+    } else {
+        node
+    };
+
+    let node = node
+        .build(
+            &node_dir,
+            match chain {
+                Chain::Gemini3g => node::chain_spec::gemini_3g(),
+                Chain::Devnet => node::chain_spec::devnet_config(),
+                Chain::Dev => node::chain_spec::dev_config(),
+            },
+        )
+        .await?;
+
+    let sync = if !matches!(chain, Chain::Dev) {
+        futures::future::Either::Left(node.sync())
+    } else {
+        futures::future::Either::Right(futures::future::ok(()))
+    };
+
+    tokio::select! {
+        result = sync => result?,
+        _ = tokio::signal::ctrl_c() => {
+            tracing::error!("Exiting...");
+            return node.close().await.context("Failed to close node")
+        }
+    }
+    tracing::error!("Node was synced!");
+
+    let farmer = Farmer::builder()
+        .build(
+            reward_address,
+            &node,
+            &[FarmDescription::new(base_path.join("plot"), plot_size)],
+            NonZeroU8::new(1).expect("static value should not fail; qed"),
+        )
+        .await?;
+
+    tokio::spawn({
+        let initial_plotting =
+            farmer.iter_farms().await.next().unwrap().subscribe_initial_plotting_progress().await;
+        async move {
+            initial_plotting
+                .for_each(|progress| async move {
+                    tracing::error!(?progress, "Plotting!");
+                })
+                .await;
+            tracing::error!("Finished initial plotting!");
+        }
+    });
+
+    let rewards_sub = {
+        let node = &node;
+
+        async move {
+            let mut new_blocks = node.subscribe_finalized_heads().await?;
+            while let Some(header) = new_blocks.next().await {
+                let events = node.get_events(Some(header.hash)).await?;
+
+                for event in events {
+                    match event {
+                        Event::Rewards(
+                            RewardsEvent::VoteReward { reward, voter: author }
+                            | RewardsEvent::BlockReward { reward, block_author: author },
+                        ) if author == reward_address.into() =>
+                            tracing::error!(%reward, "Received a reward!"),
+                        Event::Subspace(SubspaceEvent::FarmerVote {
+                            reward_address: author,
+                            height: block_number,
+                            ..
+                        }) if author == reward_address.into() =>
+                            tracing::error!(block_number, "Vote counted for block"),
+                        _ => (),
+                    };
+                }
+
+                if let Some(pre_digest) = header.pre_digest {
+                    if pre_digest.solution().reward_address == reward_address {
+                        tracing::error!("We authored a block");
+                    }
+                }
+            }
+
+            anyhow::Ok(())
+        }
+    };
+
+    tokio::select! {
+        _ = rewards_sub => {},
+        _ = tokio::signal::ctrl_c() => {
+            tracing::error!("Exiting...");
+        }
+    }
+
+    node.close().await.context("Failed to close node")?;
+    farmer.close().await.context("Failed to close farmer")?;
+
+    Ok(())
+}
diff --git a/sdk/subspace-sdk/examples/simple.rs b/sdk/subspace-sdk/examples/simple.rs
new file mode 100644
index 00000000..a5e2a89b
--- /dev/null
+++ b/sdk/subspace-sdk/examples/simple.rs
@@ -0,0 +1,40 @@
+use std::num::NonZeroU8;
+
+use futures::prelude::*;
+
+#[tokio::main]
+async fn main() {
+    tracing_subscriber::fmt().init();
+    let plots = [subspace_sdk::FarmDescription::new("plot", subspace_sdk::ByteSize::mb(100))];
+    let node = subspace_sdk::Node::builder()
+        .force_authoring(true)
+        .role(subspace_sdk::node::Role::Authority)
+        // Starting a new chain
+        .build("node", subspace_sdk::chain_spec::dev_config())
+        .await
+        .unwrap();
+
+    let farmer = subspace_sdk::Farmer::builder()
+        .build(
+            subspace_sdk::PublicKey::from([0; 32]),
+            &node,
+            &plots,
+            NonZeroU8::new(1).expect("Static value should not fail; qed"),
+        )
+        .await
+        .expect("Failed to init a farmer");
+
+    for plot in farmer.iter_farms().await {
+        let mut plotting_progress = plot.subscribe_initial_plotting_progress().await;
+        while plotting_progress.next().await.is_some() {}
+    }
+    tracing::info!("Initial plotting completed");
+
+    node.subscribe_new_heads()
+        .await
+        .unwrap()
+        // Wait 10 blocks and exit
+        .take(10)
+        .for_each(|header| async move { tracing::info!(?header, "New block!") })
+        .await;
+}
diff --git a/sdk/subspace-sdk/examples/sync.rs b/sdk/subspace-sdk/examples/sync.rs
new file mode 100644
index 00000000..8ba08646
--- /dev/null
+++ b/sdk/subspace-sdk/examples/sync.rs
@@ -0,0 +1,108 @@
+use std::num::NonZeroU8;
+use std::path::PathBuf;
+
+use clap::Parser;
+use futures::stream::StreamExt;
+use subspace_sdk::node::NetworkBuilder;
+use subspace_sdk::{
+    chain_spec, ByteSize, FarmDescription, Farmer, MultiaddrWithPeerId, Node, PublicKey,
+};
+use tempfile::TempDir;
+
+#[derive(clap::Parser, Debug)]
+enum Args {
+    Farm {
+        /// Path to the plot
+        #[arg(short, long)]
+        plot: PathBuf,
+
+        /// Size of the plot
+        #[arg(long)]
+        plot_size: ByteSize,
+
+        /// Path to the node directory
+        #[arg(short, long)]
+        node: PathBuf,
+
+        /// Path to the chain spec
+        #[arg(short, long)]
+        spec: PathBuf,
+    },
+    Sync {
+        /// Bootstrap nodes
+        #[arg(short, long)]
+        boot_nodes: Vec<MultiaddrWithPeerId>,
+
+        /// Path to the chain spec
+        #[arg(short, long)]
+        spec: PathBuf,
+    },
+    GenerateSpec {
+        path: PathBuf,
+    },
+}
+
+#[tokio::main]
+async fn main() -> anyhow::Result<()> {
+    tracing_subscriber::fmt().init();
+
+    let args = Args::parse();
+    match args {
+        Args::GenerateSpec { path } =>
+            tokio::fs::write(path, serde_json::to_string_pretty(&chain_spec::dev_config())?).await?,
+        Args::Farm { plot, plot_size, node, spec } => {
+            let chain_spec = serde_json::from_str(&tokio::fs::read_to_string(spec).await?)?;
+            let (plot_size, _cache_size) =
+                (ByteSize::b(plot_size.as_u64() * 9 / 10), ByteSize::b(plot_size.as_u64() / 10));
+            let plots = [FarmDescription::new(plot.join("plot"), plot_size)];
+
+            let node = Node::builder()
+                .network(
+                    NetworkBuilder::new()
+                        .listen_addresses(vec!["/ip4/127.0.0.1/tcp/0".parse().unwrap()])
+                        .force_synced(true),
+                )
+                .force_authoring(true)
+                .role(subspace_sdk::node::Role::Authority)
+                .build(node, chain_spec)
+                .await?;
+
+            let _farmer: Farmer = Farmer::builder()
+                .build(
+                    PublicKey::from([13; 32]),
+                    &node,
+                    &plots,
+                    NonZeroU8::new(1).expect("Static value should not fail; qed"),
+                )
+                .await?;
+
+            let addr = node.listen_addresses().await?.into_iter().next().unwrap();
+            tracing::info!(%addr, "Node listening at");
+
+            node.subscribe_new_heads()
+                .await?
+                .for_each(|header| async move { tracing::info!(?header, "New block!") })
+                .await;
+        }
+        Args::Sync { boot_nodes, spec } => {
+            let node = TempDir::new()?;
+            let chain_spec = serde_json::from_str(&tokio::fs::read_to_string(spec).await?)?;
+            let node = Node::builder()
+                .force_authoring(true)
+                .role(subspace_sdk::node::Role::Authority)
+                .network(NetworkBuilder::new().boot_nodes(boot_nodes))
+                .build(node.as_ref(), chain_spec)
+                .await?;
+
+            node.sync().await.unwrap();
+            tracing::info!("Node was synced!");
+
+            node.subscribe_new_heads()
+                .await?
+                .for_each(|header| async move { tracing::info!(?header, "New block!") })
+                .await;
+        }
+    }
+
+    Ok(())
+}
diff --git a/sdk/subspace-sdk/src/lib.rs b/sdk/subspace-sdk/src/lib.rs
new file mode 100644
index 00000000..55a915ac
--- /dev/null
+++ b/sdk/subspace-sdk/src/lib.rs
@@ -0,0 +1,52 @@
+//! Subspace SDK for easy running of both Subspace node and farmer
+
+#![warn(
+    missing_docs,
+    clippy::dbg_macro,
+    clippy::unwrap_used,
+    clippy::disallowed_types,
+    unused_features
+)]
+#![cfg_attr(not(test), warn(unused_crate_dependencies))]
+
+/// Module related to the farmer
+pub use sdk_farmer::{Builder as FarmerBuilder, FarmDescription, Info as FarmerInfo};
+pub use sdk_node::{chain_spec, Builder as NodeBuilder, Info as NodeInfo};
+pub use sdk_utils::{ByteSize, Multiaddr, MultiaddrWithPeerId, PublicKey, Ss58ParsingError};
+use subspace_proof_of_space::chia::ChiaTable;
+
+static_assertions::assert_impl_all!(Node: Send, Sync);
+static_assertions::assert_impl_all!(Farmer: Send, Sync);
+static_assertions::assert_impl_all!(Farm: Send, Sync);
+
+/// Subspace farmer type
+pub type Farmer = sdk_farmer::Farmer<ChiaTable>;
+/// Subspace farmer's plot
+pub type Farm = sdk_farmer::Farm;
+/// Subspace primary node
+pub type Node = sdk_node::Node<Farmer>;
+
+/// Farmer related things located here
+pub mod farmer {
+    pub use sdk_farmer::FarmDescription;
+
+    pub use super::{Farm, Farmer};
+}
+
+/// Node related things located here
+pub mod node {
+    pub use sdk_dsn::*;
+    pub use sdk_node::chain_spec::ChainSpec;
+    pub use sdk_node::{
+        chain_spec, BlockNumber, DomainConfigBuilder, Event, Hash, RewardsEvent, SubspaceEvent,
+        SyncingProgress,
+    };
+    pub use sdk_substrate::*;
+
+    pub use super::Node;
+}
+
+/// SDK utilities, mainly used by tests
+pub mod utils {
+    pub use sdk_utils::*;
+}
diff --git a/sdk/subspace-sdk/tests/integration/common.rs b/sdk/subspace-sdk/tests/integration/common.rs
new file mode 100644
index 00000000..72c48a69
--- /dev/null
+++ b/sdk/subspace-sdk/tests/integration/common.rs
@@ -0,0 +1,185 @@
+use std::num::NonZeroU8;
+use std::path::PathBuf;
+use std::sync::Arc;
+
+use derive_builder::Builder;
+use derive_more::{Deref, DerefMut};
+use subspace_sdk::farmer::FarmDescription;
+use subspace_sdk::node::{
+    chain_spec, ChainSpec, DomainConfigBuilder, DsnBuilder, NetworkBuilder, Role,
+};
+use subspace_sdk::utils::ByteSize;
+use subspace_sdk::MultiaddrWithPeerId;
+use tempfile::TempDir;
+use tracing_subscriber::layer::SubscriberExt;
+use tracing_subscriber::util::SubscriberInitExt;
+use tracing_subscriber::Layer;
+
+pub fn setup() {
+    #[cfg(tokio_unstable)]
+    let registry = tracing_subscriber::registry().with(console_subscriber::spawn());
+    #[cfg(not(tokio_unstable))]
+    let registry = tracing_subscriber::registry();
+
+    let _ = registry
+        .with(
+            tracing_subscriber::fmt::layer().with_test_writer().with_filter(
+                "debug,parity-db=info,cranelift_codegen=info,wasmtime_cranelift=info,\
+                 subspace_sdk=trace,subspace_farmer=trace,subspace_service=trace,\
+                 subspace_farmer::utils::parity_db_store=debug,trie-cache=info,\
+                 wasm_overrides=info,jsonrpsee_core=info,libp2p_gossipsub::behaviour=info,\
+                 libp2p_core=info,libp2p_tcp=info,multistream_select=info,yamux=info,\
+                 libp2p_swarm=info,libp2p_ping=info,subspace_networking::node_runner=info,\
+                 subspace_networking::utils::piece_announcement=info,\
+                 subspace_farmer::utils::farmer_provider_record_processor=debug,\
+                 subspace_farmer::utils::farmer_piece_cache=debug,wasmtime_jit=info,\
+                 wasm-runtime=info"
+                    .parse::<tracing_subscriber::EnvFilter>()
+                    .expect("Env filter directives are correct"),
+            ),
+        )
+        .try_init();
+}
+
+#[derive(Builder)]
+#[builder(pattern = "immutable", build_fn(private, name = "_build"), name = "NodeBuilder")]
+pub struct InnerNode {
+    #[builder(default)]
+    not_force_synced: bool,
+    #[builder(default)]
+    boot_nodes: Vec<MultiaddrWithPeerId>,
+    #[builder(default)]
+    dsn_boot_nodes: Vec<MultiaddrWithPeerId>,
+    #[builder(default)]
+    not_authority: bool,
+    #[builder(default = "chain_spec::dev_config()")]
+    chain: ChainSpec,
+    #[builder(default = "TempDir::new().map(Arc::new).unwrap()")]
+    path: Arc<TempDir>,
+    #[cfg(feature = "core-payments")]
+    #[builder(default)]
+    enable_core: bool,
+}
+
+#[derive(Deref, DerefMut)]
+pub struct Node {
+    #[deref]
+    #[deref_mut]
+    node: subspace_sdk::Node,
+    pub path: Arc<TempDir>,
+    pub chain: ChainSpec,
+}
+
+impl NodeBuilder {
+    pub async fn build(self, enable_domains: bool) -> Node {
+        let InnerNode {
+            not_force_synced,
+            boot_nodes,
+            dsn_boot_nodes,
+            not_authority,
+            chain,
+            path,
+            #[cfg(feature = "core-payments")]
+            enable_core,
+        } = self._build().expect("Infallible");
+        let node = subspace_sdk::Node::dev()
+            .dsn(
+                DsnBuilder::dev()
+                    .listen_addresses(vec!["/ip4/127.0.0.1/tcp/0".parse().unwrap()])
+                    .boot_nodes(dsn_boot_nodes),
+            )
+            .network(
+                NetworkBuilder::dev()
+                    .force_synced(!not_force_synced)
+                    .listen_addresses(vec!["/ip4/127.0.0.1/tcp/0".parse().unwrap()])
+                    .boot_nodes(boot_nodes),
+            )
+            .role(if not_authority { Role::Full } else { Role::Authority })
+            .is_timekeeper(!not_authority);
+
+        let node = if enable_domains {
+            node.domain(Some(DomainConfigBuilder::dev().configuration()))
+        } else {
+            node
+        };
+
+        #[cfg(all(feature = "core-payments", feature = "executor"))]
+        let node = if enable_core {
+            node.system_domain(subspace_sdk::node::domains::ConfigBuilder::new().core_payments(
+                subspace_sdk::node::domains::core_payments::ConfigBuilder::new().build(),
+            ))
+        } else {
+            node
+        };
+
+        let node = node.build(path.path().join("node"), chain.clone()).await.unwrap();
+
+        Node { node, path, chain }
+    }
+}
+
+impl Node {
+    pub fn dev() -> NodeBuilder {
+        NodeBuilder::default()
+    }
+
+    pub fn path(&self) -> Arc<TempDir> {
+        Arc::clone(&self.path)
+    }
+
+    pub async fn close(self) {
+        self.node.close().await.unwrap();
+    }
+}
+
+#[derive(Builder)]
+#[builder(pattern = "immutable", build_fn(private, name = "_build"), name = "FarmerBuilder")]
+pub struct InnerFarmer {
+    #[builder(default)]
+    reward_address: subspace_sdk::PublicKey,
+    #[builder(default = "50")]
+    pieces_in_sector: u16,
+}
+
+#[derive(Deref, DerefMut)]
+pub struct Farmer {
+    #[deref]
+    #[deref_mut]
+    farmer: subspace_sdk::Farmer,
+    pub path: Arc<TempDir>,
+}
+
+impl FarmerBuilder {
+    pub async fn build(self, node: &Node, space_pledged: ByteSize) -> Farmer {
+        let InnerFarmer { reward_address, pieces_in_sector } = self._build().expect("Infallible");
+        let farmer = subspace_sdk::Farmer::builder()
+            .max_pieces_in_sector(Some(pieces_in_sector))
+            .build(
+                reward_address,
+                &**node,
+                &[FarmDescription::new(
+                    node.path().path().join("plot"),
+                    // TODO: account for overhead here
+                    space_pledged,
+                )],
+                NonZeroU8::new(20).expect("Static value should not fail; qed"),
+            )
+            .await
+            .unwrap();
+        Farmer { farmer, path: node.path() }
+    }
+}
+
+impl Farmer {
+    pub fn dev() -> FarmerBuilder {
+        FarmerBuilder::default()
+    }
+
+    pub fn plot_dir(&self) -> PathBuf {
+        self.path.path().join("plot")
+    }
+
+    pub async fn close(self) {
+        self.farmer.close().await.unwrap()
+    }
+}
diff --git a/sdk/subspace-sdk/tests/integration/domains.rs b/sdk/subspace-sdk/tests/integration/domains.rs
new file mode 100644
index 00000000..6461400b
--- /dev/null
+++ b/sdk/subspace-sdk/tests/integration/domains.rs
@@ -0,0 +1,34 @@
+use futures::prelude::*;
+use sdk_utils::ByteSize;
+
+use crate::common::{Farmer, Node};
+
+#[tokio::test(flavor = "multi_thread")]
+async fn core_start() {
+    crate::common::setup();
+
+    let number_of_sectors = 10;
+    let pieces_in_sector = 50u16;
+    let sector_size = subspace_farmer_components::sector::sector_size(pieces_in_sector as _);
+    let space_pledged = sector_size * number_of_sectors;
+
+    let node = Node::dev().enable_core(true).build(true).await;
+    let farmer = Farmer::dev()
+        .pieces_in_sector(pieces_in_sector)
+        .build(&node, ByteSize::b(space_pledged as u64))
+        .await;
+
+    node.system_domain()
+        .unwrap()
+        .payments()
+        .unwrap()
+        .subscribe_new_heads()
+        .await
+        .unwrap()
+        .next()
+        .await
+        .unwrap();
+
+    farmer.close().await;
+    node.close().await;
+}
diff --git a/sdk/subspace-sdk/tests/integration/farmer.rs b/sdk/subspace-sdk/tests/integration/farmer.rs
new file mode 100644
index 00000000..e7ed6083
--- /dev/null
+++ b/sdk/subspace-sdk/tests/integration/farmer.rs
@@ -0,0 +1,138 @@
+use futures::prelude::*;
+use subspace_sdk::utils::ByteSize;
+
+use crate::common::{Farmer, Node};
+
+#[tokio::test(flavor = "multi_thread")]
+#[ignore = "We need api from single disk plot to calculate precise target sector count"]
+async fn track_progress() {
+    crate::common::setup();
+
+    let number_of_sectors = 10;
+    let pieces_in_sector = 50u16;
+    let sector_size = subspace_farmer_components::sector::sector_size(pieces_in_sector as _);
+    let space_pledged = sector_size * number_of_sectors;
+
+    let node = Node::dev().build(true).await;
+    let farmer = Farmer::dev()
+        .pieces_in_sector(pieces_in_sector)
+        .build(&node, ByteSize::b(space_pledged as u64))
+        .await;
+
+    let progress = farmer
+        .iter_farms()
+        .await
+        .next()
+        .unwrap()
+        .subscribe_initial_plotting_progress()
+        .await
+        .collect::<Vec<_>>()
+        .await;
+    assert_eq!(progress.len(), number_of_sectors);
+
+    farmer.close().await;
+    node.close().await;
+}
+
+#[tokio::test(flavor = "multi_thread")]
+async fn new_solution() {
+    crate::common::setup();
+
+    let number_of_sectors = 10;
+    let pieces_in_sector = 50u16;
+    let sector_size = subspace_farmer_components::sector::sector_size(pieces_in_sector as _);
+    let space_pledged = sector_size * number_of_sectors;
+
+    let node = Node::dev().build(true).await;
+    let farmer = Farmer::dev()
+        .pieces_in_sector(pieces_in_sector)
+        .build(&node, ByteSize::b(space_pledged as u64))
+        .await;
+
+    farmer
+        .iter_farms()
+        .await
+        .next()
+        .unwrap()
+        .subscribe_new_solutions()
+        .await
+        .next()
+        .await
+        .expect("Farmer should send new solutions");
+
+    farmer.close().await;
+    node.close().await;
+}
+
+#[tokio::test(flavor = "multi_thread")]
+async fn progress_restart() {
+    crate::common::setup();
+
+    let number_of_sectors = 10;
+    let pieces_in_sector = 50u16;
+    let sector_size = subspace_farmer_components::sector::sector_size(pieces_in_sector as _);
+    let space_pledged = sector_size * number_of_sectors;
+
+    let node = Node::dev().build(true).await;
+    let farmer = Farmer::dev()
+        .pieces_in_sector(pieces_in_sector)
+        .build(&node, ByteSize::b(space_pledged as u64))
+        .await;
+
+    let plot = farmer.iter_farms().await.next().unwrap();
+
+    plot.subscribe_initial_plotting_progress().await.for_each(|_| async {}).await;
+
+    tokio::time::timeout(
+        std::time::Duration::from_secs(5),
+        plot.subscribe_initial_plotting_progress().await.for_each(|_| async {}),
+    )
+    .await
+    .unwrap();
+
+    farmer.close().await;
+    node.close().await;
+}
+
+#[tokio::test(flavor = "multi_thread")]
+#[ignore = "Stack overflows for now"]
+async fn farmer_restart() {
+    crate::common::setup();
+
+    let number_of_sectors = 10;
+    let pieces_in_sector = 50u16;
+    let sector_size = subspace_farmer_components::sector::sector_size(pieces_in_sector as _);
+    let space_pledged = sector_size * number_of_sectors;
+
+    let node = Node::dev().build(true).await;
+
+    for _ in 0..10 {
+        Farmer::dev()
+            .pieces_in_sector(pieces_in_sector)
+            .build(&node, ByteSize::b(space_pledged as u64))
+            .await
+            .close()
+            .await;
+    }
+
+    node.close().await;
+}
+
+#[tokio::test(flavor = "multi_thread")]
+async fn farmer_close() {
+    crate::common::setup();
+
+    let number_of_sectors = 10;
+    let pieces_in_sector = 50u16;
+    let sector_size = subspace_farmer_components::sector::sector_size(pieces_in_sector as _);
+    let space_pledged = sector_size * number_of_sectors;
+
+    let node = Node::dev().build(true).await;
+    let farmer = Farmer::dev()
+        .pieces_in_sector(pieces_in_sector)
+        .build(&node, ByteSize::b(space_pledged as u64))
+        .await;
+
+    farmer.close().await;
+    node.close().await;
+}
diff --git a/sdk/subspace-sdk/tests/integration/main.rs b/sdk/subspace-sdk/tests/integration/main.rs
new file mode 100644
index 00000000..4bba246f
--- /dev/null
+++ b/sdk/subspace-sdk/tests/integration/main.rs
@@ -0,0 +1,13 @@
+pub mod common;
+#[cfg(all(feature = "core-payments", feature = "executor"))]
+mod domains;
+mod farmer;
+mod node;
+
+#[global_allocator]
+static GLOBAL: mimalloc::MiMalloc = mimalloc::MiMalloc;
+
+#[test]
+fn pubkey_parse() {
+    "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY".parse::<subspace_sdk::PublicKey>().unwrap();
+}
diff --git a/sdk/subspace-sdk/tests/integration/node.rs b/sdk/subspace-sdk/tests/integration/node.rs
new file mode 100644
index 00000000..179ea016
--- /dev/null
+++ b/sdk/subspace-sdk/tests/integration/node.rs
@@ -0,0 +1,205 @@
+use std::sync::Arc;
+
+use futures::prelude::*;
+use subspace_sdk::utils::ByteSize;
+use tempfile::TempDir;
+use tracing_futures::Instrument;
+
+use crate::common::{Farmer, Node};
+
+async fn sync_block_inner() {
+    crate::common::setup();
+
+    let number_of_sectors = 10;
+    let pieces_in_sector = 50u16;
+    let sector_size = subspace_farmer_components::sector::sector_size(pieces_in_sector as _);
+    let space_pledged = sector_size * number_of_sectors;
+
+    let node = Node::dev().build(true).await;
+    let farmer = Farmer::dev()
+        .pieces_in_sector(pieces_in_sector)
+        .build(&node, ByteSize::b(space_pledged as u64))
+        .await;
+
+    let farm_blocks = 5;
+
+    node.subscribe_new_heads()
+        .await
+        .unwrap()
+        .skip_while(|notification| futures::future::ready(notification.number < farm_blocks))
+        .next()
+        .await
+        .unwrap();
+
+    farmer.close().await;
+
+    let other_node = Node::dev()
+        .chain(node.chain.clone())
+        .boot_nodes(node.listen_addresses().await.unwrap())
+        .not_force_synced(true)
+        .not_authority(true)
+        .build(false)
+        .await;
+
+    other_node.subscribe_syncing_progress().await.unwrap().for_each(|_| async {}).await;
+    assert_eq!(other_node.get_info().await.unwrap().best_block.1, farm_blocks);
+
+    node.close().await;
+    other_node.close().await;
+}
+
+#[tokio::test(flavor = "multi_thread")]
+//#[cfg_attr(any(tarpaulin, not(target_os = "linux")), ignore = "Slow tests are run only on linux")]
+async fn sync_block() {
+    tokio::time::timeout(std::time::Duration::from_secs(60 * 60), sync_block_inner()).await.unwrap()
+}
+
+async fn sync_farm_inner() {
+    crate::common::setup();
+
+    let number_of_sectors = 10;
+    let pieces_in_sector = 50u16;
+    let sector_size = subspace_farmer_components::sector::sector_size(pieces_in_sector as _);
+    let space_pledged = sector_size * number_of_sectors;
+
+    let node_span = tracing::trace_span!("node 1");
+    let node = Node::dev().build(true).instrument(node_span.clone()).await;
+
+    let farmer = Farmer::dev()
+        .pieces_in_sector(pieces_in_sector)
+        .build(&node, ByteSize::b(space_pledged as u64))
+        .instrument(node_span.clone())
+        .await;
+
+    let farm_blocks = 4;
+
+    node.subscribe_new_heads()
+        .await
+        .unwrap()
+        .skip_while(|notification| futures::future::ready(notification.number < farm_blocks))
+        .next()
+        .await
+        .unwrap();
+
+    let other_node_span = tracing::trace_span!("node 2");
+    let other_node = Node::dev()
+        .dsn_boot_nodes(node.dsn_listen_addresses().await.unwrap())
+        .boot_nodes(node.listen_addresses().await.unwrap())
+        .not_force_synced(true)
+        .chain(node.chain.clone())
+        .build(false)
+        .instrument(other_node_span.clone())
+        .await;
+
+    while other_node.get_info().await.unwrap().best_block.1
+        < node.get_info().await.unwrap().best_block.1
+    {
+        tokio::time::sleep(std::time::Duration::from_secs(1)).await;
+    }
+
+    let other_farmer = Farmer::dev()
+        .pieces_in_sector(pieces_in_sector)
+        .build(&other_node, ByteSize::b(space_pledged as u64))
+        .instrument(other_node_span.clone())
+        .await;
+
+    let farm = other_farmer.iter_farms().await.next().unwrap();
+    farm.subscribe_initial_plotting_progress().await.for_each(|_| async {}).await;
+    farmer.close().await;
+
+    farm.subscribe_new_solutions().await.next().await.expect("Solution stream never ends");
+
+    node.close().await;
+    other_node.close().await;
+    other_farmer.close().await;
+}
+
+#[tokio::test(flavor = "multi_thread")]
+//#[cfg_attr(any(tarpaulin, not(target_os = "linux")), ignore = "Slow tests are run only on linux")]
+async fn sync_farm() {
+    tokio::time::timeout(std::time::Duration::from_secs(60 * 60), sync_farm_inner()).await.unwrap()
+}
+
+#[tokio::test(flavor = "multi_thread")]
+#[ignore = "Substrate rpc server doesn't let the node exit properly"]
+async fn node_restart() {
+    crate::common::setup();
+    let dir = Arc::new(TempDir::new().unwrap());
+
+    for i in 0..4 {
+        tracing::error!(i, "Running new node");
+        Node::dev().path(dir.clone()).build(true).await.close().await;
+    }
+}
+
+#[tokio::test(flavor = "multi_thread")]
+//#[cfg_attr(any(tarpaulin, not(target_os = "linux")), ignore = "Slow tests are run only on linux")]
+async fn node_events() {
+    crate::common::setup();
+
+    tokio::time::timeout(std::time::Duration::from_secs(30 * 60), async {
+        let number_of_sectors = 10;
+        let pieces_in_sector = 50u16;
+        let sector_size = subspace_farmer_components::sector::sector_size(pieces_in_sector as _);
+        let space_pledged = sector_size * number_of_sectors;
space_pledged = sector_size * number_of_sectors; + + let node = Node::dev().build(true).await; + let farmer = Farmer::dev() + .pieces_in_sector(pieces_in_sector) + .build(&node, ByteSize::b(space_pledged as u64)) + .await; + + let events = node + .subscribe_new_heads() + .await + .unwrap() + // Skip genesis + .skip(1) + .then(|_| node.get_events(None).boxed()) + .take(1) + .next() + .await + .unwrap() + .unwrap(); + + assert!(!events.is_empty()); + + farmer.close().await; + node.close().await; + }) + .await + .unwrap(); +} + +#[tokio::test(flavor = "multi_thread")] +//#[cfg_attr(any(tarpaulin, not(target_os = "linux")), ignore = "Slow tests are +//#[cfg_attr(any(tarpaulin, run only on linux")] +async fn fetch_block_author() { + crate::common::setup(); + + tokio::time::timeout(std::time::Duration::from_secs(30 * 60), async { + let number_of_sectors = 10; + let pieces_in_sector = 50u16; + let sector_size = subspace_farmer_components::sector::sector_size(pieces_in_sector as _); + let space_pledged = sector_size * number_of_sectors; + + let node = Node::dev().build(false).await; + let reward_address = Default::default(); + let farmer = Farmer::dev() + .reward_address(reward_address) + .pieces_in_sector(pieces_in_sector) + .build(&node, ByteSize::b(space_pledged as u64)) + .await; + + let block = node.subscribe_new_heads().await.unwrap().skip(1).take(1).next().await.unwrap(); + assert_eq!(block.pre_digest.unwrap().solution().reward_address, reward_address); + + farmer.close().await; + node.close().await; + }) + .await + .unwrap(); +} diff --git a/sdk/substrate/Cargo.toml b/sdk/substrate/Cargo.toml new file mode 100644 index 00000000..63631f68 --- /dev/null +++ b/sdk/substrate/Cargo.toml @@ -0,0 +1,24 @@ +[package] +name = "sdk-substrate" +version = "0.1.0" +edition = "2021" +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +bytesize = "1.1" +derivative = "2.2.0" +derive_builder = "0.12" +derive_more = "0.99" +names = { version = "0.14.0", default-features = false } +sc-chain-spec = { version = "4.0.0-dev", git = "https://github.com/subspace/polkadot-sdk", rev = "c63a8b28a9fd26d42116b0dcef1f2a5cefb9cd1c" } +sc-executor = { version = "0.10.0-dev", git = "https://github.com/subspace/polkadot-sdk", rev = "c63a8b28a9fd26d42116b0dcef1f2a5cefb9cd1c" } +sc-informant = { version = "0.10.0-dev", git = "https://github.com/subspace/polkadot-sdk", rev = "c63a8b28a9fd26d42116b0dcef1f2a5cefb9cd1c" } +sc-network = { version = "0.10.0-dev", git = "https://github.com/subspace/polkadot-sdk", rev = "c63a8b28a9fd26d42116b0dcef1f2a5cefb9cd1c" } +sc-service = { version = "0.10.0-dev", git = "https://github.com/subspace/polkadot-sdk", rev = "c63a8b28a9fd26d42116b0dcef1f2a5cefb9cd1c", default-features = false } +sc-state-db = { version = "0.10.0-dev", git = "https://github.com/subspace/polkadot-sdk", rev = "c63a8b28a9fd26d42116b0dcef1f2a5cefb9cd1c" } +sc-storage-monitor = { version = "0.1.0", git = "https://github.com/subspace/polkadot-sdk", rev = "c63a8b28a9fd26d42116b0dcef1f2a5cefb9cd1c", default-features = false } +sdk-utils = { path = "../utils" } +serde = { version = "1", features = ["derive"] } +serde_json = "1" +sp-runtime = { version = "24.0.0", git = "https://github.com/subspace/polkadot-sdk", rev = "c63a8b28a9fd26d42116b0dcef1f2a5cefb9cd1c" } +tokio = { version = "1.34.0", features = ["fs", "rt", "tracing"] } diff --git a/sdk/substrate/build.rs b/sdk/substrate/build.rs new file mode 100644 index 00000000..364bd6f5 --- /dev/null +++ 
b/sdk/substrate/build.rs @@ -0,0 +1,5 @@ +fn main() { + let output = std::process::Command::new("git").args(["rev-parse", "HEAD"]).output().unwrap(); + let git_hash = String::from_utf8(output.stdout).unwrap(); + println!("cargo:rustc-env=GIT_HASH={git_hash}"); +} diff --git a/sdk/substrate/src/lib.rs b/sdk/substrate/src/lib.rs new file mode 100644 index 00000000..38307eba --- /dev/null +++ b/sdk/substrate/src/lib.rs @@ -0,0 +1,452 @@ +//! Crate with abstraction over substrate logic + +#![warn( + missing_docs, + clippy::dbg_macro, + clippy::unwrap_used, + clippy::disallowed_types, + unused_features +)] +#![cfg_attr(not(test), warn(unused_crate_dependencies))] +#![feature(concat_idents)] + +use std::net::{IpAddr, Ipv4Addr, SocketAddr}; +use std::path::Path; + +use derivative::Derivative; +use derive_builder::Builder; +use sc_executor::{WasmExecutionMethod, WasmtimeInstantiationStrategy}; +use sc_network::config::{NodeKeyConfig, Secret}; +use sc_service::config::{KeystoreConfig, NetworkConfiguration, TransportConfig}; +use sc_service::{BasePath, Configuration, DatabaseSource, TracingReceiver}; +use sdk_utils::{Multiaddr, MultiaddrWithPeerId}; +use serde::{Deserialize, Serialize}; +pub use types::*; + +mod types; + +#[doc(hidden)] +#[derive(Debug, Clone, Derivative, Builder, Deserialize, Serialize, PartialEq)] +#[derivative(Default)] +#[builder(pattern = "owned", build_fn(private, name = "_build"), name = "BaseBuilder")] +#[non_exhaustive] +pub struct Base { + /// Force block authoring + #[builder(default)] + #[serde(default, skip_serializing_if = "sdk_utils::is_default")] + pub force_authoring: bool, + /// Set node role + #[builder(default)] + #[serde(default, skip_serializing_if = "sdk_utils::is_default")] + pub role: Role, + /// Blocks pruning options + #[builder(default)] + #[serde(default, skip_serializing_if = "sdk_utils::is_default")] + pub blocks_pruning: BlocksPruning, + /// State pruning options + #[builder(default)] + #[serde(default, skip_serializing_if = "sdk_utils::is_default")] + pub state_pruning: PruningMode, + /// Implementation name + #[builder(default)] + #[serde(default, skip_serializing_if = "sdk_utils::is_default")] + pub impl_name: ImplName, + /// Implementation version + #[builder(default)] + #[serde(default, skip_serializing_if = "sdk_utils::is_default")] + pub impl_version: ImplVersion, + /// Rpc settings + #[builder(setter(into), default)] + #[serde(default, skip_serializing_if = "sdk_utils::is_default")] + pub rpc: Rpc, + /// Network settings + #[builder(setter(into), default)] + #[serde(default, skip_serializing_if = "sdk_utils::is_default")] + pub network: Network, + /// Offchain worker settings + #[builder(setter(into), default)] + #[serde(default, skip_serializing_if = "sdk_utils::is_default")] + pub offchain_worker: OffchainWorker, + /// Enable color for substrate informant + #[builder(default)] + #[serde(default, skip_serializing_if = "sdk_utils::is_default")] + pub informant_enable_color: bool, + /// Additional telemetry endpoints + #[builder(default)] + #[serde(default, skip_serializing_if = "sdk_utils::is_default")] + pub telemetry: Vec<(Multiaddr, u8)>, + /// Dev key seed + #[builder(setter(strip_option), default)] + #[serde(default, skip_serializing_if = "sdk_utils::is_default")] + pub dev_key_seed: Option, +} + +#[doc(hidden)] +#[macro_export] +macro_rules! derive_base { + ( + $(< $( $lt:tt $( : $clt:tt $(+ $dlt:tt )* )? ),+ >)? @ $base:ty => $builder:ident { + $( + #[doc = $doc:literal] + $field:ident : $field_ty:ty + ),+ + $(,)? 
+ } + ) => { + impl $(< $( $lt $( : $clt $(+ $dlt )* )? ),+ >)? $builder $(< $($lt),+ >)? { + $( + #[doc = $doc] + pub fn $field(mut self, $field: impl Into<$field_ty>) -> Self { + self.base = self.base.$field($field.into()); + self + } + )* + } + }; + + ( $(< $( $lt:tt $( : $clt:tt $(+ $dlt:tt )* )? ),+ >)? @ $base:ty => $builder:ident ) => { + $crate::derive_base!( + $(< $( $lt $( : $clt $(+ $dlt )* )? ),+ >)? @ $base => $builder { + /// Force block authoring + force_authoring: bool, + /// Set node role + role: $crate::Role, + /// Blocks pruning options + blocks_pruning: $crate::BlocksPruning, + /// State pruning options + state_pruning: $crate::PruningMode, + /// Implementation name + impl_name: $crate::ImplName, + /// Implementation version + impl_version: $crate::ImplVersion, + /// Rpc settings + rpc: $crate::Rpc, + /// Network settings + network: $crate::Network, + /// Offchain worker settings + offchain_worker: $crate::OffchainWorker, + /// Enable color for substrate informant + informant_enable_color: bool, + /// Additional telemetry endpoints + telemetry: Vec<(sdk_utils::Multiaddr, u8)>, + /// Dev key seed + dev_key_seed: String + }); + } +} + +impl Base { + const NODE_NAME_MAX_LENGTH: usize = 64; + + pub async fn configuration( + self, + directory: impl AsRef, + chain_spec: CS, + ) -> Configuration + where + CS: sc_chain_spec::ChainSpec + + serde::Serialize + + serde::de::DeserializeOwned + + sp_runtime::BuildStorage + + 'static, + { + const NODE_KEY_ED25519_FILE: &str = "secret_ed25519"; + const DEFAULT_NETWORK_CONFIG_PATH: &str = "network"; + + let Self { + force_authoring, + role, + blocks_pruning, + state_pruning, + impl_name: ImplName(impl_name), + impl_version: ImplVersion(impl_version), + rpc: + Rpc { + addr: rpc_addr, + port: rpc_port, + max_connections: rpc_max_connections, + cors: rpc_cors, + methods: rpc_methods, + max_request_size: rpc_max_request_size, + max_response_size: rpc_max_response_size, + max_subs_per_conn: rpc_max_subs_per_conn, + }, + network, + offchain_worker, + informant_enable_color, + telemetry, + dev_key_seed, + } = self; + + let base_path = BasePath::new(directory.as_ref()); + let config_dir = base_path.config_dir(chain_spec.id()); + + let mut network = { + let Network { + listen_addresses, + boot_nodes, + force_synced, + name, + client_id, + enable_mdns, + allow_private_ip, + allow_non_globals_in_dht, + } = network; + let name = name.unwrap_or_else(|| { + names::Generator::with_naming(names::Name::Numbered) + .next() + .filter(|name| name.chars().count() < Self::NODE_NAME_MAX_LENGTH) + .expect("RNG is available on all supported platforms; qed") + }); + + let client_id = client_id.unwrap_or_else(|| format!("{impl_name}/v{impl_version}")); + let config_dir = config_dir.join(DEFAULT_NETWORK_CONFIG_PATH); + let listen_addresses = listen_addresses.into_iter().map(Into::into).collect::>(); + + NetworkConfiguration { + listen_addresses, + boot_nodes: chain_spec + .boot_nodes() + .iter() + .cloned() + .chain(boot_nodes.into_iter().map(Into::into)) + .collect(), + force_synced, + transport: TransportConfig::Normal { enable_mdns, allow_private_ip }, + allow_non_globals_in_dht, + ..NetworkConfiguration::new( + name, + client_id, + NodeKeyConfig::Ed25519(Secret::File(config_dir.join(NODE_KEY_ED25519_FILE))), + Some(config_dir), + ) + } + }; + + // Increase default value of 25 to improve success rate of sync + network.default_peers_set.out_peers = 50; + // Full + Light clients + network.default_peers_set.in_peers = 25 + 100; + let keystore = 
KeystoreConfig::InMemory; + + // HACK: Tricky way to add extra endpoints as we can't push into telemetry + // endpoints + let telemetry_endpoints = match chain_spec.telemetry_endpoints() { + Some(endpoints) => { + let Ok(serde_json::Value::Array(extra_telemetry)) = + serde_json::to_value(&telemetry) + else { + unreachable!("Will always return an array") + }; + let Ok(serde_json::Value::Array(telemetry)) = serde_json::to_value(endpoints) + else { + unreachable!("Will always return an array") + }; + + serde_json::from_value(serde_json::Value::Array( + telemetry.into_iter().chain(extra_telemetry).collect::>(), + )) + .expect("Serialization is always valid") + } + None => sc_service::config::TelemetryEndpoints::new( + telemetry.into_iter().map(|(endpoint, n)| (endpoint.to_string(), n)).collect(), + ) + .expect("Never returns an error"), + }; + + Configuration { + impl_name, + impl_version, + tokio_handle: tokio::runtime::Handle::current(), + transaction_pool: Default::default(), + network, + keystore, + database: DatabaseSource::ParityDb { path: config_dir.join("paritydb").join("full") }, + trie_cache_maximum_size: Some(67_108_864), + state_pruning: Some(state_pruning.into()), + blocks_pruning: blocks_pruning.into(), + wasm_method: WasmExecutionMethod::Compiled { + instantiation_strategy: WasmtimeInstantiationStrategy::PoolingCopyOnWrite, + }, + wasm_runtime_overrides: None, + rpc_addr, + rpc_port: rpc_port.unwrap_or_default(), + rpc_methods: rpc_methods.into(), + rpc_max_connections: rpc_max_connections.unwrap_or_default() as u32, + rpc_cors, + rpc_max_request_size: rpc_max_request_size.unwrap_or_default() as u32, + rpc_max_response_size: rpc_max_response_size.unwrap_or_default() as u32, + rpc_id_provider: None, + rpc_max_subs_per_conn: rpc_max_subs_per_conn.unwrap_or_default() as u32, + prometheus_config: None, + telemetry_endpoints: Some(telemetry_endpoints), + default_heap_pages: None, + offchain_worker: offchain_worker.into(), + force_authoring, + disable_grandpa: false, + dev_key_seed, + tracing_targets: None, + tracing_receiver: TracingReceiver::Log, + chain_spec: Box::new(chain_spec), + max_runtime_instances: 8, + announce_block: true, + role: role.into(), + base_path, + data_path: config_dir, + informant_output_format: sc_informant::OutputFormat { + enable_color: informant_enable_color, + }, + runtime_cache_size: 2, + } + } +} + +/// Node RPC builder +#[derive(Debug, Clone, Derivative, Builder, Deserialize, Serialize, PartialEq, Eq)] +#[derivative(Default)] +#[builder(pattern = "owned", build_fn(private, name = "_build"), name = "RpcBuilder")] +#[non_exhaustive] +pub struct Rpc { + /// Rpc address + #[builder(setter(strip_option), default)] + #[serde(default, skip_serializing_if = "sdk_utils::is_default")] + pub addr: Option, + /// RPC port + #[builder(setter(strip_option), default)] + #[serde(default, skip_serializing_if = "sdk_utils::is_default")] + pub port: Option, + /// Maximum number of connections for RPC server. `None` if default. + #[builder(setter(strip_option), default)] + #[serde(default, skip_serializing_if = "sdk_utils::is_default")] + pub max_connections: Option, + /// CORS settings for HTTP & WS servers. `None` if all origins are + /// allowed. + #[builder(setter(strip_option), default)] + #[serde(default, skip_serializing_if = "sdk_utils::is_default")] + pub cors: Option>, + /// RPC methods to expose (by default only a safe subset or all of + /// them). 
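+    /// The presets defined below pick sensible values for each network; a
+    /// sketch of a call site (`local_test` is defined in this file, `build()`
+    /// is generated by `sdk_utils::generate_builder!`):
+    ///
+    /// ```ignore
+    /// // Local-only RPC for tests: every method is safe on localhost.
+    /// let rpc: Rpc = RpcBuilder::local_test(9944).build();
+    /// ```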
+    #[builder(default)]
+    #[serde(default, skip_serializing_if = "sdk_utils::is_default")]
+    pub methods: RpcMethods,
+    /// Maximum payload of a rpc request
+    #[builder(setter(strip_option), default)]
+    #[serde(default, skip_serializing_if = "sdk_utils::is_default")]
+    pub max_request_size: Option<usize>,
+    /// Maximum payload of a rpc response
+    #[builder(setter(strip_option), default)]
+    #[serde(default, skip_serializing_if = "sdk_utils::is_default")]
+    pub max_response_size: Option<usize>,
+    /// Maximum allowed subscriptions per rpc connection
+    #[builder(default)]
+    #[serde(default, skip_serializing_if = "sdk_utils::is_default")]
+    pub max_subs_per_conn: Option<usize>,
+}
+
+impl RpcBuilder {
+    /// Dev configuration
+    pub fn dev() -> Self {
+        Self::default()
+    }
+
+    /// Local test configuration to have rpc exposed locally
+    pub fn local_test(port: u16) -> Self {
+        Self::dev()
+            .addr(SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), port))
+            .port(port)
+            .max_connections(100)
+            .max_request_size(10 * 1024)
+            .max_response_size(10 * 1024)
+            .max_subs_per_conn(Some(100))
+    }
+
+    /// Gemini 3g configuration
+    pub fn gemini_3g() -> Self {
+        Self::new().addr("127.0.0.1:9944".parse().expect("hardcoded value is true")).cors(vec![
+            "http://localhost:*".to_owned(),
+            "http://127.0.0.1:*".to_owned(),
+            "https://localhost:*".to_owned(),
+            "https://127.0.0.1:*".to_owned(),
+            "https://polkadot.js.org".to_owned(),
+        ])
+    }
+
+    /// Devnet configuration
+    pub fn devnet() -> Self {
+        Self::new().addr("127.0.0.1:9944".parse().expect("hardcoded value is true")).cors(vec![
+            "http://localhost:*".to_owned(),
+            "http://127.0.0.1:*".to_owned(),
+            "https://localhost:*".to_owned(),
+            "https://127.0.0.1:*".to_owned(),
+            "https://polkadot.js.org".to_owned(),
+        ])
+    }
+}
+
+/// Node network builder
+#[derive(Debug, Default, Clone, Builder, Deserialize, Serialize, PartialEq)]
+#[builder(pattern = "owned", build_fn(private, name = "_build"), name = "NetworkBuilder")]
+#[non_exhaustive]
+pub struct Network {
+    /// Enable mDNS peer discovery
+    #[builder(default)]
+    #[serde(default, skip_serializing_if = "sdk_utils::is_default")]
+    pub enable_mdns: bool,
+    /// Allow connections to private/local IP addresses
+    #[builder(default)]
+    #[serde(default, skip_serializing_if = "sdk_utils::is_default")]
+    pub allow_private_ip: bool,
+    /// Allow non globals in network DHT
+    #[builder(default)]
+    #[serde(default, skip_serializing_if = "sdk_utils::is_default")]
+    pub allow_non_globals_in_dht: bool,
+    /// Listen on some address for other nodes
+    #[builder(default)]
+    #[serde(default, skip_serializing_if = "sdk_utils::is_default")]
+    pub listen_addresses: Vec<Multiaddr>,
+    /// Boot nodes
+    #[builder(default)]
+    #[serde(default, skip_serializing_if = "Vec::is_empty")]
+    pub boot_nodes: Vec<MultiaddrWithPeerId>,
+    /// Force node to think it is synced
+    #[builder(default)]
+    #[serde(default, skip_serializing_if = "sdk_utils::is_default")]
+    pub force_synced: bool,
+    /// Node name
+    #[builder(setter(into, strip_option), default)]
+    #[serde(default, skip_serializing_if = "sdk_utils::is_default")]
+    pub name: Option<String>,
+    /// Client id for telemetry (default is `{IMPL_NAME}/v{IMPL_VERSION}`)
+    #[builder(setter(into, strip_option), default)]
+    #[serde(default, skip_serializing_if = "sdk_utils::is_default")]
+    pub client_id: Option<String>,
+}
+
+impl NetworkBuilder {
+    /// Dev chain configuration
+    pub fn dev() -> Self {
+        Self::default().force_synced(true).allow_private_ip(true)
+    }
+
+    /// Gemini 3g configuration
+    pub fn gemini_3g() -> Self {
+        Self::default()
+            .listen_addresses(vec![
+                "/ip6/::/tcp/30333".parse().expect("hardcoded value is true"),
+                "/ip4/0.0.0.0/tcp/30333".parse().expect("hardcoded value is true"),
+            ])
+            .enable_mdns(true)
+    }
+
+    /// Dev network configuration
+    pub fn devnet() -> Self {
+        Self::default()
+            .listen_addresses(vec![
+                "/ip6/::/tcp/30333".parse().expect("hardcoded value is true"),
+                "/ip4/0.0.0.0/tcp/30333".parse().expect("hardcoded value is true"),
+            ])
+            .enable_mdns(true)
+    }
+}
+
+sdk_utils::generate_builder!(Base, Rpc, Network);
diff --git a/sdk/substrate/src/types.rs b/sdk/substrate/src/types.rs
new file mode 100644
index 00000000..f60bddd1
--- /dev/null
+++ b/sdk/substrate/src/types.rs
@@ -0,0 +1,222 @@
+use derivative::Derivative;
+use derive_builder::Builder;
+use derive_more::{Deref, DerefMut, Display, From};
+use sdk_utils::ByteSize;
+use serde::{Deserialize, Serialize};
+
+/// Block pruning settings.
+#[derive(Debug, Clone, Copy, PartialEq, Default, Serialize, Deserialize, Eq, PartialOrd, Ord)]
+pub enum BlocksPruning {
+    #[default]
+    /// Keep full block history, of every block that was ever imported.
+    KeepAll,
+    /// Keep full finalized block history.
+    KeepFinalized,
+    /// Keep N recent finalized blocks.
+    Some(u32),
+}
+
+impl From<sc_service::BlocksPruning> for BlocksPruning {
+    fn from(value: sc_service::BlocksPruning) -> Self {
+        match value {
+            sc_service::BlocksPruning::KeepAll => Self::KeepAll,
+            sc_service::BlocksPruning::KeepFinalized => Self::KeepFinalized,
+            sc_service::BlocksPruning::Some(n) => Self::Some(n),
+        }
+    }
+}
+
+impl From<BlocksPruning> for sc_service::BlocksPruning {
+    fn from(value: BlocksPruning) -> Self {
+        match value {
+            BlocksPruning::KeepAll => Self::KeepAll,
+            BlocksPruning::KeepFinalized => Self::KeepFinalized,
+            BlocksPruning::Some(n) => Self::Some(n),
+        }
+    }
+}
+
+/// Pruning constraints. If none are specified pruning is unbounded.
+#[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)]
+pub struct Constraints {
+    /// Maximum blocks. Defaults to 0 when unspecified, effectively keeping
+    /// only non-canonical states.
+    pub max_blocks: Option<u32>,
+}
+
+impl From<Constraints> for sc_state_db::Constraints {
+    fn from(Constraints { max_blocks }: Constraints) -> Self {
+        Self { max_blocks }
+    }
+}
+
+impl From<sc_state_db::Constraints> for Constraints {
+    fn from(sc_state_db::Constraints { max_blocks }: sc_state_db::Constraints) -> Self {
+        Self { max_blocks }
+    }
+}
+
+/// Pruning mode.
+#[derive(Debug, Clone, Eq, PartialEq, Default, Serialize, Deserialize)]
+pub enum PruningMode {
+    /// No pruning. Canonicalization is a no-op.
+    #[default]
+    ArchiveAll,
+    /// Canonicalization discards non-canonical nodes. All the canonical
+    /// nodes are kept in the DB.
+    ArchiveCanonical,
+    /// Maintain a pruning window.
+ Constrained(Constraints), +} + +impl From for sc_service::PruningMode { + fn from(value: PruningMode) -> Self { + match value { + PruningMode::ArchiveAll => Self::ArchiveAll, + PruningMode::ArchiveCanonical => Self::ArchiveCanonical, + PruningMode::Constrained(c) => Self::Constrained(c.into()), + } + } +} + +impl From for PruningMode { + fn from(value: sc_service::PruningMode) -> Self { + match value { + sc_service::PruningMode::ArchiveAll => Self::ArchiveAll, + sc_service::PruningMode::ArchiveCanonical => Self::ArchiveCanonical, + sc_service::PruningMode::Constrained(c) => Self::Constrained(c.into()), + } + } +} + +/// Type wrapper with default value for implementation name +#[derive( + Debug, Clone, Derivative, Deserialize, Serialize, PartialEq, Eq, From, Deref, DerefMut, Display, +)] +#[derivative(Default)] +#[serde(transparent)] +pub struct ImplName( + #[derivative(Default(value = "env!(\"CARGO_PKG_NAME\").to_owned()"))] pub String, +); + +/// Type wrapper with default value for implementation version +#[derive( + Debug, Clone, Derivative, Deserialize, Serialize, PartialEq, Eq, From, Deref, DerefMut, Display, +)] +#[derivative(Default)] +#[serde(transparent)] +pub struct ImplVersion( + #[derivative(Default( + value = "format!(\"{}-{}\", env!(\"CARGO_PKG_VERSION\"), env!(\"GIT_HASH\"))" + ))] + pub String, +); + +/// Storage monitor +#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq)] +pub struct StorageMonitor { + /// How much space do we want to reserve + pub threshold: ByteSize, + /// Polling period for threshold + pub polling_period: std::time::Duration, +} + +impl From for sc_storage_monitor::StorageMonitorParams { + fn from(StorageMonitor { threshold, polling_period }: StorageMonitor) -> Self { + Self { + threshold: (threshold.as_u64() / bytesize::MIB).max(1), + polling_period: polling_period.as_secs().max(1) as u32, + } + } +} + +/// Wrapper with default value for max subscriptions per connection +#[derive( + Debug, Clone, Derivative, Deserialize, Serialize, PartialEq, Eq, From, Deref, DerefMut, Display, +)] +#[derivative(Default)] +#[serde(transparent)] +pub struct MaxSubsPerConn(#[derivative(Default(value = "1024"))] pub usize); + +/// Offchain worker config +#[derive(Debug, Clone, Derivative, Builder, Deserialize, Serialize, PartialEq, Eq)] +#[derivative(Default)] +#[builder(pattern = "owned", build_fn(name = "_build"), name = "OffchainWorkerBuilder")] +#[non_exhaustive] +pub struct OffchainWorker { + /// Is enabled + #[builder(default)] + #[serde(default, skip_serializing_if = "sdk_utils::is_default")] + pub enabled: bool, + /// Is indexing enabled + #[builder(default)] + #[serde(default, skip_serializing_if = "sdk_utils::is_default")] + pub indexing_enabled: bool, +} + +impl OffchainWorkerBuilder { + /// Dev chain configuration + pub fn dev() -> Self { + Self::default() + } + + /// Gemini 3g configuration + pub fn gemini_3g() -> Self { + Self::default().enabled(true) + } + + /// Devnet configuration + pub fn devnet() -> Self { + Self::default().enabled(true) + } +} + +impl From for sc_service::config::OffchainWorkerConfig { + fn from(OffchainWorker { enabled, indexing_enabled }: OffchainWorker) -> Self { + Self { enabled, indexing_enabled } + } +} + +sdk_utils::generate_builder!(OffchainWorker); + +/// Role of the local node. +#[derive(Debug, Clone, Copy, Default, Serialize, Deserialize, PartialEq, Eq)] +pub enum Role { + #[default] + /// Regular full node. + Full, + /// Actual authority. 
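+    ///
+    /// Maps to `sc_service::Role::Authority` via the `From` impl below; dev
+    /// chains typically pair this with `force_authoring` from `Base` so a
+    /// single node can produce blocks on its own.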
+ Authority, +} + +impl From for sc_service::Role { + fn from(value: Role) -> Self { + match value { + Role::Full => sc_service::Role::Full, + Role::Authority => sc_service::Role::Authority, + } + } +} + +/// Available RPC methods. +#[derive(Debug, Copy, Clone, Default, Serialize, Deserialize, PartialEq, Eq)] +pub enum RpcMethods { + /// Expose every RPC method only when RPC is listening on `localhost`, + /// otherwise serve only safe RPC methods. + #[default] + Auto, + /// Allow only a safe subset of RPC methods. + Safe, + /// Expose every RPC method (even potentially unsafe ones). + Unsafe, +} + +impl From for sc_service::RpcMethods { + fn from(value: RpcMethods) -> Self { + match value { + RpcMethods::Auto => Self::Auto, + RpcMethods::Safe => Self::Safe, + RpcMethods::Unsafe => Self::Unsafe, + } + } +} diff --git a/sdk/traits/Cargo.toml b/sdk/traits/Cargo.toml new file mode 100644 index 00000000..e39fa4a1 --- /dev/null +++ b/sdk/traits/Cargo.toml @@ -0,0 +1,15 @@ +[package] +name = "sdk-traits" +version = "0.1.0" +edition = "2021" +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +async-trait = "0.1" +parking_lot = "0.12" +sc-client-api = { version = "4.0.0-dev", git = "https://github.com/subspace/polkadot-sdk", rev = "c63a8b28a9fd26d42116b0dcef1f2a5cefb9cd1c" } +sdk-dsn = { path = "../dsn" } +subspace-core-primitives = { git = "https://github.com/subspace/subspace", rev = "bd435100200b3dcce6d6f50534d52e3cd039ca8e" } +subspace-farmer = { git = "https://github.com/subspace/subspace", rev = "bd435100200b3dcce6d6f50534d52e3cd039ca8e", default-features = false } +subspace-proof-of-space = { git = "https://github.com/subspace/subspace", rev = "bd435100200b3dcce6d6f50534d52e3cd039ca8e" } + diff --git a/sdk/traits/src/lib.rs b/sdk/traits/src/lib.rs new file mode 100644 index 00000000..a8ea2bdf --- /dev/null +++ b/sdk/traits/src/lib.rs @@ -0,0 +1,47 @@ +//! 
Crate with interfaces for SDK + +#![warn( + missing_docs, + clippy::dbg_macro, + clippy::unwrap_used, + clippy::disallowed_types, + unused_features +)] +#![cfg_attr(not(test), warn(unused_crate_dependencies))] + +use subspace_farmer::piece_cache::PieceCache as FarmerPieceCache; + +/// Trait which abstracts farmer for node +#[async_trait::async_trait] +pub trait Farmer { + /// Proof of space table + type Table: subspace_proof_of_space::Table; + + /// Fetch piece by its hash + async fn get_piece_by_index( + piece_index: subspace_core_primitives::PieceIndex, + piece_cache: &FarmerPieceCache, + weak_readers_and_pieces: &std::sync::Weak< + parking_lot::Mutex< + Option, + >, + >, + ) -> Option; +} + +/// Trait which abstracts node for farmer +pub trait Node { + /// Client for aux store for DSN + type Client: sc_client_api::AuxStore + Send + Sync + 'static; + /// Proof of space table type + type Table: subspace_proof_of_space::Table; + /// Rpc implementation + type Rpc: subspace_farmer::node_client::NodeClient + Clone; + + /// Node name in telemetry + fn name(&self) -> &str; + /// Shared dsn configuration + fn dsn(&self) -> &sdk_dsn::DsnShared; + /// Rpc + fn rpc(&self) -> &Self::Rpc; +} diff --git a/sdk/utils/Cargo.toml b/sdk/utils/Cargo.toml new file mode 100644 index 00000000..5216cc7c --- /dev/null +++ b/sdk/utils/Cargo.toml @@ -0,0 +1,42 @@ +[package] +name = "sdk-utils" +version = "0.1.0" +edition = "2021" +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +anyhow = "1" +async-trait = "0.1" +base58 = "0.2" +blake2 = "0.10.5" +bytesize = "1" +bytesize-serde = "0.2" +derivative = "2.2.0" +derive_more = "0.99" +frame-support = { version = "4.0.0-dev", git = "https://github.com/subspace/polkadot-sdk", rev = "c63a8b28a9fd26d42116b0dcef1f2a5cefb9cd1c" } +frame-system = { version = "4.0.0-dev", git = "https://github.com/subspace/polkadot-sdk", rev = "c63a8b28a9fd26d42116b0dcef1f2a5cefb9cd1c" } +futures = "0.3" +jsonrpsee-core = "0.16" +libp2p-core = { git = "https://github.com/subspace/rust-libp2p", rev = "d6339da35589d86bae6ecb25a5121c02f2e5b90e" } +parity-scale-codec = "3.6.3" +sc-consensus-subspace-rpc = { git = "https://github.com/subspace/subspace", rev = "bd435100200b3dcce6d6f50534d52e3cd039ca8e" } +sc-network = { version = "0.10.0-dev", git = "https://github.com/subspace/polkadot-sdk", rev = "c63a8b28a9fd26d42116b0dcef1f2a5cefb9cd1c", default-features = false } +sc-rpc = { version = "4.0.0-dev", git = "https://github.com/subspace/polkadot-sdk", rev = "c63a8b28a9fd26d42116b0dcef1f2a5cefb9cd1c", default-features = false } +sc-rpc-api = { version = "0.10.0-dev", git = "https://github.com/subspace/polkadot-sdk", rev = "c63a8b28a9fd26d42116b0dcef1f2a5cefb9cd1c", default-features = false } +sc-service = { version = "0.10.0-dev", git = "https://github.com/subspace/polkadot-sdk", rev = "c63a8b28a9fd26d42116b0dcef1f2a5cefb9cd1c", default-features = false } +serde = { version = "1", features = ["derive"] } +serde_json = "1.0.106" +sp-core = { version = "21.0.0", git = "https://github.com/subspace/polkadot-sdk", rev = "c63a8b28a9fd26d42116b0dcef1f2a5cefb9cd1c" } +sp-core-hashing = { version = "9.0.0", git = "https://github.com/subspace/polkadot-sdk", rev = "c63a8b28a9fd26d42116b0dcef1f2a5cefb9cd1c" } +sp-runtime = { version = "24.0.0", git = "https://github.com/subspace/polkadot-sdk", rev = "c63a8b28a9fd26d42116b0dcef1f2a5cefb9cd1c" } +sp-storage = { version = "13.0.0", git = "https://github.com/subspace/polkadot-sdk", rev = 
"c63a8b28a9fd26d42116b0dcef1f2a5cefb9cd1c" } +ss58-registry = "1.33" +# Unused for now. TODO: add `serde` feature to `subspace-core-primitives` in `subspace-archiver` +subspace-core-primitives = { git = "https://github.com/subspace/subspace", rev = "bd435100200b3dcce6d6f50534d52e3cd039ca8e" } +subspace-farmer = { git = "https://github.com/subspace/subspace", rev = "bd435100200b3dcce6d6f50534d52e3cd039ca8e", default-features = false } +subspace-rpc-primitives = { git = "https://github.com/subspace/subspace", rev = "bd435100200b3dcce6d6f50534d52e3cd039ca8e" } +subspace-runtime = { git = "https://github.com/subspace/subspace", rev = "bd435100200b3dcce6d6f50534d52e3cd039ca8e" } +subspace-runtime-primitives = { git = "https://github.com/subspace/subspace", rev = "bd435100200b3dcce6d6f50534d52e3cd039ca8e" } +thiserror = "1" +tokio = { version = "1.34.0", features = ["fs", "rt", "tracing", "macros", "parking_lot", "rt-multi-thread", "signal"] } +tracing = "0.1" diff --git a/sdk/utils/src/lib.rs b/sdk/utils/src/lib.rs new file mode 100644 index 00000000..34e58149 --- /dev/null +++ b/sdk/utils/src/lib.rs @@ -0,0 +1,998 @@ +//! Utilities crate shared across all SDK crates + +#![warn( + missing_docs, + clippy::dbg_macro, + clippy::unwrap_used, + clippy::disallowed_types, + unused_features +)] +#![cfg_attr(not(test), warn(unused_crate_dependencies))] + +use std::pin::Pin; +use std::sync::Arc; +use std::vec::Drain; + +use anyhow::{anyhow, Context, Result}; +use derive_more::{Deref, DerefMut, Display, From, FromStr, Into}; +use frame_system::pallet_prelude::{BlockNumberFor, HeaderFor}; +use futures::prelude::*; +use jsonrpsee_core::client::{ + BatchResponse, ClientT, Subscription, SubscriptionClientT, SubscriptionKind, +}; +use jsonrpsee_core::params::BatchRequestBuilder; +use jsonrpsee_core::server::rpc_module::RpcModule; +use jsonrpsee_core::traits::ToRpcParams; +use jsonrpsee_core::Error; +use parity_scale_codec::{Decode, Encode}; +pub use parse_ss58::Ss58ParsingError; +use sc_consensus_subspace_rpc::SubspaceRpcApiClient; +use sc_rpc_api::state::StateApiClient; +use serde::de::DeserializeOwned; +use serde::{Deserialize, Serialize}; +use subspace_core_primitives::{Piece, PieceIndex, SegmentHeader, SegmentIndex, PUBLIC_KEY_LENGTH}; +use subspace_farmer::jsonrpsee::tracing; +use subspace_farmer::node_client::{Error as NodeClientError, NodeClient}; +use subspace_rpc_primitives::{ + FarmerAppInfo, NodeSyncStatus, RewardSignatureResponse, RewardSigningInfo, SlotInfo, + SolutionResponse, +}; + +/// Output that indicates whether the task was cancelled or successfully +/// completed +pub enum TaskOutput { + /// Task completed with value of type `T` + Value(T), + /// Task was cancelled due to reason `E` + Cancelled(E), +} + +/// Rpc implementation over jsonrpsee_core debug rpc module +#[derive(Clone, Debug)] +pub struct Rpc { + inner: Arc>, +} + +impl Rpc { + /// Constructor for our rpc from substrate rpc handlers + pub fn new(handlers: &sc_service::RpcHandlers) -> Self { + let inner = handlers.handle(); + Self { inner } + } + + /// Subscribe to new block headers + pub async fn subscribe_new_heads<'a, 'b, T>( + &'a self, + ) -> Result> + Send + Sync + Unpin + 'static, Error> + where + T: frame_system::Config + sp_runtime::traits::GetRuntimeBlockType, + T::RuntimeBlock: serde::de::DeserializeOwned + sp_runtime::DeserializeOwned + 'static, + HeaderFor: serde::de::DeserializeOwned + sp_runtime::DeserializeOwned + 'static, + 'a: 'b, + { + let stream = sc_rpc::chain::ChainApiClient::< + BlockNumberFor, + T::Hash, + 
HeaderFor, + sp_runtime::generic::SignedBlock, + >::subscribe_new_heads(self) + .await? + .filter_map(|result| futures::future::ready(result.ok())); + + Ok(stream) + } + + /// Subscribe to new finalized block headers + pub async fn subscribe_finalized_heads<'a, 'b, T>( + &'a self, + ) -> Result> + Send + Sync + Unpin + 'static, Error> + where + T: frame_system::Config + sp_runtime::traits::GetRuntimeBlockType, + T::RuntimeBlock: serde::de::DeserializeOwned + sp_runtime::DeserializeOwned + 'static, + HeaderFor: serde::de::DeserializeOwned + sp_runtime::DeserializeOwned + 'static, + 'a: 'b, + { + let stream = sc_rpc::chain::ChainApiClient::< + BlockNumberFor, + T::Hash, + HeaderFor, + sp_runtime::generic::SignedBlock, + >::subscribe_finalized_heads(self) + .await? + .filter_map(|result| futures::future::ready(result.ok())); + + Ok(stream) + } + + /// Get substrate events for some block + pub async fn get_events( + &self, + block: Option, + ) -> anyhow::Result>> + where + T: frame_system::Config, + T::Hash: serde::ser::Serialize + serde::de::DeserializeOwned + Send + Sync + 'static, + Vec>: parity_scale_codec::Decode, + { + match self + .get_storage::(StorageKey::events(), block) + .await + .context("Failed to get events from storage")? + { + Some(sp_storage::StorageData(events)) => + parity_scale_codec::DecodeAll::decode_all(&mut events.as_ref()) + .context("Failed to decode events"), + None => Ok(vec![]), + } + } +} + +#[async_trait::async_trait] +impl NodeClient for Rpc { + async fn farmer_app_info(&self) -> Result { + Ok(self.get_farmer_app_info().await?) + } + + async fn subscribe_slot_info( + &self, + ) -> Result + Send + 'static>>, NodeClientError> { + Ok(Box::pin( + SubspaceRpcApiClient::subscribe_slot_info(self) + .await? + .filter_map(|result| futures::future::ready(result.ok())), + )) + } + + async fn submit_solution_response( + &self, + solution_response: SolutionResponse, + ) -> Result<(), NodeClientError> { + Ok(SubspaceRpcApiClient::submit_solution_response(self, solution_response).await?) + } + + async fn subscribe_reward_signing( + &self, + ) -> Result + Send + 'static>>, NodeClientError> + { + Ok(Box::pin( + SubspaceRpcApiClient::subscribe_reward_signing(self) + .await? + .filter_map(|result| futures::future::ready(result.ok())), + )) + } + + async fn submit_reward_signature( + &self, + reward_signature: RewardSignatureResponse, + ) -> Result<(), NodeClientError> { + Ok(SubspaceRpcApiClient::submit_reward_signature(self, reward_signature).await?) + } + + async fn subscribe_archived_segment_headers( + &self, + ) -> Result + Send + 'static>>, NodeClientError> { + Ok(Box::pin( + SubspaceRpcApiClient::subscribe_archived_segment_header(self) + .await? + .filter_map(|result| futures::future::ready(result.ok())), + )) + } + + async fn subscribe_node_sync_status_change( + &self, + ) -> Result + Send + 'static>>, NodeClientError> { + Ok(Box::pin( + SubspaceRpcApiClient::subscribe_node_sync_status_change(self) + .await? + .filter_map(|result| futures::future::ready(result.ok())), + )) + } + + async fn segment_headers( + &self, + segment_indexes: Vec, + ) -> Result>, NodeClientError> { + Ok(SubspaceRpcApiClient::segment_headers(self, segment_indexes).await?) + } + + async fn piece(&self, piece_index: PieceIndex) -> Result, NodeClientError> { + let result = SubspaceRpcApiClient::piece(self, piece_index).await?; + + if let Some(bytes) = result { + let piece = Piece::try_from(bytes.as_slice()) + .map_err(|_| format!("Cannot convert piece. 
PieceIndex={}", piece_index))?; + + return Ok(Some(piece)); + } + + Ok(None) + } + + async fn acknowledge_archived_segment_header( + &self, + segment_index: SegmentIndex, + ) -> Result<(), NodeClientError> { + Ok(SubspaceRpcApiClient::acknowledge_archived_segment_header(self, segment_index).await?) + } +} + +#[async_trait::async_trait] +impl ClientT for Rpc { + async fn notification(&self, method: &str, params: Params) -> Result<(), Error> + where + Params: ToRpcParams + Send, + { + self.inner.call(method, params).await + } + + async fn request(&self, method: &str, params: Params) -> Result + where + R: DeserializeOwned, + Params: ToRpcParams + Send, + { + self.inner.call(method, params).await + } + + #[allow(clippy::diverging_sub_expression)] + async fn batch_request<'a, R>( + &self, + _batch: BatchRequestBuilder<'a>, + ) -> Result, Error> + where + R: DeserializeOwned + std::fmt::Debug + 'a, + { + unreachable!("It isn't called at all") + } +} + +#[async_trait::async_trait] +impl SubscriptionClientT for Rpc { + async fn subscribe<'a, Notif, Params>( + &self, + subscribe_method: &'a str, + params: Params, + _unsubscribe_method: &'a str, + ) -> Result, Error> + where + Params: ToRpcParams + Send, + Notif: DeserializeOwned, + { + let mut subscription = Arc::clone(&self.inner).subscribe(subscribe_method, params).await?; + let kind = subscription.subscription_id().clone().into_owned(); + let (to_back, _) = futures::channel::mpsc::channel(10); + let (mut notifs_tx, notifs_rx) = futures::channel::mpsc::channel(10); + tokio::spawn(async move { + while let Some(result) = subscription.next().await { + let Ok((item, _)) = result else { break }; + if notifs_tx.send(item).await.is_err() { + break; + } + } + }); + + Ok(Subscription::new(to_back, notifs_rx, SubscriptionKind::Subscription(kind))) + } + + #[allow(clippy::diverging_sub_expression)] + async fn subscribe_to_method<'a, Notif>( + &self, + _method: &'a str, + ) -> Result, Error> + where + Notif: DeserializeOwned, + { + unreachable!("It isn't called") + } +} + +/// Useful predicate for serde, which allows to skip type during serialization +pub fn is_default(t: &T) -> bool { + t == &T::default() +} + +struct Defer(Option); + +impl Defer { + pub fn new(f: F) -> Self { + Self(Some(f)) + } +} + +impl Drop for Defer { + fn drop(&mut self) { + (self.0.take().expect("Always set"))(); + } +} + +/// Useful type which will ensure that things will be dropped +#[derive(Default, derivative::Derivative)] +#[derivative(Debug)] +struct DropCollection { + #[derivative(Debug = "ignore")] + vec: Vec>, +} + +impl DropCollection { + /// Constructor + pub fn new() -> Self { + Self::default() + } + + /// Run closure during drop + pub fn defer(&mut self, f: F) { + self.push(Defer::new(f)) + } + + /// Add something to drop collection + pub fn push(&mut self, t: T) { + self.vec.push(Box::new(t)) + } + + /// Drain the underlying vector + pub fn drain(&mut self) -> Drain<'_, Box> { + self.vec.drain(..) 
+ } +} + +impl FromIterator for DropCollection { + fn from_iter>(iter: I) -> Self { + let mut me = Self::new(); + for item in iter { + me.push(item); + } + me + } +} + +impl Extend for DropCollection { + fn extend>(&mut self, iter: I) { + for item in iter { + self.push(item); + } + } +} + +/// Type for dropping things asynchronously +#[derive(Default, derivative::Derivative)] +#[derivative(Debug)] +struct AsyncDropFutures { + #[derivative(Debug = "ignore")] + vec: Vec + Send + Sync>>>, +} + +impl AsyncDropFutures { + /// Constructor + pub fn new() -> Self { + Self::default() + } + + /// Push some future + pub fn push + Send + Sync + 'static>(&mut self, fut: F) { + self.vec.push(Box::pin(fut)) + } + + /// Drain the underlying vector + pub fn drain(&mut self) -> Drain<'_, Pin + Send + Sync>>> { + self.vec.drain(..) + } +} + +/// Enum identifying which of the item we should be destructing +#[derive(derivative::Derivative)] +#[derivative(Debug)] +enum ToDestruct { + Sync, + Async, + Item, +} + +/// A General purpose set of destructors consist of sync destructor, async +/// destructor and normal object it invokes destructors and destroy normal in +/// reverse order +#[derive(Default, derivative::Derivative)] +#[derivative(Debug)] +pub struct DestructorSet { + name: String, + items_to_drop: DropCollection, + sync_destructors: DropCollection, + async_destructors: AsyncDropFutures, + order: Vec, + already_ran: bool, + allow_async: bool, +} + +impl Drop for DestructorSet { + fn drop(&mut self) { + // already closed, nothing to do. + if self.already_ran { + return; + } + + if self.allow_async { + tracing::warn!( + "Destructor set: {} with async allowed is being dropped. Async destructors won't \ + run. Are you missing the `async_drop` call?", + self.name + ); + } + + // Try to drop as much stuff as we could + let mut sync_fns_drain = self.sync_destructors.drain().rev(); + let mut async_fns_drain = self.async_destructors.drain().rev(); + let mut items_drain = self.items_to_drop.drain().rev(); + let order_drain = self.order.drain(..).rev(); + + for order in order_drain { + match order { + ToDestruct::Sync => { + let sync_fn = sync_fns_drain.next().expect("sync fn always set"); + drop(sync_fn); + } + ToDestruct::Async => { + let async_fn = async_fns_drain.next().expect("async fn always set"); + // We cannot run async function here, we can only drop them. 
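+                    // (Dropping the future only releases what it captured; the
+                    // destructor body itself never executes unless the caller
+                    // awaited `async_drop` instead of relying on `Drop`.)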
+ drop(async_fn); + } + ToDestruct::Item => { + let item = items_drain.next().expect("item always set"); + drop(item); + } + } + } + } +} + +impl DestructorSet { + /// Creates an empty Destructors object with async destructors allowed + pub fn new(name: impl Into) -> DestructorSet { + DestructorSet { + name: name.into(), + items_to_drop: DropCollection::new(), + sync_destructors: DropCollection::new(), + async_destructors: AsyncDropFutures::new(), + order: vec![], + already_ran: false, + allow_async: true, + } + } + + /// Returns a bool indicating if the destructor set has already ran + pub fn already_ran(&self) -> bool { + self.already_ran + } + + /// Creates an empty Destructors object with async destructors not allowed + pub fn new_without_async(name: impl Into) -> DestructorSet { + DestructorSet { + name: name.into(), + items_to_drop: DropCollection::new(), + sync_destructors: DropCollection::new(), + async_destructors: AsyncDropFutures::new(), + order: vec![], + already_ran: false, + allow_async: false, + } + } + + /// Add sync destructor in the sync destructor collection + pub fn add_sync_destructor(&mut self, f: F) -> Result<()> { + if self.already_ran { + return Err(anyhow!("Destructor set: {} has been run already", self.name)); + } + self.order.push(ToDestruct::Sync); + self.sync_destructors.defer(f); + Ok(()) + } + + /// Add async destructor in the async destructor collection + pub fn add_async_destructor + Send + Sync + 'static>( + &mut self, + fut: F, + ) -> Result<()> { + if self.already_ran { + return Err(anyhow!("Destructor set: {} has been run already", self.name)); + } + if !self.allow_async { + return Err(anyhow!("async destructors are disabled in Destructor set: {}", self.name)); + } + self.order.push(ToDestruct::Async); + self.async_destructors.push(fut); + Ok(()) + } + + /// Add normal object to drop + pub fn add_items_to_drop(&mut self, t: T) -> Result<()> { + if self.already_ran { + return Err(anyhow!("Destructor set: {} has been run already", self.name)); + } + self.order.push(ToDestruct::Item); + self.items_to_drop.push(t); + Ok(()) + } + + /// run the destructors + pub async fn async_drop(mut self) -> Result<()> { + // already closed, nothing to do. + if self.already_ran { + return Err(anyhow!("Destructor set: {} has been run already", self.name)); + } + + if !self.allow_async { + return Err(anyhow!( + "Destructor set: {} is only configured to run sync destructors. To run them drop \ + this instance.", + self.name + )); + } + + let mut sync_fns_drain = self.sync_destructors.drain().rev(); + let mut async_fns_drain = self.async_destructors.drain().rev(); + let mut items_drain = self.items_to_drop.drain().rev(); + let order_drain = self.order.drain(..).rev(); + + for order in order_drain { + match order { + ToDestruct::Sync => { + let sync_fn = sync_fns_drain.next().expect("sync fn always set"); + drop(sync_fn); + } + ToDestruct::Async => { + let async_fn = async_fns_drain.next().expect("async fn always set"); + async_fn.await; + } + ToDestruct::Item => { + let item = items_drain.next().expect("item always set"); + drop(item); + } + } + } + self.already_ran = true; + Ok(()) + } +} + +/// Container for number of bytes. 
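+///
+/// A sketch of the constructors defined below (decimal vs binary units):
+///
+/// ```ignore
+/// use sdk_utils::ByteSize;
+///
+/// assert_eq!(ByteSize::kb(2).to_u64(), 2_000);
+/// assert_eq!(ByteSize::kib(2).to_u64(), 2_048);
+/// ```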
+#[derive( + Clone, + Copy, + Debug, + Default, + Deref, + DerefMut, + Deserialize, + Display, + Eq, + From, + FromStr, + Into, + Ord, + PartialEq, + PartialOrd, + Serialize, +)] +#[serde(transparent)] +pub struct ByteSize(#[serde(with = "bytesize_serde")] pub bytesize::ByteSize); + +impl ByteSize { + /// Constructor for bytes + pub const fn b(n: u64) -> Self { + Self(bytesize::ByteSize::b(n)) + } + + /// Constructor for kilobytes + pub const fn kb(n: u64) -> Self { + Self(bytesize::ByteSize::kb(n)) + } + + /// Constructor for kibibytes + pub const fn kib(n: u64) -> Self { + Self(bytesize::ByteSize::kib(n)) + } + + /// Constructor for megabytes + pub const fn mb(n: u64) -> Self { + Self(bytesize::ByteSize::mb(n)) + } + + /// Constructor for mibibytes + pub const fn mib(n: u64) -> Self { + Self(bytesize::ByteSize::mib(n)) + } + + /// Constructor for gigabytes + pub const fn gb(n: u64) -> Self { + Self(bytesize::ByteSize::gb(n)) + } + + /// Constructor for gibibytes + pub const fn gib(n: u64) -> Self { + Self(bytesize::ByteSize::gib(n)) + } + + /// Convert to u64 + pub fn to_u64(&self) -> u64 { + self.0.as_u64() + } +} + +/// Multiaddr is a wrapper around libp2p one +#[derive( + Clone, + Debug, + Deref, + DerefMut, + Deserialize, + Display, + Eq, + From, + FromStr, + Into, + PartialEq, + Serialize, +)] +#[serde(transparent)] +pub struct Multiaddr(pub libp2p_core::Multiaddr); + +impl From for sc_network::Multiaddr { + fn from(value: Multiaddr) -> Self { + value.0.to_string().parse().expect("Conversion between 2 libp2p versions is always right") + } +} + +impl From for Multiaddr { + fn from(value: sc_network::Multiaddr) -> Self { + value.to_string().parse().expect("Conversion between 2 libp2p versions is always right") + } +} + +/// Multiaddr with peer id +#[derive( + Debug, Clone, Deserialize, Serialize, PartialEq, From, Into, FromStr, Deref, DerefMut, Display, +)] +#[serde(transparent)] +pub struct MultiaddrWithPeerId(pub sc_service::config::MultiaddrWithPeerId); + +impl MultiaddrWithPeerId { + /// Constructor for peer id + pub fn new(multiaddr: impl Into, peer_id: sc_network::PeerId) -> Self { + Self(sc_service::config::MultiaddrWithPeerId { + multiaddr: multiaddr.into().into(), + peer_id, + }) + } +} + +impl From for libp2p_core::Multiaddr { + fn from(value: MultiaddrWithPeerId) -> Self { + value.0.to_string().parse().expect("Conversion between 2 libp2p versions is always right") + } +} + +impl From for sc_network::Multiaddr { + fn from(multiaddr: MultiaddrWithPeerId) -> Self { + multiaddr.to_string().parse().expect("Conversion between 2 libp2p versions is always right") + } +} + +impl From for Multiaddr { + fn from(multiaddr: MultiaddrWithPeerId) -> Self { + multiaddr.to_string().parse().expect("Conversion between 2 libp2p versions is always right") + } +} + +/// Spawn task with provided name (if possible) +#[cfg(not(tokio_unstable))] +pub fn task_spawn(name: impl AsRef, future: F) -> tokio::task::JoinHandle +where + F: Future + Send + 'static, + F::Output: Send + 'static, +{ + let _ = name; + tokio::task::spawn(future) +} + +/// Spawn task with provided name (if possible) +#[cfg(tokio_unstable)] +pub fn task_spawn(name: impl AsRef, future: F) -> tokio::task::JoinHandle +where + F: Future + Send + 'static, + F::Output: Send + 'static, +{ + tokio::task::Builder::new() + .name(name.as_ref()) + .spawn(future) + .expect("Spawning task never fails") +} + +/// Spawn task with provided name (if possible) +#[cfg(not(tokio_unstable))] +pub fn task_spawn_blocking(name: impl AsRef, f: F) -> 
tokio::task::JoinHandle +where + F: FnOnce() -> R + Send + 'static, + R: Send + 'static, +{ + let _ = name; + tokio::task::spawn_blocking(f) +} + +/// Spawn task with provided name (if possible) +#[cfg(tokio_unstable)] +pub fn task_spawn_blocking(name: impl AsRef, f: F) -> tokio::task::JoinHandle +where + F: FnOnce() -> R + Send + 'static, + R: Send + 'static, +{ + tokio::task::Builder::new() + .name(name.as_ref()) + .spawn_blocking(f) + .expect("Spawning task never fails") +} + +/// Substrate storage key abstraction +pub struct StorageKey(pub Vec); + +impl StorageKey { + /// Constructor which accepts storage keys + pub fn new(keys: IT) -> Self + where + IT: IntoIterator, + K: AsRef<[u8]>, + { + Self(keys.into_iter().flat_map(|key| sp_core_hashing::twox_128(key.as_ref())).collect()) + } + + /// Storage key for events + pub fn events() -> Self { + Self::new(["System", "Events"]) + } +} + +impl Rpc { + pub(crate) async fn get_storage( + &self, + StorageKey(key): StorageKey, + block: Option, + ) -> anyhow::Result> + where + H: Send + Sync + 'static + serde::ser::Serialize + serde::de::DeserializeOwned, + { + self.storage(sp_storage::StorageKey(key), block) + .await + .context("Failed to fetch storage entry") + } +} + +/// Public key type +#[derive( + Debug, + Default, + Decode, + Encode, + Copy, + Clone, + PartialEq, + Eq, + Ord, + PartialOrd, + Hash, + Deref, + DerefMut, + Serialize, + Deserialize, +)] +#[serde(transparent)] +pub struct PublicKey(pub subspace_core_primitives::PublicKey); + +impl PublicKey { + /// Construct public key from raw bytes + pub fn new(raw: [u8; PUBLIC_KEY_LENGTH]) -> Self { + Self(subspace_core_primitives::PublicKey::from(raw)) + } +} + +impl From<[u8; PUBLIC_KEY_LENGTH]> for PublicKey { + fn from(key: [u8; PUBLIC_KEY_LENGTH]) -> Self { + Self::new(key) + } +} + +impl From for PublicKey { + fn from(account_id: sp_core::crypto::AccountId32) -> Self { + From::<[u8; PUBLIC_KEY_LENGTH]>::from(*account_id.as_ref()) + } +} + +impl From for sp_core::crypto::AccountId32 { + fn from(account_id: PublicKey) -> Self { + Self::new(*account_id.0) + } +} + +impl std::fmt::Display for PublicKey { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + self.0.fmt(f) + } +} + +mod parse_ss58 { + // Copyright (C) 2017-2022 Parity Technologies (UK) Ltd. + // Copyright (C) 2022 Subspace Labs, Inc. + // SPDX-License-Identifier: Apache-2.0 + + // Licensed under the Apache License, Version 2.0 (the "License"); + // you may not use this file except in compliance with the License. + // You may obtain a copy of the License at + // + // http://www.apache.org/licenses/LICENSE-2.0 + // + // Unless required by applicable law or agreed to in writing, software + // distributed under the License is distributed on an "AS IS" BASIS, + // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + // See the License for the specific language governing permissions and + // limitations under the License. + + //! Modified version of SS58 parser extracted from Substrate in order to not + //! pull the whole `sp-core` into farmer application + + use base58::FromBase58; + use blake2::digest::typenum::U64; + use blake2::digest::FixedOutput; + use blake2::{Blake2b, Digest}; + use ss58_registry::Ss58AddressFormat; + use subspace_core_primitives::{PublicKey, PUBLIC_KEY_LENGTH}; + use thiserror::Error; + + const PREFIX: &[u8] = b"SS58PRE"; + const CHECKSUM_LEN: usize = 2; + + /// An error type for SS58 decoding. 
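+    ///
+    /// Surfaced through the `FromStr` impl for the crate-level `PublicKey`
+    /// at the bottom of this module, so a failed `parse::<PublicKey>()`
+    /// reports exactly which SS58 rule was violated.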
+ #[derive(Debug, Error)] + pub enum Ss58ParsingError { + /// Base 58 requirement is violated + #[error("Base 58 requirement is violated")] + BadBase58, + /// Length is bad + #[error("Length is bad")] + BadLength, + /// Invalid SS58 prefix byte + #[error("Invalid SS58 prefix byte")] + InvalidPrefix, + /// Disallowed SS58 Address Format for this datatype + #[error("Disallowed SS58 Address Format for this datatype")] + FormatNotAllowed, + /// Invalid checksum + #[error("Invalid checksum")] + InvalidChecksum, + } + + /// Some if the string is a properly encoded SS58Check address. + pub(crate) fn parse_ss58_reward_address(s: &str) -> Result { + let data = s.from_base58().map_err(|_| Ss58ParsingError::BadBase58)?; + if data.len() < 2 { + return Err(Ss58ParsingError::BadLength); + } + let (prefix_len, ident) = match data[0] { + 0..=63 => (1, data[0] as u16), + 64..=127 => { + // weird bit manipulation owing to the combination of LE encoding and missing + // two bits from the left. + // d[0] d[1] are: 01aaaaaa bbcccccc + // they make the LE-encoded 16-bit value: aaaaaabb 00cccccc + // so the lower byte is formed of aaaaaabb and the higher byte is 00cccccc + let lower = (data[0] << 2) | (data[1] >> 6); + let upper = data[1] & 0b00111111; + (2, (lower as u16) | ((upper as u16) << 8)) + } + _ => return Err(Ss58ParsingError::InvalidPrefix), + }; + if data.len() != prefix_len + PUBLIC_KEY_LENGTH + CHECKSUM_LEN { + return Err(Ss58ParsingError::BadLength); + } + let format: Ss58AddressFormat = ident.into(); + if format.is_reserved() { + return Err(Ss58ParsingError::FormatNotAllowed); + } + + let hash = ss58hash(&data[0..PUBLIC_KEY_LENGTH + prefix_len]); + let checksum = &hash[0..CHECKSUM_LEN]; + if data[PUBLIC_KEY_LENGTH + prefix_len..PUBLIC_KEY_LENGTH + prefix_len + CHECKSUM_LEN] + != *checksum + { + // Invalid checksum. + return Err(Ss58ParsingError::InvalidChecksum); + } + + let bytes: [u8; PUBLIC_KEY_LENGTH] = data[prefix_len..][..PUBLIC_KEY_LENGTH] + .try_into() + .map_err(|_| Ss58ParsingError::BadLength)?; + + Ok(PublicKey::from(bytes)) + } + + fn ss58hash(data: &[u8]) -> [u8; 64] { + let mut state = Blake2b::::new(); + state.update(PREFIX); + state.update(data); + state.finalize_fixed().into() + } + + impl std::str::FromStr for super::PublicKey { + type Err = Ss58ParsingError; + + fn from_str(s: &str) -> Result { + parse_ss58_reward_address(s).map(Self) + } + } +} + +pub mod chain_spec { + //! Subspace chain spec related utilities + + use frame_support::traits::Get; + use sc_service::Properties; + use serde_json::map::Map; + use serde_json::Value; + use sp_core::crypto::AccountId32; + use sp_core::{sr25519, Pair, Public}; + use sp_runtime::traits::IdentifyAccount; + use sp_runtime::MultiSigner; + use subspace_runtime::SS58Prefix; + use subspace_runtime_primitives::DECIMAL_PLACES; + + /// Shared chain spec properties related to the coin. + pub fn chain_spec_properties() -> Properties { + let mut properties = Properties::new(); + + properties.insert("dsnBootstrapNodes".to_string(), Vec::::new().into()); + properties.insert("ss58Format".to_string(), >::get().into()); + properties.insert("tokenDecimals".to_string(), DECIMAL_PLACES.into()); + properties.insert("tokenSymbol".to_string(), "tSSC".into()); + let domains_bootstrap_nodes = Map::::new(); + properties.insert("domainsBootstrapNodes".to_string(), domains_bootstrap_nodes.into()); + + properties + } + + /// Get public key from keypair seed. 
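+    ///
+    /// e.g. `get_public_key_from_seed::<sr25519::Public>("Alice")` derives
+    /// the public key for the well-known `//Alice` dev seed.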
+ pub fn get_public_key_from_seed( + seed: &'static str, + ) -> ::Public { + TPublic::Pair::from_string(&format!("//{seed}"), None) + .expect("Static values are valid; qed") + .public() + } + + /// Generate an account ID from seed. + pub fn get_account_id_from_seed(seed: &'static str) -> AccountId32 { + MultiSigner::from(get_public_key_from_seed::(seed)).into_account() + } +} + +/// Useful macro to generate some common methods and trait implementations for +/// builders +#[macro_export] +macro_rules! generate_builder { + ( $name:ident ) => { + impl concat_idents!($name, Builder) { + /// Constructor + pub fn new() -> Self { + Self::default() + } + + #[doc = concat!("Build ", stringify!($name))] + pub fn build(self) -> $name { + self._build().expect("Infallible") + } + } + + impl From for $name { + fn from(value: concat_idents!($name, Builder)) -> Self { + value.build() + } + } + }; + ( $name:ident, $($rest:ident),+ ) => { + $crate::generate_builder!($name); + $crate::generate_builder!($($rest),+); + }; +}
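Taken together, `generate_builder!` gives every config struct in this diff a builder companion with an infallible `new()`/`build()` pair plus a `From<Builder>` conversion. A minimal sketch of the call side, using the `Network` type and its `dev()` preset from `sdk/substrate/src/lib.rs` (the multiaddr literal is illustrative, not taken from the diff):

```rust
use sdk_substrate::{Network, NetworkBuilder};

fn example_network() -> Network {
    // `NetworkBuilder::new()` and `build()` come from
    // `sdk_utils::generate_builder!(Base, Rpc, Network)`; `dev()` is the
    // preset above (force_synced + allow_private_ip).
    NetworkBuilder::dev()
        .listen_addresses(vec!["/ip4/127.0.0.1/tcp/30333"
            .parse()
            .expect("hardcoded value is true")])
        .build()
}
```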