
Commit

Handle non-compacted segments correctly. Update the UI so it can run on the latest macOS.
bayk committed Jan 6, 2025
1 parent 01febb5 commit 22cd193
Showing 13 changed files with 556 additions and 307 deletions.
383 changes: 214 additions & 169 deletions Cargo.lock

Large diffs are not rendered by default.

4 changes: 2 additions & 2 deletions Cargo.toml
@@ -24,7 +24,7 @@ blake2-rfc = "0.2"
chrono = "0.4.11"
clap = { version = "2.33", features = ["yaml"] }
ctrlc = { version = "3.1", features = ["termination"] }
cursive_table_view = "0.14.0"
cursive_table_view = "0.15.0"
humansize = "1.1.0"
serde = "1"
futures = "0.3.19"
@@ -43,7 +43,7 @@ mwc_servers = { path = "./servers", version = "5.3.6" }
mwc_util = { path = "./util", version = "5.3.6" }

[dependencies.cursive]
version = "0.20"
version = "0.21"
default-features = false
features = ["pancurses-backend"]

35 changes: 27 additions & 8 deletions chain/src/txhashset/desegmenter.rs
@@ -559,18 +559,29 @@ impl Desegmenter {
self.outputs_bitmap_accumulator.read().root()
);
let bitmap = self.outputs_bitmap_accumulator.read().build_bitmap();
let mut bitmap_pairs: Bitmap = Bitmap::new();

for bit in bitmap.iter() {
bitmap_pairs.add(bit);
// also adding sibling
if bit % 2 == 0 {
bitmap_pairs.add(bit + 1);
} else {
bitmap_pairs.add(bit - 1);
}
}

let rangeproof_segments = Self::generate_segments(
constants::SINGLE_BULLET_PROOF_SIZE,
pibd_params::PIBD_MESSAGE_SIZE_LIMIT,
self.archive_header.output_mmr_size,
Some(&bitmap),
Some(&bitmap_pairs),
);
let output_segments = Self::generate_segments(
constants::PEDERSEN_COMMITMENT_SIZE,
pibd_params::PIBD_MESSAGE_SIZE_LIMIT,
self.archive_header.output_mmr_size,
Some(&bitmap),
Some(&bitmap_pairs),
);
let kernel_segments = Self::generate_segments(
TxKernel::DATA_SIZE,
@@ -825,16 +836,24 @@ impl Desegmenter {
return Err(Error::BitmapNotReady);
}

// Rough estimation of the segment size. The error comes from the hashes. Since it is not much data, we can ignore it.
// Rough estimation of the segment size. This method overestimates the hash contribution, so the result stays below the real data size limit
fn estimate_segment_size(leaves_num: u64, capacity: u64, leaf_size: usize) -> u64 {
debug_assert!(leaves_num <= capacity);
debug_assert!(capacity > 0);
let fill_ratio = leaves_num as f64 / capacity as f64;

let mut hash_num = 0;
let mut cur_cap = capacity / 2;
debug_assert!(leaves_num % 2 == 0); // Siblings are expected to be present
let hash_per_line = leaves_num / 2;
while cur_cap > 0 {
hash_num += std::cmp::min(cur_cap, hash_per_line);
cur_cap /= 2;
}

// Node hash is 32 bytes. Positions for all are 8 bytes. Assuming that empty is proportional to the fill ratio
let full_leaves_size = capacity * (leaf_size as u64 + 8);
let full_hashes_size = (capacity - 1) * (32 + 8);
(full_leaves_size as f64 * fill_ratio + full_hashes_size as f64 * fill_ratio.sqrt()).round()
as u64
let full_leaves_size = leaves_num * (leaf_size as u64 + 8);
let full_hashes_size = hash_num * (32 + 8);
full_leaves_size + full_hashes_size
}

// Return the segments and position of the next leaf
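
The `bitmap_pairs` loop in the first hunk above pairs every unspent output leaf with its sibling before segment sizes are planned: a non-compacted data set still carries both leaves of each sibling pair, so the size estimate has to count them. A minimal sketch of the pairing rule, using a std `BTreeSet` instead of croaring's `Bitmap` (the helper name `with_siblings` and the sample leaf set are illustrative assumptions, not code from this commit):

use std::collections::BTreeSet;

// For every leaf index, also include its sibling; leaf indices pair up as
// 0/1, 2/3, 4/5, ... which is what the even/odd branch in the diff encodes.
fn with_siblings(leaves: &BTreeSet<u32>) -> BTreeSet<u32> {
    let mut out = BTreeSet::new();
    for &bit in leaves {
        out.insert(bit);
        // `bit ^ 1` flips the lowest bit: even -> bit + 1, odd -> bit - 1.
        out.insert(bit ^ 1);
    }
    out
}

fn main() {
    let leaves: BTreeSet<u32> = [0, 3, 6].into_iter().collect();
    let expected: BTreeSet<u32> = [0, 1, 2, 3, 6, 7].into_iter().collect();
    assert_eq!(with_siblings(&leaves), expected);
}

The guaranteed-even leaf count produced this way is exactly what the new `debug_assert!(leaves_num % 2 == 0)` in `estimate_segment_size` relies on.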
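
The reworked `estimate_segment_size` replaces the old `sqrt(fill_ratio)` scaling with an explicit per-level hash count, adding `min(cur_cap, leaves_num / 2)` at every level above the leaves. A self-contained copy of the new logic (debug assertions trimmed) with a worked example under assumed numbers, not figures taken from the commit:

// Example: 8 leaves in a segment with capacity 32, 33-byte leaves.
fn estimate_segment_size(leaves_num: u64, capacity: u64, leaf_size: usize) -> u64 {
    let mut hash_num = 0;
    let mut cur_cap = capacity / 2;
    let hash_per_line = leaves_num / 2; // leaves arrive as sibling pairs
    while cur_cap > 0 {
        hash_num += std::cmp::min(cur_cap, hash_per_line);
        cur_cap /= 2;
    }
    // each leaf: data + 8-byte position; each hash node: 32-byte hash + 8-byte position
    leaves_num * (leaf_size as u64 + 8) + hash_num * (32 + 8)
}

fn main() {
    // hash_num = min(16,4) + min(8,4) + min(4,4) + min(2,4) + min(1,4) = 15,
    // so the estimate is 8 * 41 + 15 * 40 = 328 + 600 = 928 bytes.
    assert_eq!(estimate_segment_size(8, 32, 33), 928);
}

Because `hash_per_line` stays constant across levels while the real per-level hash count never exceeds it, the estimate errs on the high side, so segments planned against `PIBD_MESSAGE_SIZE_LIMIT` stay under the limit in practice.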
54 changes: 31 additions & 23 deletions chain/src/txhashset/headers_desegmenter.rs
@@ -255,12 +255,14 @@ impl<T> HeadersRecieveCache<T> {
.unwrap()
.get(hash_idx as usize)
{
if header.hash() != *hash {
// We need to check the first hash; if it doesn't match, reset the whole chain. These hashes are below the horizon,
// so on a mismatch it is better to reset all the data, including block data, and restart the headers download
self.chain.reset_chain_head(self.chain.genesis(), true)?;
} else {
break;
if let Some(hash) = hash {
if header.hash() != *hash {
// We need to check the first hash; if it doesn't match, reset the whole chain. These hashes are below the horizon,
// so on a mismatch it is better to reset all the data, including block data, and restart the headers download
self.chain.reset_chain_head(self.chain.genesis(), true)?;
} else {
break;
}
}
}
}
@@ -331,23 +333,27 @@ impl<T> HeadersRecieveCache<T> {
last_in_cache = 0;
}

let hinfo: Option<&Hash> = headers
let hinfo: Option<&Option<Hash>> = headers
.header_pmmr
.data
.as_ref()
.unwrap()
.get(hash_idx as usize);
match hinfo {
Some(h) => {
let request = (h.clone(), hash_idx * HEADERS_PER_BATCH as u64);
// check if already requested first
if !request_tracker.contains_request(h) {
return_vec.push(request);
if return_vec.len() >= elements {
break;
Some(hash) => {
if let Some(h) = hash {
let request = (h.clone(), hash_idx * HEADERS_PER_BATCH as u64);
// check if already requested first
if !request_tracker.contains_request(h) {
return_vec.push(request);
if return_vec.len() >= elements {
break;
}
} else {
waiting_indexes.push((hash_idx, request));
}
} else {
waiting_indexes.push((hash_idx, request));
break;
}
}
None => break,
@@ -399,14 +405,16 @@ impl<T> HeadersRecieveCache<T> {
.expect("header_pmmr data must exist")
.get(hash_idx as usize + 1)
{
let last_header = bhs.last().unwrap();
if last_header.hash() != *next_hash {
return Err((
peer_info,
Error::InvalidSegment(
"Last header in the series doesn't match expected hash".to_string(),
),
));
if let Some(next_hash) = next_hash {
let last_header = bhs.last().unwrap();
if last_header.hash() != *next_hash {
return Err((
peer_info,
Error::InvalidSegment(
"Last header in the series doesn't match expected hash".to_string(),
),
));
}
}
}

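
All three hunks in this file adjust to the header PMMR data now holding `Option<Hash>` entries, so an index lookup yields `Option<&Option<Hash>>` and each hit needs a second `if let` before the hash can be compared or requested. A small self-contained sketch of the same access pattern (the stand-in `Hash` type, `lookup` helper and sample data are assumptions for illustration); flattening with `and_then` is equivalent to the nested `if let` used in the diff:

#[derive(Debug, Clone, PartialEq)]
struct Hash([u8; 32]);

// `get` on the backing slice returns Option<&Option<Hash>>;
// `.and_then(|h| h.as_ref())` collapses it to Option<&Hash>.
fn lookup(data: &[Option<Hash>], hash_idx: usize) -> Option<&Hash> {
    data.get(hash_idx).and_then(|h| h.as_ref())
}

fn main() {
    let data = vec![Some(Hash([1u8; 32])), None];
    assert!(lookup(&data, 0).is_some()); // hash already known
    assert!(lookup(&data, 1).is_none()); // slot exists, hash not received yet
    assert!(lookup(&data, 2).is_none()); // index out of range
}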
17 changes: 10 additions & 7 deletions chain/src/txhashset/segmenter.rs
@@ -23,6 +23,7 @@ use crate::pibd_params;
use crate::txhashset::{BitmapAccumulator, BitmapChunk, TxHashSet};
use crate::util::secp::pedersen::RangeProof;
use crate::util::RwLock;
use croaring::Bitmap;
use mwc_core::core::pmmr::{ReadonlyPMMR, VecBackend};
use mwc_util::secp::constants;
use std::{sync::Arc, time::Instant};
@@ -35,6 +36,7 @@ pub struct Segmenter {
header_pmmr: Arc<RwLock<VecBackend<Hash>>>,
txhashset: Arc<RwLock<TxHashSet>>,
bitmap_snapshot: Arc<BitmapAccumulator>,
bitmap: Bitmap,
header: BlockHeader,
}

@@ -49,6 +51,7 @@ impl Segmenter {
Segmenter {
header_pmmr,
txhashset,
bitmap: bitmap_snapshot.build_bitmap(),
bitmap_snapshot: Arc::new(bitmap_snapshot),
header,
}
@@ -83,7 +86,7 @@ impl Segmenter {
let segment = Segment::from_pmmr(
id,
&bitmap_pmmr,
false,
None,
BitmapChunk::LEN_BYTES,
pibd_params::PIBD_MESSAGE_SIZE_LIMIT,
)?;
@@ -106,7 +109,7 @@
let segment = Segment::from_pmmr(
id,
&header_pmmr,
false,
None,
Hash::LEN,
pibd_params::PIBD_MESSAGE_SIZE_LIMIT,
)?;
@@ -132,9 +135,9 @@
let segment = Segment::from_pmmr(
id,
&output_pmmr,
true,
Some(&self.bitmap),
constants::PEDERSEN_COMMITMENT_SIZE,
pibd_params::PIBD_MESSAGE_SIZE_LIMIT * 4,
pibd_params::PIBD_MESSAGE_SIZE_LIMIT * 2, // In reality 1x should be enough, using 2x to cover some edge cases
)?;
debug!(
"output_segment: id: {}, leaves: {}, hashes: {}, proof hashes: {}, took {}ms",
@@ -155,7 +158,7 @@
let segment = Segment::from_pmmr(
id,
&kernel_pmmr,
false,
None,
TxKernel::DATA_SIZE,
pibd_params::PIBD_MESSAGE_SIZE_LIMIT,
)?;
@@ -179,9 +182,9 @@
let segment = Segment::from_pmmr(
id,
&pmmr,
true,
Some(&self.bitmap),
constants::SINGLE_BULLET_PROOF_SIZE,
pibd_params::PIBD_MESSAGE_SIZE_LIMIT * 4,
pibd_params::PIBD_MESSAGE_SIZE_LIMIT * 2, // In reality 1x should be enough, using 2x to cover some edge cases
)?;
debug!(
"rangeproof_segment: id: {}, leaves: {}, hashes: {}, proof hashes: {}, took {}ms",
2 changes: 1 addition & 1 deletion chain/tests/bitmap_segment.rs
@@ -42,7 +42,7 @@ fn test_roundtrip(entries: usize) {
.unwrap();

let mmr = accumulator.readonly_pmmr();
let segment = Segment::from_pmmr(identifier, &mmr, false, 1, usize::MAX).unwrap();
let segment = Segment::from_pmmr(identifier, &mmr, None, 1, usize::MAX).unwrap();

// Convert to `BitmapSegment`
let bms = BitmapSegment::from(segment.clone());
62 changes: 45 additions & 17 deletions core/src/core/pmmr/pmmr.rs
@@ -178,6 +178,7 @@
{
/// Number of nodes in the PMMR
pub size: u64,
index_offset: u64, // Needed to operate on a segment of a bigger PMMR
backend: &'a mut B,
// only needed to parameterise Backend
_marker: marker::PhantomData<T>,
@@ -193,6 +194,7 @@
PMMR {
backend,
size: 0,
index_offset: 0,
_marker: marker::PhantomData,
}
}
@@ -203,6 +205,7 @@
PMMR {
backend,
size,
index_offset: 0,
_marker: marker::PhantomData,
}
}
@@ -212,11 +215,16 @@
ReadonlyPMMR::at(&self.backend, self.size)
}

/// Set the index offset when using this PMMR as a segment of a bigger one
pub fn update_index_offset(&mut self, offset: u64) {
self.index_offset = offset;
}

/// Push a new element into the MMR. Computes new related peaks at
/// the same time if applicable.
pub fn push(&mut self, leaf: &T) -> Result<u64, String> {
let leaf_pos = self.size;
let mut current_hash = leaf.hash_with_index(leaf_pos);
let mut current_hash = leaf.hash_with_index(leaf_pos + self.index_offset);

let mut hashes = vec![current_hash];
let mut pos = leaf_pos;
@@ -229,14 +237,18 @@
let mut peak = 1;
while (peak_map & peak) != 0 {
let left_sibling = pos + 1 - 2 * peak;
let left_hash = self
.backend
.get_peak_from_file(left_sibling)
.ok_or("missing left sibling in tree, should not have been pruned")?;
peak *= 2;
pos += 1;
current_hash = (left_hash, current_hash).hash_with_index(pos);
hashes.push(current_hash);
match self.backend.get_peak_from_file(left_sibling) {
Some(left_hash) => {
peak *= 2;
pos += 1;
current_hash =
(left_hash, current_hash).hash_with_index(pos + self.index_offset);
hashes.push(current_hash);
}
None => {
return Err("missing left sibling in tree, should not have been pruned".into());
}
}
}

// append all the new nodes and update the MMR index
@@ -265,13 +277,17 @@
// is right sibling, we should be done
continue;
}
let left_hash = self
.backend
.get_hash(sibling)
.ok_or("missing left sibling in tree, should not have been pruned")?;
pos = parent;
current_hash = (left_hash, current_hash).hash_with_index(parent);
self.backend.append_hash(current_hash)?;
match self.backend.get_hash(sibling) {
Some(left_hash) => {
pos = parent;
current_hash =
(left_hash, current_hash).hash_with_index(parent + self.index_offset);
self.backend.append_hash(current_hash)?;
}
None => {
return Err("missing left sibling in tree, should not have been pruned".into());
}
}
}

// Round size up to next leaf, ready for insertion
@@ -354,7 +370,8 @@
if let Some(left_child_hs) = self.get_from_file(left_pos) {
if let Some(right_child_hs) = self.get_from_file(right_pos) {
// hash the two child nodes together with parent_pos and compare
if (left_child_hs, right_child_hs).hash_with_index(n)
if (left_child_hs, right_child_hs)
.hash_with_index(n + self.index_offset)
!= hash
{
return Err(format!("Invalid MMR, hash of parent at {} does not match children.", n + 1));
@@ -656,6 +673,17 @@ pub fn family(pos0: u64) -> (u64, u64) {
}
}

/// Positions of the left and right children. Returns None for a leaf.
pub fn children(pos0: u64) -> Option<(u64, u64)> {
let height = bintree_postorder_height(pos0);
if height == 0 {
return None; // It is a leaf, no children
}
let left_pos = pos0 - (1 << height);
let right_pos = pos0 - 1;
Some((left_pos, right_pos))
}

/// Is the node at this pos the "left" sibling of its parent?
pub fn is_left_sibling(pos0: u64) -> bool {
let (peak_map, height) = peak_map_height(pos0);
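
The new `index_offset` field exists because a PMMR node's hash commits to the node's position (`hash_with_index`), so a segment that is rebuilt inside its own small PMMR has to hash every node at its global position in the parent MMR, not at its local position, or the recomputed hashes will not match. A self-contained toy illustrating the idea (the toy hasher and all names below are assumptions, not the crate's real hashing code):

use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

// Toy stand-in for position-committing hashing: the digest depends on both
// the node data and the position it is hashed at.
fn hash_with_index(data: &[u8], pos: u64) -> u64 {
    let mut h = DefaultHasher::new();
    pos.hash(&mut h);
    data.hash(&mut h);
    h.finish()
}

fn main() {
    let leaf = b"leaf";
    let global_pos = 100u64;   // where this leaf sits in the full MMR
    let local_pos = 0u64;      // where it sits in the segment's scratch PMMR
    let index_offset = 100u64; // set via update_index_offset()

    // Hashing at the bare local position gives a different digest...
    assert_ne!(hash_with_index(leaf, local_pos), hash_with_index(leaf, global_pos));
    // ...hashing at local_pos + index_offset reproduces the parent MMR's digest,
    // which is what the `+ self.index_offset` additions in push()/validate() achieve.
    assert_eq!(
        hash_with_index(leaf, local_pos + index_offset),
        hash_with_index(leaf, global_pos)
    );
}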
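
The new `children` helper is the inverse of `family`: given the 0-based post-order position of an internal node it returns the positions of its left and right children, and `None` for a leaf. A possible unit test over the smallest interesting tree (the test is a suggestion, not part of the commit; positions follow the post-order numbering already used by `family` and `is_left_sibling`):

#[test]
fn children_of_small_mmr() {
    // Post-order positions for a 4-leaf MMR:
    //         6
    //       /   \
    //      2     5
    //     / \   / \
    //    0   1 3   4
    assert_eq!(children(0), None); // leaves have no children
    assert_eq!(children(2), Some((0, 1)));
    assert_eq!(children(5), Some((3, 4)));
    assert_eq!(children(6), Some((2, 5)));
}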
