
Commit

Merge branch 'main' into open-db-for-read-only-access
teor2345 authored Dec 12, 2023
2 parents 7d00ade + 9ace6f8 commit 2cc8b7b
Showing 32 changed files with 427 additions and 64 deletions.
13 changes: 3 additions & 10 deletions zebra-chain/src/block/hash.rs
@@ -85,10 +85,9 @@ impl FromHex for Hash {
type Error = <[u8; 32] as FromHex>::Error;

fn from_hex<T: AsRef<[u8]>>(hex: T) -> Result<Self, Self::Error> {
let mut hash = <[u8; 32]>::from_hex(hex)?;
hash.reverse();
let hash = <[u8; 32]>::from_hex(hex)?;

Ok(hash.into())
Ok(Self::from_bytes_in_display_order(&hash))
}
}

@@ -148,12 +147,6 @@ impl ZcashDeserialize for Hash {
impl std::str::FromStr for Hash {
type Err = SerializationError;
fn from_str(s: &str) -> Result<Self, Self::Err> {
let mut bytes = [0; 32];
if hex::decode_to_slice(s, &mut bytes[..]).is_err() {
Err(SerializationError::Parse("hex decoding error"))
} else {
bytes.reverse();
Ok(Hash(bytes))
}
Ok(Self::from_hex(s)?)
}
}
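For context, a minimal sketch (not part of this commit) of the behaviour the two impls above now share: both delegate to `from_bytes_in_display_order`, so a hex string written in display order round-trips. The `Display` assertion is an assumption based on the rest of the `Hash` API, not on this diff.

```rust
use zebra_chain::block::Hash;

fn main() {
    // Block explorers show block hashes in "display order": byte-reversed hex.
    let display_hex = format!("{}aa", "0".repeat(62));

    // `FromStr` now delegates to `FromHex`, which reverses into internal byte order.
    let parsed: Hash = display_hex.parse().expect("64 hex characters");

    // Assumed: `Display` formats in display order, so the string round-trips.
    assert_eq!(parsed.to_string(), display_hex);
}
```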
5 changes: 5 additions & 0 deletions zebra-chain/src/serialization/error.rs
@@ -2,6 +2,7 @@
use std::{array::TryFromSliceError, io, num::TryFromIntError, str::Utf8Error};

use hex::FromHexError;
use thiserror::Error;

/// A serialization error.
@@ -31,6 +32,10 @@ pub enum SerializationError {
#[error("CompactSize too large: {0}")]
TryFromIntError(#[from] TryFromIntError),

/// A string was not valid hexadecimal.
#[error("string was not hex: {0}")]
FromHexError(#[from] FromHexError),

/// An error caused when validating a zatoshi `Amount`
#[error("input couldn't be parsed as a zatoshi `Amount`: {source}")]
Amount {
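The new variant is what lets the hash code above write `Ok(Self::from_hex(s)?)`: `#[from]` generates a `From<FromHexError>` impl, so `?` converts hex decoding failures into `SerializationError`. A minimal sketch (not from this commit; the helper is hypothetical):

```rust
use hex::FromHex;
use zebra_chain::serialization::SerializationError;

// Hypothetical helper showing the conversion added by `#[from] FromHexError`.
fn parse_32_bytes(s: &str) -> Result<[u8; 32], SerializationError> {
    // `from_hex` returns `Result<_, FromHexError>`; `?` maps it into the new variant.
    Ok(<[u8; 32]>::from_hex(s)?)
}
```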
5 changes: 4 additions & 1 deletion zebra-scan/Cargo.toml
@@ -25,6 +25,7 @@ proptest-impl = [
"proptest-derive",
"zebra-state/proptest-impl",
"zebra-chain/proptest-impl",
"zebra-test",
"bls12_381",
"ff",
"group",
@@ -63,6 +64,8 @@ jubjub = { version = "0.10.0", optional = true }
rand = { version = "0.8.5", optional = true }
zcash_note_encryption = { version = "0.4.0", optional = true }

zebra-test = { path = "../zebra-test", version = "1.0.0-beta.31", optional = true }

[dev-dependencies]

insta = { version = "1.33.0", features = ["ron", "redactions"] }
@@ -78,4 +81,4 @@ rand = "0.8.5"
zcash_note_encryption = "0.4.0"

zebra-state = { path = "../zebra-state", version = "1.0.0-beta.31", features = ["proptest-impl"] }
zebra-test = { path = "../zebra-test" }
zebra-test = { path = "../zebra-test", version = "1.0.0-beta.31" }
4 changes: 2 additions & 2 deletions zebra-scan/src/storage/db.rs
@@ -19,8 +19,8 @@ pub use zebra_state::{

pub mod sapling;

#[cfg(test)]
mod tests;
#[cfg(any(test, feature = "proptest-impl"))]
pub mod tests;

/// The directory name used to distinguish the scanner database from Zebra's other databases or
/// flat files.
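A sketch (not part of this commit) of why the module becomes `pub` and is gated on `any(test, feature = "proptest-impl")`: crates that depend on zebra-scan with that feature enabled can reuse the storage test helpers. The import path is an assumption about which zebra-scan modules are public.

```rust
// Assumed downstream usage, with zebra-scan built using `features = ["proptest-impl"]`.
use zebra_chain::parameters::Network;
use zebra_scan::storage::db::tests::new_test_storage;

#[test]
fn opens_an_ephemeral_scanner_db() {
    // Creates an empty, temporary scanner database for the given network.
    let _storage = new_test_storage(Network::Mainnet);
}
```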
55 changes: 55 additions & 0 deletions zebra-scan/src/storage/db/tests.rs
@@ -1,3 +1,58 @@
//! General scanner database tests.
use std::sync::Arc;

use zebra_chain::{
block::{Block, Height},
parameters::Network::{self, *},
serialization::ZcashDeserializeInto,
};
use zebra_state::TransactionIndex;

use crate::{
storage::Storage,
tests::{FAKE_SAPLING_VIEWING_KEY, ZECPAGES_SAPLING_VIEWING_KEY},
Config,
};

#[cfg(test)]
mod snapshot;

/// Returns an empty `Storage` suitable for testing.
pub fn new_test_storage(network: Network) -> Storage {
Storage::new(&Config::ephemeral(), network)

Check failure on line 23 in zebra-scan/src/storage/db/tests.rs
GitHub Actions: Build and Deploy Zebra Internal Docs, Check Cargo.lock is up to date, Test stable on ubuntu-latest, Test beta on ubuntu-latest, Clippy (stable) Results

error[E0061]: this function takes 3 arguments but 2 arguments were supplied
  --> zebra-scan/src/storage/db/tests.rs:23:5
   |
23 |     Storage::new(&Config::ephemeral(), network)
   |     ^^^^^^^^^^^^------------------------------- an argument of type `bool` is missing
   |
note: associated function defined here
  --> zebra-scan/src/storage.rs:58:12
   |
58 |     pub fn new(config: &Config, network: Network, read_only: bool) -> Self {
   |            ^^^ ---------------  ----------------  ---------------
help: provide the argument
   |
23 |     Storage::new(&Config::ephemeral(), network, /* bool */)
   |                 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
}

/// Add fake keys to `storage` for testing purposes.
pub fn add_fake_keys(storage: &mut Storage) {
// Snapshot a birthday that is automatically set to activation height
storage.add_sapling_key(&ZECPAGES_SAPLING_VIEWING_KEY.to_string(), None);
// Snapshot a birthday above activation height
storage.add_sapling_key(&FAKE_SAPLING_VIEWING_KEY.to_string(), Height(1_000_000));
}

/// Add fake results to `storage` for testing purposes.
pub fn add_fake_results(storage: &mut Storage, network: Network, height: Height) {
let blocks = match network {
Mainnet => &*zebra_test::vectors::CONTINUOUS_MAINNET_BLOCKS,
Testnet => &*zebra_test::vectors::CONTINUOUS_TESTNET_BLOCKS,
};

let block: Arc<Block> = blocks
.get(&height.0)
.expect("block height has test data")
.zcash_deserialize_into()
.expect("test data deserializes");

// Fake results from the first few blocks
storage.add_sapling_results(
&ZECPAGES_SAPLING_VIEWING_KEY.to_string(),
height,
block
.transactions
.iter()
.enumerate()
.map(|(index, tx)| (TransactionIndex::from_usize(index), tx.hash().into()))
.collect(),
);
}
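A minimal sketch (not part of this commit) of how these three helpers are meant to compose in a test. Note the CI annotation above: as merged, `new_test_storage` still calls `Storage::new` with two arguments, so this only compiles once the missing `read_only` argument is supplied.

```rust
#[test]
fn fake_scan_results_smoke_test() {
    let _init_guard = zebra_test::init();

    // Empty ephemeral storage, plus two fake Sapling viewing keys.
    let mut storage = new_test_storage(Mainnet);
    add_fake_keys(&mut storage);

    // Fake results for the genesis block of the continuous mainnet test vectors.
    add_fake_results(&mut storage, Mainnet, Height(0));
}
```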
134 changes: 86 additions & 48 deletions zebra-scan/src/storage/db/tests/snapshot.rs
@@ -24,40 +24,39 @@
//! Due to `serde` limitations, some object types can't be represented exactly,
//! so RON uses the closest equivalent structure.
use std::{collections::BTreeMap, sync::Arc};
use std::collections::BTreeMap;

use itertools::Itertools;

use zebra_chain::{
block::{Block, Height},
block::Height,
parameters::Network::{self, *},
serialization::ZcashDeserializeInto,
};
use zebra_state::{RawBytes, ReadDisk, TransactionIndex, KV};
use zebra_state::{RawBytes, ReadDisk, SaplingScannedDatabaseIndex, TransactionLocation, KV};

use crate::{
storage::{db::ScannerDb, Storage},
tests::{FAKE_SAPLING_VIEWING_KEY, ZECPAGES_SAPLING_VIEWING_KEY},
Config,
};
use crate::storage::{db::ScannerDb, Storage};

/// Snapshot test for RocksDB column families, and their key-value data.
/// Snapshot test for:
/// - RocksDB column families, and their raw key-value data, and
/// - typed scanner result data using high-level storage methods.
///
/// These snapshots contain the `default` column family, but it is not used by Zebra.
#[test]
fn test_raw_rocksdb_column_families() {
fn test_database_format() {
let _init_guard = zebra_test::init();

test_raw_rocksdb_column_families_with_network(Mainnet);
test_raw_rocksdb_column_families_with_network(Testnet);
test_database_format_with_network(Mainnet);
test_database_format_with_network(Testnet);
}

/// Snapshot raw column families for `network`.
/// Snapshot raw and typed database formats for `network`.
///
/// See [`test_raw_rocksdb_column_families`].
fn test_raw_rocksdb_column_families_with_network(network: Network) {
/// See [`test_database_format()`] for details.
fn test_database_format_with_network(network: Network) {
let mut net_suffix = network.to_string();
net_suffix.make_ascii_lowercase();

let mut storage = Storage::new(&Config::ephemeral(), network, false);
let mut storage = super::new_test_storage(network);

// Snapshot the column family names
let mut cf_names = storage.db.list_cf().expect("empty database is valid");
@@ -75,48 +74,32 @@ fn test_raw_rocksdb_column_families_with_network(network: Network) {

settings.set_snapshot_suffix("empty");
settings.bind(|| snapshot_raw_rocksdb_column_family_data(&storage.db, &cf_names));
settings.bind(|| snapshot_typed_result_data(&storage));

// Snapshot a birthday that is automatically set to activation height
storage.add_sapling_key(&ZECPAGES_SAPLING_VIEWING_KEY.to_string(), None);
// Snapshot a birthday above activation height
storage.add_sapling_key(&FAKE_SAPLING_VIEWING_KEY.to_string(), Height(1_000_000));
super::add_fake_keys(&mut storage);

// Assert that the key format doesn't change.
settings.set_snapshot_suffix(format!("{net_suffix}_keys"));
settings.bind(|| snapshot_raw_rocksdb_column_family_data(&storage.db, &cf_names));
settings.bind(|| snapshot_typed_result_data(&storage));

// Snapshot raw database data for:
// - mainnet and testnet
// - genesis, block 1, and block 2
let blocks = match network {
Mainnet => &*zebra_test::vectors::CONTINUOUS_MAINNET_BLOCKS,
Testnet => &*zebra_test::vectors::CONTINUOUS_TESTNET_BLOCKS,
};

// We limit the number of blocks, because the serialized data is a few kilobytes per block.
//
// We limit the number of blocks, because we create 2 snapshots per block, one for each network.
for height in 0..=2 {
let block: Arc<Block> = blocks
.get(&height)
.expect("block height has test data")
.zcash_deserialize_into()
.expect("test data deserializes");

// Fake results from the first few blocks
storage.add_sapling_results(
&ZECPAGES_SAPLING_VIEWING_KEY.to_string(),
Height(height),
block
.transactions
.iter()
.enumerate()
.map(|(index, tx)| (TransactionIndex::from_usize(index), tx.hash().into()))
.collect(),
);
super::add_fake_results(&mut storage, network, Height(height));

let mut settings = insta::Settings::clone_current();
settings.set_snapshot_suffix(format!("{net_suffix}_{height}"));

// Assert that the result format doesn't change.
settings.bind(|| snapshot_raw_rocksdb_column_family_data(&storage.db, &cf_names));
settings.bind(|| snapshot_typed_result_data(&storage));
}

// TODO: add an empty marker result after PR #8080 merges
}

/// Snapshot the data in each column family, using `cargo insta` and RON serialization.
@@ -150,15 +133,70 @@ fn snapshot_raw_rocksdb_column_family_data(db: &ScannerDb, original_cf_names: &[
if cf_name == "default" {
assert_eq!(cf_data.len(), 0, "default column family is never used");
} else if cf_data.is_empty() {
// distinguish column family names from empty column families
// Distinguish column family names from empty column families
empty_column_families.push(format!("{cf_name}: no entries"));
} else {
// The note commitment tree snapshots will change if the trees do not have cached roots.
// But we expect them to always have cached roots,
// because those roots are used to populate the anchor column families.
// Make sure the raw format doesn't accidentally change.
insta::assert_ron_snapshot!(format!("{cf_name}_raw_data"), cf_data);
}
}

insta::assert_ron_snapshot!("empty_column_families", empty_column_families);
}

/// Snapshot typed scanner result data using high-level storage methods,
/// using `cargo insta` and RON serialization.
fn snapshot_typed_result_data(storage: &Storage) {
// TODO: snapshot the latest scanned heights after PR #8080 merges
//insta::assert_ron_snapshot!("latest_heights", latest_scanned_heights);

// Make sure the typed key format doesn't accidentally change.
//
// TODO: update this after PR #8080
let sapling_keys_and_birthday_heights = storage.sapling_keys();
// HashMap has an unstable order across Rust releases, so we need to sort it here.
insta::assert_ron_snapshot!(
"sapling_keys",
sapling_keys_and_birthday_heights,
{
"." => insta::sorted_redaction()
}
);

// HashMap has an unstable order across Rust releases, so we need to sort it here as well.
for (key_index, (sapling_key, _birthday_height)) in sapling_keys_and_birthday_heights
.iter()
.sorted()
.enumerate()
{
let sapling_results = storage.sapling_results(sapling_key);

// Check internal database method consistency
for (height, results) in sapling_results.iter() {
let sapling_index_and_results =
storage.sapling_results_for_key_and_height(sapling_key, *height);

// The list of results for each height must match the results queried by that height.
let sapling_results_for_height: Vec<_> = sapling_index_and_results
.values()
.flatten()
.cloned()
.collect();
assert_eq!(results, &sapling_results_for_height);

for (index, result) in sapling_index_and_results {
let index = SaplingScannedDatabaseIndex {
sapling_key: sapling_key.clone(),
tx_loc: TransactionLocation::from_parts(*height, index),
};

// The result for each index must match the result queried by that index.
let sapling_result_for_index = storage.sapling_result_for_index(&index);
assert_eq!(result, sapling_result_for_index);
}
}

// Make sure the typed result format doesn't accidentally change.
insta::assert_ron_snapshot!(format!("sapling_key_{key_index}_results"), sapling_results);
}
}
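For readers unfamiliar with `insta`, a small sketch (not from this commit) of the settings pattern used above: each bound suffix produces a separate snapshot file per assertion name, which is why several `sapling_results` snapshots appear below.

```rust
fn snapshot_with_suffix_example() {
    let mut settings = insta::Settings::clone_current();

    // e.g. "empty", "mainnet_keys", "mainnet_0": one snapshot file per suffix and name.
    settings.set_snapshot_suffix("mainnet_0");

    settings.bind(|| {
        // Written in RON format to a file named after the assertion and suffix,
        // e.g. something like `sapling_key_0_results@mainnet_0.snap`.
        insta::assert_ron_snapshot!("sapling_key_0_results", vec![Some(1), None]);
    });
}
```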
@@ -0,0 +1,10 @@
---
source: zebra-scan/src/storage/db/tests/snapshot.rs
expression: sapling_results
---
{
Height(0): [
SaplingScannedResult("c4eaa58879081de3c24a7b117ed2b28300e7ec4c4c1dff1d3f1268b7857a4ddb"),
],
Height(419199): [],
}
@@ -0,0 +1,13 @@
---
source: zebra-scan/src/storage/db/tests/snapshot.rs
expression: sapling_results
---
{
Height(0): [
SaplingScannedResult("c4eaa58879081de3c24a7b117ed2b28300e7ec4c4c1dff1d3f1268b7857a4ddb"),
],
Height(1): [
SaplingScannedResult("851bf6fbf7a976327817c738c489d7fa657752445430922d94c983c0b9ed4609"),
],
Height(419199): [],
}
@@ -0,0 +1,16 @@
---
source: zebra-scan/src/storage/db/tests/snapshot.rs
expression: sapling_results
---
{
Height(0): [
SaplingScannedResult("c4eaa58879081de3c24a7b117ed2b28300e7ec4c4c1dff1d3f1268b7857a4ddb"),
],
Height(1): [
SaplingScannedResult("851bf6fbf7a976327817c738c489d7fa657752445430922d94c983c0b9ed4609"),
],
Height(2): [
SaplingScannedResult("8974d08d1c5f9c860d8b629d582a56659a4a1dcb2b5f98a25a5afcc2a784b0f4"),
],
Height(419199): [],
}
@@ -0,0 +1,7 @@
---
source: zebra-scan/src/storage/db/tests/snapshot.rs
expression: sapling_results
---
{
Height(419199): [],
}
@@ -0,0 +1,10 @@
---
source: zebra-scan/src/storage/db/tests/snapshot.rs
expression: sapling_results
---
{
Height(0): [
SaplingScannedResult("c4eaa58879081de3c24a7b117ed2b28300e7ec4c4c1dff1d3f1268b7857a4ddb"),
],
Height(279999): [],
}