diff --git a/Cargo.toml b/Cargo.toml index bd2cf086..51ea045a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -34,7 +34,7 @@ ark-ec = { git = "https://github.com/arkworks-rs/algebra/" } ark-serialize = { git = "https://github.com/arkworks-rs/algebra/" } ark-poly = { git = "https://github.com/arkworks-rs/algebra/" } -ark-crypto-primitives = { git = "https://github.com/arkworks-rs/crypto-primitives" } +ark-crypto-primitives = { git = "https://github.com/arkworks-rs/crypto-primitives/" } ark-r1cs-std = { git = "https://github.com/arkworks-rs/r1cs-std/" } ark-bls12-377 = { git = "https://github.com/arkworks-rs/algebra/" } diff --git a/bench-templates/src/lib.rs b/bench-templates/src/lib.rs index 82c834c2..01ab73c2 100644 --- a/bench-templates/src/lib.rs +++ b/bench-templates/src/lib.rs @@ -1,17 +1,23 @@ -use ark_crypto_primitives::sponge::{ - poseidon::{PoseidonConfig, PoseidonSponge}, - CryptographicSponge, +use ark_crypto_primitives::{ + crh::{sha256::digest::Digest, CRHScheme}, + sponge::{ + poseidon::{PoseidonConfig, PoseidonSponge}, + CryptographicSponge, + }, }; use ark_ff::PrimeField; use ark_poly::Polynomial; use ark_serialize::{CanonicalSerialize, Compress}; use ark_std::{test_rng, UniformRand}; -use rand_chacha::{rand_core::SeedableRng, ChaCha20Rng}; +use rand_chacha::{ + rand_core::{RngCore, SeedableRng}, + ChaCha20Rng, +}; use core::time::Duration; -use std::time::Instant; +use std::{borrow::Borrow, marker::PhantomData, time::Instant}; -use ark_poly_commit::{LabeledPolynomial, PolynomialCommitment}; +use ark_poly_commit::{to_bytes, LabeledPolynomial, PolynomialCommitment}; pub use criterion::*; pub use paste::paste; @@ -276,3 +282,57 @@ macro_rules! bench { } }; } + +/**** Auxiliary methods for linear-code-based PCSs ****/ + +/// Needed for benches and tests. +pub struct LeafIdentityHasher; + +impl CRHScheme for LeafIdentityHasher { + type Input = Vec; + type Output = Vec; + type Parameters = (); + + fn setup(_: &mut R) -> Result { + Ok(()) + } + + fn evaluate>( + _: &Self::Parameters, + input: T, + ) -> Result { + Ok(input.borrow().to_vec().into()) + } +} + +/// Needed for benches and tests. 
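+/// Hashes a column of field elements into a byte digest by serializing the column with `to_bytes!` and feeding the result to the hash function `D`.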
+pub struct FieldToBytesColHasher +where + F: PrimeField + CanonicalSerialize, + D: Digest, +{ + _phantom: PhantomData<(F, D)>, +} + +impl CRHScheme for FieldToBytesColHasher +where + F: PrimeField + CanonicalSerialize, + D: Digest, +{ + type Input = Vec; + type Output = Vec; + type Parameters = (); + + fn setup(_rng: &mut R) -> Result { + Ok(()) + } + + fn evaluate>( + _parameters: &Self::Parameters, + input: T, + ) -> Result { + let mut dig = D::new(); + dig.update(to_bytes!(input.borrow()).unwrap()); + Ok(dig.finalize().to_vec()) + } +} diff --git a/poly-commit/Cargo.toml b/poly-commit/Cargo.toml index 65bd8ead..25ef2e6a 100644 --- a/poly-commit/Cargo.toml +++ b/poly-commit/Cargo.toml @@ -26,12 +26,19 @@ ark-r1cs-std = { version = "^0.4.0", default-features = false, optional = true } hashbrown = { version = "0.15", default-features = false, features = ["inline-more", "allocator-api2"], optional = true } rand = { version = "0.8.0", optional = true } rayon = { version = "1", optional = true } +merlin = { version = "3.0.0", default-features = false } [[bench]] name = "ipa_times" path = "benches/ipa_times.rs" harness = false +[[bench]] +name = "ligero_ml_times" +path = "benches/ligero_ml_times.rs" +harness = false + + [[bench]] name = "hyrax_times" path = "benches/hyrax_times.rs" @@ -53,10 +60,12 @@ ark-ed-on-bls12-381 = { version = "^0.4.0", default-features = false } ark-bls12-381 = { version = "^0.4.0", default-features = false, features = [ "curve" ] } ark-bls12-377 = { version = "^0.4.0", default-features = false, features = [ "curve" ] } ark-bn254 = { version = "^0.4.0", default-features = false, features = [ "curve" ] } - rand_chacha = { version = "0.3.0", default-features = false } ark-pcs-bench-templates = { path = "../bench-templates" } +[target.'cfg(target_arch = "aarch64")'.dependencies] +num-traits = { version = "0.2", default-features = false, features = ["libm"] } + [features] default = [ "std", "parallel" ] std = [ "ark-ff/std", "ark-ec/std", "ark-poly/std", "ark-std/std", "ark-relations/std", "ark-serialize/std", "ark-crypto-primitives/std"] diff --git a/poly-commit/benches/ligero_ml_times.rs b/poly-commit/benches/ligero_ml_times.rs new file mode 100644 index 00000000..1a193eee --- /dev/null +++ b/poly-commit/benches/ligero_ml_times.rs @@ -0,0 +1,55 @@ +use ark_crypto_primitives::{ + crh::{sha256::Sha256, CRHScheme, TwoToOneCRHScheme}, + merkle_tree::{ByteDigestConverter, Config}, +}; +use ark_pcs_bench_templates::*; +use ark_poly::{DenseMultilinearExtension, MultilinearExtension}; + +use ark_bn254::Fr; +use ark_ff::PrimeField; + +use ark_poly_commit::linear_codes::{LinearCodePCS, MultilinearLigero}; +use blake2::Blake2s256; +use rand_chacha::ChaCha20Rng; + +// Ligero PCS over BN254 +struct MerkleTreeParams; +type LeafH = LeafIdentityHasher; +type CompressH = Sha256; +impl Config for MerkleTreeParams { + type Leaf = Vec; + + type LeafDigest = ::Output; + type LeafInnerDigestConverter = ByteDigestConverter; + type InnerDigest = ::Output; + + type LeafHash = LeafH; + type TwoToOneHash = CompressH; +} + +pub type MLE = DenseMultilinearExtension; +type MTConfig = MerkleTreeParams; +type ColHasher = FieldToBytesColHasher; +type Ligero = LinearCodePCS< + MultilinearLigero, ColHasher>, + F, + MLE, + MTConfig, + ColHasher, +>; + +fn rand_poly_ligero_ml( + num_vars: usize, + rng: &mut ChaCha20Rng, +) -> DenseMultilinearExtension { + DenseMultilinearExtension::rand(num_vars, rng) +} + +fn rand_point_ligero_ml(num_vars: usize, rng: &mut ChaCha20Rng) -> Vec { + (0..num_vars).map(|_| 
F::rand(rng)).collect() +} + +const MIN_NUM_VARS: usize = 12; +const MAX_NUM_VARS: usize = 22; + +bench!(Ligero, rand_poly_ligero_ml, rand_point_ligero_ml); diff --git a/poly-commit/src/ipa_pc/mod.rs b/poly-commit/src/ipa_pc/mod.rs index 8afbacd9..53aeb7f6 100644 --- a/poly-commit/src/ipa_pc/mod.rs +++ b/poly-commit/src/ipa_pc/mod.rs @@ -1,7 +1,7 @@ use crate::{ - BTreeMap, BTreeSet, BatchLCProof, DenseUVPolynomial, Error, Evaluations, LabeledCommitment, - LabeledPolynomial, LinearCombination, PCCommitmentState, PCCommitterKey, PCUniversalParams, - PolynomialCommitment, QuerySet, CHALLENGE_SIZE, + utils::inner_product, BTreeMap, BTreeSet, BatchLCProof, DenseUVPolynomial, Error, Evaluations, + LabeledCommitment, LabeledPolynomial, LinearCombination, PCCommitmentState, PCCommitterKey, + PCUniversalParams, PolynomialCommitment, QuerySet, CHALLENGE_SIZE, }; use ark_crypto_primitives::sponge::CryptographicSponge; use ark_ec::{AffineRepr, CurveGroup, VariableBaseMSM}; @@ -86,11 +86,6 @@ where challenge.unwrap() } - #[inline] - fn inner_product(l: &[G::ScalarField], r: &[G::ScalarField]) -> G::ScalarField { - ark_std::cfg_iter!(l).zip(r).map(|(li, ri)| *li * ri).sum() - } - /// The succinct portion of `PC::check`. This algorithm runs in time /// O(log d), where d is the degree of the committed polynomials. fn succinct_check<'a>( @@ -674,10 +669,10 @@ where let (key_proj_l, _) = key_proj.split_at_mut(n / 2); let l = Self::cm_commit(key_l, coeffs_r, None, None) - + &h_prime.mul(Self::inner_product(coeffs_r, z_l)); + + &h_prime.mul(inner_product(coeffs_r, z_l)); let r = Self::cm_commit(key_r, coeffs_l, None, None) - + &h_prime.mul(Self::inner_product(coeffs_l, z_r)); + + &h_prime.mul(inner_product(coeffs_l, z_r)); let lr = G::Group::normalize_batch(&[l, r]); l_vec.push(lr[0]); diff --git a/poly-commit/src/lib.rs b/poly-commit/src/lib.rs index 5479051d..e234f875 100644 --- a/poly-commit/src/lib.rs +++ b/poly-commit/src/lib.rs @@ -128,6 +128,11 @@ pub use marlin::marlin_pst13_pc; /// [bdfg]: https://eprint.iacr.org/2020/081.pdf pub mod streaming_kzg; +/// Scheme based on the Ligero construction in [[Ligero]][ligero]. +/// +/// [ligero]: https://eprint.iacr.org/2022/1608 +pub mod linear_codes; + /// A polynomial commitment scheme based on the hardness of the /// discrete logarithm problem in prime-order groups. This is a /// Fiat-Shamired version of the PCS described in the Hyrax paper diff --git a/poly-commit/src/linear_codes/data_structures.rs b/poly-commit/src/linear_codes/data_structures.rs new file mode 100644 index 00000000..60960ae8 --- /dev/null +++ b/poly-commit/src/linear_codes/data_structures.rs @@ -0,0 +1,124 @@ +use crate::{utils::Matrix, PCCommitment, PCCommitmentState}; +use ark_crypto_primitives::{ + crh::CRHScheme, + merkle_tree::{Config, LeafParam, Path, TwoToOneParam}, +}; +use ark_ff::PrimeField; +use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; +#[cfg(not(feature = "std"))] +use ark_std::vec::Vec; +use ark_std::{marker::PhantomData, rand::RngCore}; + +#[derive(Derivative, CanonicalSerialize, CanonicalDeserialize)] +#[derivative(Clone(bound = ""), Debug(bound = ""))] +/// The public parameters for Ligero PCS. +pub struct LigeroPCParams { + pub(crate) _field: PhantomData, + /// The security parameter + pub(crate) sec_param: usize, + /// The inverse of the code rate. + pub(crate) rho_inv: usize, + /// This is a flag which determines if the random linear combination is done. 
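+ /// If `true`, the prover additionally sends the well-formedness proof v = r.M for a sponge-derived random vector r, and the verifier checks it.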
+ pub(crate) check_well_formedness: bool, + /// Parameters for hash function of Merkle tree leaves + #[derivative(Debug = "ignore")] + pub(crate) leaf_hash_param: LeafParam, + /// Parameters for hash function of Merke tree combining two nodes into one + #[derivative(Debug = "ignore")] + pub(crate) two_to_one_hash_param: TwoToOneParam, + // Parameters for obtaining leaf digest from leaf value. + #[derivative(Debug = "ignore")] + pub(crate) col_hash_params: H::Parameters, +} + +#[derive(Derivative, CanonicalSerialize, CanonicalDeserialize)] +#[derivative(Default(bound = ""), Clone(bound = ""), Debug(bound = ""))] +pub(crate) struct Metadata { + pub(crate) n_rows: usize, + pub(crate) n_cols: usize, + pub(crate) n_ext_cols: usize, +} + +/// The commitment to a polynomial is a root of the merkle tree, +/// where each node is a hash of the column of the encoded coefficient matrix U. +#[derive(Derivative, CanonicalSerialize, CanonicalDeserialize)] +#[derivative(Default(bound = ""), Clone(bound = ""), Debug(bound = ""))] +pub struct LinCodePCCommitment { + // number of rows resp. columns of the square matrix containing the coefficients of the polynomial + pub(crate) metadata: Metadata, + pub(crate) root: C::InnerDigest, +} + +impl PCCommitment for LinCodePCCommitment { + fn empty() -> Self { + LinCodePCCommitment::default() + } + + fn has_degree_bound(&self) -> bool { + false + } +} + +#[derive(Derivative, CanonicalSerialize, CanonicalDeserialize)] +#[derivative(Default(bound = ""), Clone(bound = ""), Debug(bound = ""))] +pub struct LinCodePCCommitmentState +where + F: PrimeField, + H: CRHScheme, +{ + pub(crate) mat: Matrix, + pub(crate) ext_mat: Matrix, + pub(crate) leaves: Vec, +} + +impl PCCommitmentState for LinCodePCCommitmentState +where + F: PrimeField, + H: CRHScheme, +{ + type Randomness = (); + fn empty() -> Self { + unimplemented!() + } + + fn rand( + _num_queries: usize, + _has_degree_bound: bool, + _num_vars: Option, + _rng: &mut R, + ) -> Self::Randomness { + unimplemented!() + } +} + +/// Proof of an individual linear code well-formedness check or opening +#[derive(Derivative, CanonicalSerialize, CanonicalDeserialize)] +#[derivative(Default(bound = ""), Clone(bound = ""), Debug(bound = ""))] +pub(crate) struct LinCodePCProofSingle +where + F: PrimeField, + C: Config, +{ + /// For each of the indices in q, `paths` contains the path from the root of the merkle tree to the leaf + pub(crate) paths: Vec>, + + /// v, s.t. 
E(v) = w + pub(crate) v: Vec, + + pub(crate) columns: Vec>, +} + +/// The Proof type for linear code PCS, which amounts to an array of individual proofs +#[derive(Derivative, CanonicalSerialize, CanonicalDeserialize)] +#[derivative(Default(bound = ""), Clone(bound = ""), Debug(bound = ""))] +pub struct LinCodePCProof +where + F: PrimeField, + C: Config, +{ + pub(crate) opening: LinCodePCProofSingle, + pub(crate) well_formedness: Option>, +} + +// Multiple poly at one point +pub(crate) type LPCPArray = Vec>; diff --git a/poly-commit/src/linear_codes/ligero.rs b/poly-commit/src/linear_codes/ligero.rs new file mode 100644 index 00000000..41dddf15 --- /dev/null +++ b/poly-commit/src/linear_codes/ligero.rs @@ -0,0 +1,143 @@ +use crate::{ + linear_codes::{utils::calculate_t, LigeroPCParams, LinCodeParametersInfo}, + utils::ceil_div, + PCCommitterKey, PCUniversalParams, PCVerifierKey, +}; +use ark_crypto_primitives::{ + crh::{CRHScheme, TwoToOneCRHScheme}, + merkle_tree::{Config, LeafParam, TwoToOneParam}, +}; +use ark_ff::PrimeField; +use ark_std::{log2, marker::PhantomData}; +#[cfg(not(feature = "std"))] +use num_traits::Float; + +impl LigeroPCParams +where + F: PrimeField, + C: Config, + H: CRHScheme, +{ + /// Create new UniversalParams + pub fn new( + sec_param: usize, + rho_inv: usize, + check_well_formedness: bool, + leaf_hash_param: LeafParam, + two_to_one_hash_param: TwoToOneParam, + col_hash_params: H::Parameters, + ) -> Self { + Self { + _field: PhantomData, + sec_param, + rho_inv, + check_well_formedness, + leaf_hash_param, + two_to_one_hash_param, + col_hash_params, + } + } +} + +impl PCUniversalParams for LigeroPCParams +where + F: PrimeField, + C: Config, + H: CRHScheme, +{ + fn max_degree(&self) -> usize { + if F::TWO_ADICITY < self.rho_inv as u32 { + 0 + } else if (F::TWO_ADICITY - self.rho_inv as u32) * 2 < 64 { + 2_usize.pow((F::TWO_ADICITY - self.rho_inv as u32) * 2) + } else { + usize::MAX + } + } +} + +impl PCCommitterKey for LigeroPCParams +where + F: PrimeField, + C: Config, + H: CRHScheme, +{ + fn max_degree(&self) -> usize { + if (F::TWO_ADICITY - self.rho_inv as u32) * 2 < 64 { + 2_usize.pow((F::TWO_ADICITY - self.rho_inv as u32) * 2) + } else { + usize::MAX + } + } + + fn supported_degree(&self) -> usize { + as PCCommitterKey>::max_degree(self) + } +} + +impl PCVerifierKey for LigeroPCParams +where + F: PrimeField, + C: Config, + H: CRHScheme, +{ + fn max_degree(&self) -> usize { + if (F::TWO_ADICITY - self.rho_inv as u32) * 2 < 64 { + 2_usize.pow((F::TWO_ADICITY - self.rho_inv as u32) * 2) + } else { + usize::MAX + } + } + + fn supported_degree(&self) -> usize { + as PCVerifierKey>::max_degree(self) + } +} + +impl LinCodeParametersInfo for LigeroPCParams +where + F: PrimeField, + C: Config, + H: CRHScheme, +{ + fn check_well_formedness(&self) -> bool { + self.check_well_formedness + } + + fn distance(&self) -> (usize, usize) { + (self.rho_inv - 1, self.rho_inv) + } + + fn sec_param(&self) -> usize { + self.sec_param + } + + /// Compute the a suitable (for instance, FFT-friendly over F) matrix with at least poly_len entries. + /// The return pair (n, m) corresponds to the dimensions n x m. + /// FIXME: Maybe, there should be some checks for making sure the extended row can have an FFT. 
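+ /// Concretely, the number of rows n is (roughly) sqrt(2 * poly_len / t) rounded up to a power of two, where t is the number of columns opened at the requested security level, and the number of columns is m = ceil(poly_len / n).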
+ fn compute_dimensions(&self, poly_len: usize) -> (usize, usize) { + assert_eq!( + (poly_len as f64) as usize, + poly_len, + "n cannot be converted to f64: aborting" + ); + let t = calculate_t::(self.sec_param(), self.distance(), poly_len).unwrap(); + let n = 1 << log2((ceil_div(2 * poly_len, t) as f64).sqrt().ceil() as usize); + let m = ceil_div(poly_len, n); + (n, m) + } + + fn leaf_hash_param(&self) -> &<::LeafHash as CRHScheme>::Parameters { + &self.leaf_hash_param + } + + fn two_to_one_hash_param( + &self, + ) -> &<::TwoToOneHash as TwoToOneCRHScheme>::Parameters { + &self.two_to_one_hash_param + } + + fn col_hash_params(&self) -> &::Parameters { + &self.col_hash_params + } +} diff --git a/poly-commit/src/linear_codes/mod.rs b/poly-commit/src/linear_codes/mod.rs new file mode 100644 index 00000000..ce3fa394 --- /dev/null +++ b/poly-commit/src/linear_codes/mod.rs @@ -0,0 +1,546 @@ +use crate::{ + to_bytes, + utils::{inner_product, Matrix}, + Error, LabeledCommitment, LabeledPolynomial, PCCommitterKey, PCUniversalParams, PCVerifierKey, + PolynomialCommitment, +}; +use ark_crypto_primitives::{ + crh::{CRHScheme, TwoToOneCRHScheme}, + merkle_tree::{Config, MerkleTree}, + sponge::{Absorb, CryptographicSponge}, +}; +use ark_ff::PrimeField; +use ark_poly::Polynomial; +use ark_std::{borrow::Borrow, marker::PhantomData, rand::RngCore}; +#[cfg(not(feature = "std"))] +use ark_std::{string::ToString, vec::Vec}; + +#[cfg(feature = "parallel")] +use rayon::iter::{IntoParallelIterator, IntoParallelRefIterator, ParallelIterator}; + +mod utils; + +mod multilinear_ligero; +mod univariate_ligero; + +pub use multilinear_ligero::MultilinearLigero; +pub use univariate_ligero::UnivariateLigero; + +mod data_structures; +mod ligero; +use data_structures::*; + +pub use data_structures::{LigeroPCParams, LinCodePCProof}; + +use utils::{calculate_t, get_indices_from_sponge}; + +const FIELD_SIZE_ERROR: &str = "This field is not suitable for the proposed parameters"; + +/// For linear code PC schemes, the universal paramters, committer key +/// and verifier key are all the same. This trait abstracts the common +/// information contained in these. +pub trait LinCodeParametersInfo +where + C: Config, + H: CRHScheme, +{ + /// Get the security parameter. + fn sec_param(&self) -> usize; + + /// Get the distance of the code. + fn distance(&self) -> (usize, usize); + + /// See whether there should be a well-formedness check. + fn check_well_formedness(&self) -> bool; + + /// Compute the dimensions of the coefficient matrix. + fn compute_dimensions(&self, n: usize) -> (usize, usize); + + /// Get the hash parameters for obtaining leaf digest from leaf value. + fn leaf_hash_param(&self) -> &<::LeafHash as CRHScheme>::Parameters; + + /// Get the parameters for hashing nodes in the merkle tree. + fn two_to_one_hash_param( + &self, + ) -> &<::TwoToOneHash as TwoToOneCRHScheme>::Parameters; + + /// Get the parameters for hashing a vector of values, + /// representing a column of the coefficient matrix, into a leaf value. + fn col_hash_params(&self) -> &H::Parameters; +} + +/// A trait for linear codes. +pub trait LinearEncode +where + F: PrimeField, + C: Config, + H: CRHScheme, + P: Polynomial, +{ + /// For schemes like Brakedown and Ligero, PCCommiiterKey and + /// PCVerifierKey and PCUniversalParams are all the same. + type LinCodePCParams: PCUniversalParams + + PCCommitterKey + + PCVerifierKey + + LinCodeParametersInfo + + Sync; + + /// Does a default setup for the PCS. 
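+ /// The Ligero instantiations in this crate pick a security parameter of 128 and a fixed rho_inv (2 for the multilinear scheme, 4 for the univariate one).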
+ fn setup( + max_degree: usize, + num_vars: Option, + rng: &mut R, + leaf_hash_param: <::LeafHash as CRHScheme>::Parameters, + two_to_one_hash_param: <::TwoToOneHash as TwoToOneCRHScheme>::Parameters, + col_hash_params: H::Parameters, + ) -> Self::LinCodePCParams; + + /// Encode a message, which is interpreted as a vector of coefficients + /// of a polynomial of degree m - 1. + fn encode(msg: &[F], param: &Self::LinCodePCParams) -> Vec; + + /// Represent the polynomial as either coefficients, + /// in the univariate case, or evaluations over + /// the Boolean hypercube, in the multilinear case. + fn poly_to_vec(polynomial: &P) -> Vec; + + /// Represent the query point as a vector of Field elements. + fn point_to_vec(point: P::Point) -> Vec; + + /// Arrange the coefficients of the polynomial into a matrix, + /// and apply encoding to each row. + /// Returns the tuple (original_matrix, encoded_matrix). + fn compute_matrices(polynomial: &P, param: &Self::LinCodePCParams) -> (Matrix, Matrix) { + let mut coeffs = Self::poly_to_vec(polynomial); + + // 1. Computing the matrix dimensions. + let (n_rows, n_cols) = param.compute_dimensions(coeffs.len()); + + // padding the coefficient vector with zeroes + coeffs.resize(n_rows * n_cols, F::zero()); + + let mat = Matrix::new_from_flat(n_rows, n_cols, &coeffs); + + // 2. Apply encoding row-wise + let rows = mat.rows(); + let ext_mat = + Matrix::new_from_rows(cfg_iter!(rows).map(|r| Self::encode(r, param)).collect()); + + (mat, ext_mat) + } + + /// Tensor the query point z in the following sense: + /// For a polynomial p(X) represented by a matrix M + /// with n rows and m columns such that M_{i,j} = p_{i + n*j}, + /// we define the tensoring of `z`: (a, b) = tensor(z, n, m) such that: + /// p(z) = b^T.M.a + /// returns the evaluation of p at z. + fn tensor(z: &P::Point, n: usize, m: usize) -> (Vec, Vec); +} + +/// Any linear-code-based commitment scheme. +pub struct LinearCodePCS +where + F: PrimeField, + C: Config, + P: Polynomial, + H: CRHScheme, + L: LinearEncode, +{ + _phantom: PhantomData<(L, F, P, C, H)>, +} + +impl PolynomialCommitment for LinearCodePCS +where + L: LinearEncode, + F: PrimeField + Absorb, + P: Polynomial, + C: Config + 'static, + Vec: Borrow<::Input>, + H::Output: Into + Send, + C::Leaf: Sized + Clone + Default + Send + AsRef, + H: CRHScheme + 'static, +{ + type UniversalParams = L::LinCodePCParams; + + type CommitterKey = L::LinCodePCParams; + + type VerifierKey = L::LinCodePCParams; + + type Commitment = LinCodePCCommitment; + + type CommitmentState = LinCodePCCommitmentState; + + type Proof = LPCPArray; + + type BatchProof = Vec; + + type Error = Error; + + /// This is only a default setup with reasonable parameters. + /// To create your own public parameters (from which vk/ck can be derived by `trim`), + /// see the documentation for `LigeroPCUniversalParams`. 
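+ /// Returns `Error::InvalidParameters` if the field cannot support the requested `max_degree` at the default rate.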
+ fn setup( + max_degree: usize, + num_vars: Option, + rng: &mut R, + ) -> Result { + let leaf_hash_param = ::setup(rng).unwrap(); + let two_to_one_hash_param = ::setup(rng) + .unwrap() + .clone(); + let col_hash_params = ::setup(rng).unwrap(); + let pp = L::setup::( + max_degree, + num_vars, + rng, + leaf_hash_param, + two_to_one_hash_param, + col_hash_params, + ); + let real_max_degree = ::max_degree(&pp); + if max_degree > real_max_degree || real_max_degree == 0 { + return Err(Error::InvalidParameters(FIELD_SIZE_ERROR.to_string())); + } + Ok(pp) + } + + fn trim( + pp: &Self::UniversalParams, + _supported_degree: usize, + _supported_hiding_bound: usize, + _enforced_degree_bounds: Option<&[usize]>, + ) -> Result<(Self::CommitterKey, Self::VerifierKey), Self::Error> { + if ::max_degree(pp) == 0 { + return Err(Error::InvalidParameters(FIELD_SIZE_ERROR.to_string())); + } + Ok((pp.clone(), pp.clone())) + } + + fn commit<'a>( + ck: &Self::CommitterKey, + polynomials: impl IntoIterator>, + _rng: Option<&mut dyn RngCore>, + ) -> Result< + ( + Vec>, + Vec, + ), + Self::Error, + > + where + P: 'a, + { + let mut commitments = Vec::new(); + let mut states = Vec::new(); + + for labeled_polynomial in polynomials { + let polynomial = labeled_polynomial.polynomial(); + + // 1. Arrange the coefficients of the polynomial into a matrix, + // and apply encoding to get `ext_mat`. + let (mat, ext_mat) = L::compute_matrices(polynomial, ck); + let n_rows = mat.n; + let n_cols = mat.m; + let n_ext_cols = ext_mat.m; + + // 2. Create the Merkle tree from the hashes of each column. + let ext_mat_cols = ext_mat.cols(); + let leaves: Vec = cfg_into_iter!(ext_mat_cols) + .map(|col| { + H::evaluate(ck.col_hash_params(), col) + .map_err(|_| Error::HashingError) + .unwrap() + }) + .collect(); + let state = Self::CommitmentState { + mat, + ext_mat, + leaves, + }; + let mut leaves: Vec = + state.leaves.clone().into_iter().map(|h| h.into()).collect(); // TODO cfg_inter + let col_tree = create_merkle_tree::( + &mut leaves, + ck.leaf_hash_param(), + ck.two_to_one_hash_param(), + )?; + + // 3. Obtain the MT root + let root = col_tree.root(); + + // 4. The commitment is just the root, but since each commitment could be to a differently-sized polynomial, we also add some metadata. + let commitment = LinCodePCCommitment { + metadata: Metadata { + n_rows, + n_cols, + n_ext_cols, + }, + root, + }; + + commitments.push(LabeledCommitment::new( + labeled_polynomial.label().clone(), + commitment, + None, + )); + states.push(state); + } + Ok((commitments, states)) + } + + fn open<'a>( + ck: &Self::CommitterKey, + _labeled_polynomials: impl IntoIterator>, + commitments: impl IntoIterator>, + point: &'a P::Point, + sponge: &mut impl CryptographicSponge, + states: impl IntoIterator, + _rng: Option<&mut dyn RngCore>, + ) -> Result + where + P: 'a, + Self::CommitmentState: 'a, + Self::Commitment: 'a, + { + let mut proof_array = LPCPArray::default(); + + for (labeled_commitment, state) in commitments.into_iter().zip(states) { + let commitment = labeled_commitment.commitment(); + let n_rows = commitment.metadata.n_rows; + let n_cols = commitment.metadata.n_cols; + + // 1. Arrange the coefficients of the polynomial into a matrix, + // and apply encoding to get `ext_mat`. + // 2. Create the Merkle tree from the hashes of each column. 
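+ // Here the matrices and column hashes are recovered from the commitment state produced by `commit`; only the Merkle tree is rebuilt from the stored leaf hashes.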
+ let Self::CommitmentState { + mat, + ext_mat, + leaves: col_hashes, + } = state; + let mut col_hashes: Vec = + col_hashes.clone().into_iter().map(|h| h.into()).collect(); // TODO cfg_inter + + let col_tree = create_merkle_tree::( + &mut col_hashes, + ck.leaf_hash_param(), + ck.two_to_one_hash_param(), + )?; + + // 3. Generate vector `b` to left-multiply the matrix. + let (_, b) = L::tensor(point, n_cols, n_rows); + + sponge.absorb(&to_bytes!(&commitment.root).map_err(|_| Error::TranscriptError)?); + + // If we are checking well-formedness, we need to compute the well-formedness proof (which is just r.M) and append it to the transcript. + let well_formedness = if ck.check_well_formedness() { + let r = sponge.squeeze_field_elements::(n_rows); + let v = mat.row_mul(&r); + + sponge.absorb(&v); + Some(v) + } else { + None + }; + + let point_vec = L::point_to_vec(point.clone()); + sponge.absorb(&point_vec); + + proof_array.push(LinCodePCProof { + // Compute the opening proof and append b.M to the transcript. + opening: generate_proof( + ck.sec_param(), + ck.distance(), + &b, + &mat, + &ext_mat, + &col_tree, + sponge, + )?, + well_formedness, + }); + } + + Ok(proof_array) + } + + fn check<'a>( + vk: &Self::VerifierKey, + commitments: impl IntoIterator>, + point: &'a P::Point, + values: impl IntoIterator, + proof_array: &Self::Proof, + sponge: &mut impl CryptographicSponge, + _rng: Option<&mut dyn RngCore>, + ) -> Result + where + Self::Commitment: 'a, + { + let leaf_hash_param: &<::LeafHash as CRHScheme>::Parameters = + vk.leaf_hash_param(); + let two_to_one_hash_param: &<::TwoToOneHash as TwoToOneCRHScheme>::Parameters = + vk.two_to_one_hash_param(); + + for (i, (labeled_commitment, value)) in commitments.into_iter().zip(values).enumerate() { + let proof = &proof_array[i]; + let commitment = labeled_commitment.commitment(); + let n_rows = commitment.metadata.n_rows; + let n_cols = commitment.metadata.n_cols; + let n_ext_cols = commitment.metadata.n_ext_cols; + let root = &commitment.root; + let t = calculate_t::(vk.sec_param(), vk.distance(), n_ext_cols)?; + + sponge.absorb(&to_bytes!(&commitment.root).map_err(|_| Error::TranscriptError)?); + + let out = if vk.check_well_formedness() { + if proof.well_formedness.is_none() { + return Err(Error::InvalidCommitment); + } + let tmp = &proof.well_formedness.as_ref(); + let v = tmp.unwrap(); + let r = sponge.squeeze_field_elements::(n_rows); + // Upon sending `v` to the Verifier, add it to the sponge. The claim is that v = r.M. + sponge.absorb(&v); + + (Some(v), Some(r)) + } else { + (None, None) + }; + + // 1. Seed the transcript with the point and the recieved vector + let point_vec = L::point_to_vec(point.clone()); + sponge.absorb(&point_vec); + sponge.absorb(&proof.opening.v); + + // 2. Ask random oracle for the `t` indices where the checks happen. + let indices = get_indices_from_sponge(n_ext_cols, t, sponge)?; + + // 3. Hash the received columns into leaf hashes. + let mut col_hashes: Vec = Vec::new(); + + for c in proof.opening.columns.iter() { + match H::evaluate(vk.col_hash_params(), c.clone()) { + Ok(a) => col_hashes.push(a.into()), + Err(_) => return Err(Error::HashingError), + } + } + + // 4. Verify the paths for each of the leaf hashes - this is only run once, + // even if we have a well-formedness check (i.e., we save sending and checking the columns). + // See "Concrete optimizations to the commitment scheme", p.12 of [Brakedown](https://eprint.iacr.org/2021/1043.pdf). 
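+ // Each opened column comes with a Merkle path for the queried index q_j: a path for the wrong index is an error, and a failing path verification makes the check return false.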
+ for (j, (leaf, q_j)) in col_hashes.iter().zip(indices.iter()).enumerate() { + let path = &proof.opening.paths[j]; + if path.leaf_index != *q_j { + return Err(Error::InvalidCommitment); + } + + if !path + .verify(leaf_hash_param, two_to_one_hash_param, root, leaf.clone()) + .map_err(|_| Error::InvalidCommitment)? + { + return Ok(false); + } + } + + // 5. Compute the encoding w = E(v). + let w = L::encode(&proof.opening.v, vk); + + // 6. Compute `a`, `b` to right- and left- multiply with the matrix `M`. + let (a, b) = L::tensor(point, n_cols, n_rows); + + // 7. Probabilistic checks that whatever the prover sent, + // matches with what the verifier computed for himself. + // Note: we sacrifice some code repetition in order not to repeat execution. + if let (Some(well_formedness), Some(r)) = out { + let w_well_formedness = L::encode(well_formedness, vk); + for (transcript_index, matrix_index) in indices.iter().enumerate() { + if inner_product(&r, &proof.opening.columns[transcript_index]) + != w_well_formedness[*matrix_index] + || inner_product(&b, &proof.opening.columns[transcript_index]) + != w[*matrix_index] + { + return Err(Error::InvalidCommitment); + } + } + } else { + for (transcript_index, matrix_index) in indices.iter().enumerate() { + if inner_product(&b, &proof.opening.columns[transcript_index]) + != w[*matrix_index] + { + return Err(Error::InvalidCommitment); + } + } + } + + if inner_product(&proof.opening.v, &a) != value { + eprintln!("Function check: claimed value in position {i} does not match the evaluation of the committed polynomial in the same position"); + return Ok(false); + } + } + + Ok(true) + } +} + +// TODO maybe this can go to utils +fn create_merkle_tree( + leaves: &mut Vec, + leaf_hash_param: &<::LeafHash as CRHScheme>::Parameters, + two_to_one_hash_param: &<::TwoToOneHash as TwoToOneCRHScheme>::Parameters, +) -> Result, Error> +where + C: Config, + C::Leaf: Default + Clone + Send + AsRef, +{ + // pad the column hashes with zeroes + let next_pow_of_two = leaves.len().next_power_of_two(); + leaves.resize(next_pow_of_two, ::default()); + + MerkleTree::::new(leaf_hash_param, two_to_one_hash_param, leaves) + .map_err(|_| Error::HashingError) +} + +fn generate_proof( + sec_param: usize, + distance: (usize, usize), + b: &[F], + mat: &Matrix, + ext_mat: &Matrix, + col_tree: &MerkleTree, + sponge: &mut S, +) -> Result, Error> +where + F: PrimeField + Absorb, + C: Config, + S: CryptographicSponge, +{ + let t = calculate_t::(sec_param, distance, ext_mat.m)?; + + // 1. left-multiply the matrix by `b`. + let v = mat.row_mul(b); + sponge.absorb(&v); + + // 2. Generate t column indices to test the linear combination on. + let indices = get_indices_from_sponge(ext_mat.m, t, sponge)?; + + // 3. Compute Merkle tree paths for the requested columns. 
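+ // For each sampled index, the proof carries both the encoded column itself and its authentication path in the column Merkle tree.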
+ let mut queried_columns = Vec::with_capacity(t); + let mut paths = Vec::with_capacity(t); + + let ext_mat_cols = ext_mat.cols(); + + for i in indices { + queried_columns.push(ext_mat_cols[i].clone()); + paths.push( + col_tree + .generate_proof(i) + .map_err(|_| Error::TranscriptError)?, + ); + } + + Ok(LinCodePCProofSingle { + paths, + v, + columns: queried_columns, + }) +} diff --git a/poly-commit/src/linear_codes/multilinear_ligero/mod.rs b/poly-commit/src/linear_codes/multilinear_ligero/mod.rs new file mode 100644 index 00000000..4d8c8b86 --- /dev/null +++ b/poly-commit/src/linear_codes/multilinear_ligero/mod.rs @@ -0,0 +1,81 @@ +use super::{ + utils::{reed_solomon, tensor_vec}, + LigeroPCParams, LinearEncode, +}; +use ark_crypto_primitives::{ + crh::{CRHScheme, TwoToOneCRHScheme}, + merkle_tree::Config, +}; +use ark_ff::{FftField, PrimeField}; +use ark_poly::{MultilinearExtension, Polynomial}; +#[cfg(not(feature = "std"))] +use ark_std::vec::Vec; +use ark_std::{log2, marker::PhantomData}; + +mod tests; + +/// The multilinear Ligero polynomial commitment scheme based on [[Ligero]][ligero]. +/// The scheme defaults to the naive batching strategy. +/// +/// Note: The scheme currently does not support hiding. +/// +/// [ligero]: https://eprint.iacr.org/2022/1608.pdf +pub struct MultilinearLigero, H: CRHScheme> { + _phantom: PhantomData<(F, C, P, H)>, +} + +impl LinearEncode for MultilinearLigero +where + F: PrimeField + FftField, + C: Config, + P: MultilinearExtension, +

<P as Polynomial<F>>::Point: Into<Vec<F>>, + H: CRHScheme, +{ + type LinCodePCParams = LigeroPCParams<F, C, H>; + + fn setup<R>( + _max_degree: usize, + _num_vars: Option<usize>, + _rng: &mut R, + leaf_hash_param: <<C as Config>::LeafHash as CRHScheme>::Parameters, + two_to_one_hash_param: <<C as Config>::TwoToOneHash as TwoToOneCRHScheme>::Parameters, + col_hash_params: H::Parameters, + ) -> Self::LinCodePCParams { + Self::LinCodePCParams::new( + 128, + 2, + true, + leaf_hash_param, + two_to_one_hash_param, + col_hash_params, + ) + } + + fn encode(msg: &[F], param: &Self::LinCodePCParams) -> Vec<F> { + reed_solomon(msg, param.rho_inv) + } + + fn poly_to_vec(polynomial: &P) -> Vec<F> { + polynomial.to_evaluations() + } + + fn point_to_vec(point: <P as Polynomial<F>

>::Point) -> Vec<F> { + point + } + + /// For a multilinear polynomial in n+m variables, it returns a tuple for k = {n, m}: + /// ((1-z_1)*(1-z_2)*...*(1-z_k), z_1*(1-z_2)*...*(1-z_k), ..., z_1*z_2*...*z_k) + fn tensor( + point: &<P as Polynomial<F>

>::Point, + left_len: usize, + _right_len: usize, + ) -> (Vec, Vec) { + let point: Vec = Self::point_to_vec(point.clone()); + + let split = log2(left_len) as usize; + let left = &point[..split]; + let right = &point[split..]; + (tensor_vec(left), tensor_vec(right)) + } +} diff --git a/poly-commit/src/linear_codes/multilinear_ligero/tests.rs b/poly-commit/src/linear_codes/multilinear_ligero/tests.rs new file mode 100644 index 00000000..6f8edc1f --- /dev/null +++ b/poly-commit/src/linear_codes/multilinear_ligero/tests.rs @@ -0,0 +1,256 @@ +#[cfg(test)] +mod tests { + use crate::{ + linear_codes::{LigeroPCParams, LinearCodePCS, MultilinearLigero, PolynomialCommitment}, + utils::test_sponge, + LabeledPolynomial, + }; + use ark_bls12_377::Fr; + use ark_bls12_381::Fr as Fr381; + use ark_crypto_primitives::{ + crh::{sha256::Sha256, CRHScheme, TwoToOneCRHScheme}, + merkle_tree::{ByteDigestConverter, Config}, + }; + use ark_ff::{Field, PrimeField}; + use ark_poly::evaluations::multivariate::{MultilinearExtension, SparseMultilinearExtension}; + use ark_std::test_rng; + use blake2::Blake2s256; + use rand_chacha::{rand_core::SeedableRng, ChaCha20Rng}; + + use ark_pcs_bench_templates::{FieldToBytesColHasher, LeafIdentityHasher}; + + type LeafH = LeafIdentityHasher; + type CompressH = Sha256; + type ColHasher = FieldToBytesColHasher; + + struct MerkleTreeParams; + + impl Config for MerkleTreeParams { + type Leaf = Vec; + + type LeafDigest = ::Output; + type LeafInnerDigestConverter = ByteDigestConverter; + type InnerDigest = ::Output; + + type LeafHash = LeafH; + type TwoToOneHash = CompressH; + } + + type MTConfig = MerkleTreeParams; + type LigeroPCS = LinearCodePCS< + MultilinearLigero, ColHasher>, + F, + SparseMultilinearExtension, + MTConfig, + ColHasher, + >; + + fn rand_poly( + _: usize, + num_vars: Option, + rng: &mut ChaCha20Rng, + ) -> SparseMultilinearExtension { + match num_vars { + Some(n) => SparseMultilinearExtension::rand(n, rng), + None => unimplemented!(), // should not happen in ML case! + } + } + + fn constant_poly( + _: usize, + num_vars: Option, + rng: &mut ChaCha20Rng, + ) -> SparseMultilinearExtension { + // f1 = (1-x1)(1-x2)(1-x3)(1-x5)[(1-x6)*x4 + 2(1-x4)*x6] + match num_vars { + Some(n) => { + let points = vec![(1, Fr::rand(rng))]; + SparseMultilinearExtension::from_evaluations(n, &points) + } + None => unimplemented!(), // should not happen in ML case! 
+ } + } + + #[test] + fn test_construction() { + let mut rng = &mut test_rng(); + let num_vars = 10; + // just to make sure we have the right degree given the FFT domain for our field + let leaf_hash_param = ::setup(&mut rng).unwrap(); + let two_to_one_hash_param = ::setup(&mut rng) + .unwrap() + .clone(); + let col_hash_params = as CRHScheme>::setup(&mut rng).unwrap(); + let check_well_formedness = true; + + let pp: LigeroPCParams> = LigeroPCParams::new( + 128, + 4, + check_well_formedness, + leaf_hash_param, + two_to_one_hash_param, + col_hash_params, + ); + + let (ck, vk) = LigeroPCS::::trim(&pp, 0, 0, None).unwrap(); + + let rand_chacha = &mut ChaCha20Rng::from_rng(test_rng()).unwrap(); + let labeled_poly = LabeledPolynomial::new( + "test".to_string(), + rand_poly(1, Some(num_vars), rand_chacha), + Some(num_vars), + Some(num_vars), + ); + + let mut test_sponge = test_sponge::(); + let (c, rands) = LigeroPCS::::commit(&ck, &[labeled_poly.clone()], None).unwrap(); + + let point = rand_point(Some(num_vars), rand_chacha); + + let value = labeled_poly.evaluate(&point); + + let proof = LigeroPCS::::open( + &ck, + &[labeled_poly], + &c, + &point, + &mut (test_sponge.clone()), + &rands, + None, + ) + .unwrap(); + assert!( + LigeroPCS::::check(&vk, &c, &point, [value], &proof, &mut test_sponge, None) + .unwrap() + ); + } + + fn rand_point(num_vars: Option, rng: &mut ChaCha20Rng) -> Vec { + match num_vars { + Some(n) => (0..n).map(|_| F::rand(rng)).collect(), + None => unimplemented!(), // should not happen! + } + } + + #[test] + fn single_poly_test() { + use crate::tests::*; + single_poly_test::<_, _, LigeroPCS, _>( + Some(5), + rand_poly::, + rand_point::, + poseidon_sponge_for_test::, + ) + .expect("test failed for bls12-377"); + single_poly_test::<_, _, LigeroPCS, _>( + Some(10), + rand_poly::, + rand_point::, + poseidon_sponge_for_test::, + ) + .expect("test failed for bls12-381"); + } + + #[test] + fn constant_poly_test() { + use crate::tests::*; + single_poly_test::<_, _, LigeroPCS, _>( + Some(10), + constant_poly::, + rand_point::, + poseidon_sponge_for_test::, + ) + .expect("test failed for bls12-377"); + single_poly_test::<_, _, LigeroPCS, _>( + Some(5), + constant_poly::, + rand_point::, + poseidon_sponge_for_test::, + ) + .expect("test failed for bls12-381"); + } + + #[test] + fn full_end_to_end_test() { + use crate::tests::*; + full_end_to_end_test::<_, _, LigeroPCS, _>( + Some(8), + rand_poly::, + rand_point::, + poseidon_sponge_for_test::, + ) + .expect("test failed for bls12-377"); + println!("Finished bls12-377"); + full_end_to_end_test::<_, _, LigeroPCS, _>( + Some(3), + rand_poly::, + rand_point::, + poseidon_sponge_for_test::, + ) + .expect("test failed for bls12-381"); + println!("Finished bls12-381"); + } + + #[test] + fn single_equation_test() { + use crate::tests::*; + single_equation_test::<_, _, LigeroPCS, _>( + Some(10), + rand_poly::, + rand_point::, + poseidon_sponge_for_test::, + ) + .expect("test failed for bls12-377"); + println!("Finished bls12-377"); + single_equation_test::<_, _, LigeroPCS, _>( + Some(5), + rand_poly::, + rand_point::, + poseidon_sponge_for_test::, + ) + .expect("test failed for bls12-381"); + println!("Finished bls12-381"); + } + + #[test] + fn two_equation_test() { + use crate::tests::*; + two_equation_test::<_, _, LigeroPCS, _>( + Some(5), + rand_poly::, + rand_point::, + poseidon_sponge_for_test::, + ) + .expect("test failed for bls12-377"); + println!("Finished bls12-377"); + two_equation_test::<_, _, LigeroPCS, _>( + Some(10), + 
rand_poly::, + rand_point::, + poseidon_sponge_for_test::, + ) + .expect("test failed for bls12-381"); + println!("Finished bls12-381"); + } + + #[test] + fn full_end_to_end_equation_test() { + use crate::tests::*; + full_end_to_end_equation_test::<_, _, LigeroPCS, _>( + Some(5), + rand_poly::, + rand_point::, + poseidon_sponge_for_test::, + ) + .expect("test failed for bls12-377"); + println!("Finished bls12-377"); + full_end_to_end_equation_test::<_, _, LigeroPCS, _>( + Some(8), + rand_poly::, + rand_point::, + poseidon_sponge_for_test::, + ) + .expect("test failed for bls12-381"); + println!("Finished bls12-381"); + } +} diff --git a/poly-commit/src/linear_codes/univariate_ligero/mod.rs b/poly-commit/src/linear_codes/univariate_ligero/mod.rs new file mode 100644 index 00000000..6ea7b133 --- /dev/null +++ b/poly-commit/src/linear_codes/univariate_ligero/mod.rs @@ -0,0 +1,84 @@ +use super::{utils::reed_solomon, LigeroPCParams, LinearEncode}; +use ark_crypto_primitives::{ + crh::{CRHScheme, TwoToOneCRHScheme}, + merkle_tree::Config, +}; +use ark_ff::PrimeField; +use ark_poly::DenseUVPolynomial; +use ark_std::marker::PhantomData; +#[cfg(not(feature = "std"))] +use ark_std::vec::Vec; + +mod tests; + +/// The univariate Ligero polynomial commitment scheme based on [[Ligero]][ligero]. +/// The scheme defaults to the naive batching strategy. +/// +/// Note: The scheme currently does not support hiding. +/// +/// [ligero]: https://eprint.iacr.org/2022/1608.pdf +pub struct UnivariateLigero, H: CRHScheme> { + _phantom: PhantomData<(F, C, P, H)>, +} + +impl LinearEncode for UnivariateLigero +where + F: PrimeField, + C: Config, + P: DenseUVPolynomial, + P::Point: Into, + H: CRHScheme, +{ + type LinCodePCParams = LigeroPCParams; + + fn setup( + _max_degree: usize, + _num_vars: Option, + _rng: &mut R, + leaf_hash_param: <::LeafHash as CRHScheme>::Parameters, + two_to_one_hash_param: <::TwoToOneHash as TwoToOneCRHScheme>::Parameters, + col_hash_params: H::Parameters, + ) -> Self::LinCodePCParams { + Self::LinCodePCParams::new( + 128, + 4, + true, + leaf_hash_param, + two_to_one_hash_param, + col_hash_params, + ) + } + + fn encode(msg: &[F], param: &Self::LinCodePCParams) -> Vec { + reed_solomon(msg, param.rho_inv) + } + + /// For a univariate polynomial, we simply return the list of coefficients. 
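+ /// (lowest-degree coefficient first, as stored by `DenseUVPolynomial`).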
+ fn poly_to_vec(polynomial: &P) -> Vec { + polynomial.coeffs().to_vec() + } + + fn point_to_vec(point: P::Point) -> Vec { + vec![point] + } + + /// For a univariate polynomial it returns a tuple: + /// ((1, z, z^2, ..., z^n), (1, z^n, z^(2n), ..., z^((m-1)n))) + fn tensor(z: &F, left: usize, right: usize) -> (Vec, Vec) { + let mut left_out = Vec::with_capacity(left); + let mut pow_a = F::one(); + for _ in 0..left { + left_out.push(pow_a); + pow_a *= z; + } + + let mut right_out = Vec::with_capacity(right); + let mut pow_b = F::one(); + for _ in 0..right { + right_out.push(pow_b); + pow_b *= pow_a; + } + + (left_out, right_out) + } +} diff --git a/poly-commit/src/linear_codes/univariate_ligero/tests.rs b/poly-commit/src/linear_codes/univariate_ligero/tests.rs new file mode 100644 index 00000000..a890ffb3 --- /dev/null +++ b/poly-commit/src/linear_codes/univariate_ligero/tests.rs @@ -0,0 +1,368 @@ +#[cfg(test)] +mod tests { + use crate::{ + linear_codes::{LigeroPCParams, LinearCodePCS, PolynomialCommitment, UnivariateLigero}, + utils::test_sponge, + LabeledPolynomial, + }; + use ark_bls12_377::Fr; + use ark_bls12_381::Fr as Fr381; + use ark_crypto_primitives::{ + crh::{sha256::Sha256, CRHScheme, TwoToOneCRHScheme}, + merkle_tree::{ByteDigestConverter, Config}, + }; + use ark_ff::{Field, PrimeField}; + use ark_poly::{univariate::DensePolynomial, DenseUVPolynomial}; + use ark_std::{test_rng, UniformRand}; + use blake2::Blake2s256; + use rand_chacha::{rand_core::SeedableRng, ChaCha20Rng}; + + use ark_pcs_bench_templates::{FieldToBytesColHasher, LeafIdentityHasher}; + + type LeafH = LeafIdentityHasher; + type CompressH = Sha256; + type ColHasher = FieldToBytesColHasher; + + struct MerkleTreeParams; + + impl Config for MerkleTreeParams { + type Leaf = Vec; + + type LeafDigest = ::Output; + type LeafInnerDigestConverter = ByteDigestConverter; + type InnerDigest = ::Output; + + type LeafHash = LeafH; + type TwoToOneHash = CompressH; + } + + type MTConfig = MerkleTreeParams; + type LigeroPCS = LinearCodePCS< + UnivariateLigero, ColHasher>, + Fr, + DensePolynomial, + MTConfig, + ColHasher, + >; + + type LigeroPcsF = LinearCodePCS< + UnivariateLigero, ColHasher>, + F, + DensePolynomial, + MTConfig, + ColHasher, + >; + + fn rand_poly( + degree: usize, + _: Option, + rng: &mut ChaCha20Rng, + ) -> DensePolynomial { + DensePolynomial::rand(degree, rng) + } + + fn constant_poly( + _: usize, + _: Option, + rng: &mut ChaCha20Rng, + ) -> DensePolynomial { + DensePolynomial::from_coefficients_slice(&[Fr::rand(rng)]) + } + + #[test] + fn test_construction() { + let degree = 4; + let mut rng = &mut test_rng(); + // just to make sure we have the right degree given the FFT domain for our field + let leaf_hash_param = ::setup(&mut rng).unwrap(); + let two_to_one_hash_param = ::setup(&mut rng) + .unwrap() + .clone(); + let col_hash_params = as CRHScheme>::setup(&mut rng).unwrap(); + let check_well_formedness = true; + + let pp: LigeroPCParams> = LigeroPCParams::new( + 128, + 4, + check_well_formedness, + leaf_hash_param, + two_to_one_hash_param, + col_hash_params, + ); + + let (ck, vk) = LigeroPCS::trim(&pp, 0, 0, None).unwrap(); + + let rand_chacha = &mut ChaCha20Rng::from_rng(test_rng()).unwrap(); + let labeled_poly = LabeledPolynomial::new( + "test".to_string(), + rand_poly(degree, None, rand_chacha), + None, + None, + ); + + let mut test_sponge = test_sponge::(); + let (c, rands) = LigeroPCS::commit(&ck, &[labeled_poly.clone()], None).unwrap(); + + let point = Fr::rand(rand_chacha); + + let value = 
labeled_poly.evaluate(&point); + + let proof = LigeroPCS::open( + &ck, + &[labeled_poly], + &c, + &point, + &mut (test_sponge.clone()), + &rands, + None, + ) + .unwrap(); + assert!( + LigeroPCS::check(&vk, &c, &point, [value], &proof, &mut test_sponge, None).unwrap() + ); + } + + fn rand_point(_: Option, rng: &mut ChaCha20Rng) -> F { + F::rand(rng) + } + + #[test] + fn single_poly_test() { + use crate::tests::*; + single_poly_test::<_, _, LigeroPCS, _>( + None, + rand_poly::, + rand_point::, + poseidon_sponge_for_test::, + ) + .expect("test failed for bls12-377"); + single_poly_test::<_, _, LigeroPcsF, _>( + None, + rand_poly::, + rand_point::, + poseidon_sponge_for_test::, + ) + .expect("test failed for bls12-381"); + } + + #[test] + fn constant_poly_test() { + use crate::tests::*; + single_poly_test::<_, _, LigeroPCS, _>( + None, + constant_poly::, + rand_point::, + poseidon_sponge_for_test::, + ) + .expect("test failed for bls12-377"); + single_poly_test::<_, _, LigeroPcsF, _>( + None, + constant_poly::, + rand_point::, + poseidon_sponge_for_test::, + ) + .expect("test failed for bls12-381"); + } + + #[test] + fn quadratic_poly_degree_bound_multiple_queries_test() { + use crate::tests::*; + quadratic_poly_degree_bound_multiple_queries_test::<_, _, LigeroPCS, _>( + rand_poly::, + rand_point::, + poseidon_sponge_for_test::, + ) + .expect("test failed for bls12-377"); + quadratic_poly_degree_bound_multiple_queries_test::<_, _, LigeroPcsF, _>( + rand_poly::, + rand_point::, + poseidon_sponge_for_test::, + ) + .expect("test failed for bls12-381"); + } + + #[test] + fn linear_poly_degree_bound_test() { + use crate::tests::*; + linear_poly_degree_bound_test::<_, _, LigeroPCS, _>( + rand_poly::, + rand_point::, + poseidon_sponge_for_test::, + ) + .expect("test failed for bls12-377"); + linear_poly_degree_bound_test::<_, _, LigeroPcsF, _>( + rand_poly::, + rand_point::, + poseidon_sponge_for_test::, + ) + .expect("test failed for bls12-381"); + } + + #[test] + fn single_poly_degree_bound_test() { + use crate::tests::*; + single_poly_degree_bound_test::<_, _, LigeroPCS, _>( + rand_poly::, + rand_point::, + poseidon_sponge_for_test::, + ) + .expect("test failed for bls12-377"); + single_poly_degree_bound_test::<_, _, LigeroPcsF, _>( + rand_poly::, + rand_point::, + poseidon_sponge_for_test::, + ) + .expect("test failed for bls12-381"); + } + + #[test] + fn single_poly_degree_bound_multiple_queries_test() { + use crate::tests::*; + single_poly_degree_bound_multiple_queries_test::<_, _, LigeroPCS, _>( + rand_poly::, + rand_point::, + poseidon_sponge_for_test::, + ) + .expect("test failed for bls12-377"); + single_poly_degree_bound_multiple_queries_test::<_, _, LigeroPcsF, _>( + rand_poly::, + rand_point::, + poseidon_sponge_for_test::, + ) + .expect("test failed for bls12-381"); + } + + #[test] + fn two_polys_degree_bound_single_query_test() { + use crate::tests::*; + two_polys_degree_bound_single_query_test::<_, _, LigeroPCS, _>( + rand_poly::, + rand_point::, + poseidon_sponge_for_test::, + ) + .expect("test failed for bls12-377"); + two_polys_degree_bound_single_query_test::<_, _, LigeroPcsF, _>( + rand_poly::, + rand_point::, + poseidon_sponge_for_test::, + ) + .expect("test failed for bls12-381"); + } + + #[test] + fn full_end_to_end_test() { + use crate::tests::*; + full_end_to_end_test::<_, _, LigeroPCS, _>( + None, + rand_poly::, + rand_point::, + poseidon_sponge_for_test::, + ) + .expect("test failed for bls12-377"); + println!("Finished bls12-377"); + full_end_to_end_test::<_, _, 
LigeroPcsF, _>( + None, + rand_poly::, + rand_point::, + poseidon_sponge_for_test::, + ) + .expect("test failed for bls12-381"); + println!("Finished bls12-381"); + } + + #[test] + fn single_equation_test() { + use crate::tests::*; + single_equation_test::<_, _, LigeroPCS, _>( + None, + rand_poly::, + rand_point::, + poseidon_sponge_for_test::, + ) + .expect("test failed for bls12-377"); + println!("Finished bls12-377"); + single_equation_test::<_, _, LigeroPcsF, _>( + None, + rand_poly::, + rand_point::, + poseidon_sponge_for_test::, + ) + .expect("test failed for bls12-381"); + println!("Finished bls12-381"); + } + + #[test] + fn two_equation_test() { + use crate::tests::*; + two_equation_test::<_, _, LigeroPCS, _>( + None, + rand_poly::, + rand_point::, + poseidon_sponge_for_test::, + ) + .expect("test failed for bls12-377"); + println!("Finished bls12-377"); + two_equation_test::<_, _, LigeroPcsF, _>( + None, + rand_poly::, + rand_point::, + poseidon_sponge_for_test::, + ) + .expect("test failed for bls12-381"); + println!("Finished bls12-381"); + } + + #[test] + fn two_equation_degree_bound_test() { + use crate::tests::*; + two_equation_degree_bound_test::<_, _, LigeroPCS, _>( + rand_poly::, + rand_point::, + poseidon_sponge_for_test::, + ) + .expect("test failed for bls12-377"); + println!("Finished bls12-377"); + two_equation_degree_bound_test::<_, _, LigeroPcsF, _>( + rand_poly::, + rand_point::, + poseidon_sponge_for_test::, + ) + .expect("test failed for bls12-381"); + println!("Finished bls12-381"); + } + + #[test] + fn full_end_to_end_equation_test() { + use crate::tests::*; + full_end_to_end_equation_test::<_, _, LigeroPCS, _>( + None, + rand_poly::, + rand_point::, + poseidon_sponge_for_test::, + ) + .expect("test failed for bls12-377"); + println!("Finished bls12-377"); + full_end_to_end_equation_test::<_, _, LigeroPcsF, _>( + None, + rand_poly::, + rand_point::, + poseidon_sponge_for_test::, + ) + .expect("test failed for bls12-381"); + println!("Finished bls12-381"); + } + + #[test] + #[should_panic] + fn bad_degree_bound_test() { + use crate::tests::*; + use ark_bls12_381::Fq as Fq381; + bad_degree_bound_test::<_, _, LigeroPcsF, _>( + rand_poly::, + rand_point::, + poseidon_sponge_for_test::, + ) + .expect("test failed for bls12-377"); + println!("Finished bls12-377"); + } +} diff --git a/poly-commit/src/linear_codes/utils.rs b/poly-commit/src/linear_codes/utils.rs new file mode 100644 index 00000000..dc4fe91a --- /dev/null +++ b/poly-commit/src/linear_codes/utils.rs @@ -0,0 +1,178 @@ +use crate::{utils::ceil_div, Error}; +use ark_crypto_primitives::sponge::CryptographicSponge; +use ark_ff::{FftField, PrimeField}; +use ark_poly::{EvaluationDomain, GeneralEvaluationDomain}; +#[cfg(not(feature = "std"))] +use ark_std::{string::ToString, vec::Vec}; +#[cfg(not(feature = "std"))] +use num_traits::Float; + +/// Apply reed-solomon encoding to msg. +/// Assumes msg.len() is equal to the order of some FFT domain in F. +/// Returns a vector of length equal to the smallest FFT domain of size at least msg.len() * RHO_INV. 
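+/// For example, with rho_inv = 2, a message of length 8 is read as the coefficients of a polynomial of degree at most 7, and the output is its evaluations over an FFT domain of size 16 (assuming F admits one).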
+pub(crate) fn reed_solomon( + // msg, of length m, is interpreted as a vector of coefficients of a polynomial of degree m - 1 + msg: &[F], + rho_inv: usize, +) -> Vec { + let m = msg.len(); + + let extended_domain = GeneralEvaluationDomain::::new(m * rho_inv).unwrap_or_else(|| { + panic!( + "The field F cannot accomodate FFT for msg.len() * RHO_INV = {} elements (too many)", + m * rho_inv + ) + }); + + extended_domain.fft(msg) +} + +#[inline] +pub(crate) fn get_num_bytes(n: usize) -> usize { + ceil_div((usize::BITS - n.leading_zeros()) as usize, 8) +} + +/// Generate `t` (not necessarily distinct) random points in `[0, n)` +/// using the current state of the `transcript`. +pub(crate) fn get_indices_from_sponge( + n: usize, + t: usize, + sponge: &mut S, +) -> Result, Error> { + let bytes_to_squeeze = get_num_bytes(n); + let mut indices = Vec::with_capacity(t); + for _ in 0..t { + let bytes = sponge.squeeze_bytes(bytes_to_squeeze); + sponge.absorb(&bytes); + + // get the usize from Vec: + let ind = bytes.iter().fold(0, |acc, &x| (acc << 8) + x as usize); + // modulo the number of columns in the encoded matrix + indices.push(ind % n); + } + Ok(indices) +} + +#[inline] +pub(crate) fn calculate_t( + sec_param: usize, + distance: (usize, usize), + codeword_len: usize, +) -> Result { + // Took from the analysis by BCI+20 and Ligero + // We will find the smallest $t$ such that + // $(1-\delta)^t + (\rho+\delta)^t + \frac{n}{F} < 2^{-\lambda}$. + // With $\delta = \frac{1-\rho}{2}$, the expreesion is + // $2 * (\frac{1+\rho}{2})^t + \frac{n}{F} < 2^(-\lambda)$. + + let field_bits = F::MODULUS_BIT_SIZE as i32; + let sec_param = sec_param as i32; + + let residual = codeword_len as f64 / 2.0_f64.powi(field_bits); + let rhs = (2.0_f64.powi(-sec_param) - residual).log2(); + if !(rhs.is_normal()) { + return Err(Error::InvalidParameters("For the given codeword length and the required security guarantee, the field is not big enough.".to_string())); + } + let nom = rhs - 1.0; + let denom = (1.0 - 0.5 * distance.0 as f64 / distance.1 as f64).log2(); + if !(denom.is_normal()) { + return Err(Error::InvalidParameters( + "The distance is wrong".to_string(), + )); + } + let t = (nom / denom).ceil() as usize; + Ok(if t < codeword_len { t } else { codeword_len }) +} + +pub(crate) fn tensor_vec(values: &[F]) -> Vec { + let one = F::one(); + let anti_values: Vec = values.iter().map(|v| one - *v).collect(); + + let mut layer: Vec = vec![one]; + + for i in 0..values.len() { + let mut new_layer = Vec::new(); + for v in &layer { + new_layer.push(*v * anti_values[i]); + } + for v in &layer { + new_layer.push(*v * values[i]); + } + layer = new_layer; + } + + layer +} + +#[cfg(test)] +pub(crate) mod tests { + + use super::*; + + use ark_bls12_377::Fq; + use ark_bls12_377::Fr; + use ark_poly::{ + domain::general::GeneralEvaluationDomain, univariate::DensePolynomial, DenseUVPolynomial, + Polynomial, + }; + use ark_std::test_rng; + use rand_chacha::{rand_core::SeedableRng, ChaCha20Rng}; + + #[test] + fn test_reed_solomon() { + let rho_inv = 3; + // `i` is the min number of evaluations we need to interpolate a poly of degree `i - 1` + for i in 1..10 { + let deg = (1 << i) - 1; + + let rand_chacha = &mut ChaCha20Rng::from_rng(test_rng()).unwrap(); + let mut pol = DensePolynomial::rand(deg, rand_chacha); + + while pol.degree() != deg { + pol = DensePolynomial::rand(deg, rand_chacha); + } + + let coeffs = &pol.coeffs; + + // size of evals might be larger than deg + 1 (the min. 
number of evals needed to interpolate): we could still do R-S encoding on smaller evals, but the resulting polynomial will differ, so for this test to work we should pass it in full + let m = deg + 1; + + let encoded = reed_solomon(&coeffs, rho_inv); + + let large_domain = GeneralEvaluationDomain::::new(m * rho_inv).unwrap(); + + // the encoded elements should agree with the evaluations of the polynomial in the larger domain + for j in 0..(rho_inv * m) { + assert_eq!(pol.evaluate(&large_domain.element(j)), encoded[j]); + } + } + } + + #[test] + fn test_get_num_bytes() { + assert_eq!(get_num_bytes(0), 0); + assert_eq!(get_num_bytes(1), 1); + assert_eq!(get_num_bytes(9), 1); + assert_eq!(get_num_bytes(1 << 11), 2); + assert_eq!(get_num_bytes(1 << 32 - 1), 4); + assert_eq!(get_num_bytes(1 << 32), 5); + assert_eq!(get_num_bytes(1 << 32 + 1), 5); + } + + #[test] + fn test_calculate_t_with_good_parameters() { + assert!(calculate_t::(128, (3, 4), 2_usize.pow(32)).unwrap() < 200); + assert!(calculate_t::(256, (3, 4), 2_usize.pow(32)).unwrap() < 400); + } + + #[test] + fn test_calculate_t_with_bad_parameters() { + calculate_t::( + (Fq::MODULUS_BIT_SIZE - 60) as usize, + (3, 4), + 2_usize.pow(60), + ) + .unwrap_err(); + calculate_t::(400, (3, 4), 2_usize.pow(32)).unwrap_err(); + } +} diff --git a/poly-commit/src/utils.rs b/poly-commit/src/utils.rs index 85986cbe..a542927e 100644 --- a/poly-commit/src/utils.rs +++ b/poly-commit/src/utils.rs @@ -8,6 +8,18 @@ use rayon::{ prelude::IndexedParallelIterator, }; +/// Takes as input a struct, and converts them to a series of bytes. All traits +/// that implement `CanonicalSerialize` can be automatically converted to bytes +/// in this manner. +/// From jellyfish lib +#[macro_export] +macro_rules! to_bytes { + ($x:expr) => {{ + let mut buf = ark_std::vec![]; + ark_serialize::CanonicalSerialize::serialize_compressed($x, &mut buf).map(|_| buf) + }}; +} + /// Return ceil(x / y). pub(crate) fn ceil_div(x: usize, y: usize) -> usize { // XXX. warning: this expression can overflow. @@ -16,13 +28,36 @@ pub(crate) fn ceil_div(x: usize, y: usize) -> usize { #[derive(Derivative, CanonicalSerialize, CanonicalDeserialize)] #[derivative(Default(bound = ""), Clone(bound = ""), Debug(bound = ""))] -pub(crate) struct Matrix { +pub struct Matrix { pub(crate) n: usize, pub(crate) m: usize, entries: Vec>, } impl Matrix { + /// Returns a Matrix of dimensions n x m given a list of n * m field elements. + /// The list should be ordered row-first, i.e. [a11, ..., a1m, a21, ..., a2m, ...]. + /// + /// # Panics + /// Panics if the dimensions do not match the length of the list + pub(crate) fn new_from_flat(n: usize, m: usize, entry_list: &[F]) -> Self { + assert_eq!( + entry_list.len(), + n * m, + "Invalid matrix construction: dimensions are {} x {} but entry vector has {} entries", + n, + m, + entry_list.len() + ); + + // TODO more efficient to run linearly? + let entries: Vec> = (0..n) + .map(|row| (0..m).map(|col| entry_list[m * row + col]).collect()) + .collect(); + + Self { n, m, entries } + } + /// Returns a Matrix given a list of its rows, each in turn represented as a list of field elements. /// /// # Panics @@ -45,6 +80,28 @@ impl Matrix { } } + /// Returns the entry in position (i, j). **Indexing starts at 0 in both coordinates**, + /// i.e. the first element is in position (0, 0) and the last one in (n - 1, j - 1), + /// where n and m are the number of rows and columns, respectively. 
+ /// + /// Index bound checks are waived for efficiency and behaviour under invalid indexing is undefined + #[cfg(test)] + pub(crate) fn entry(&self, i: usize, j: usize) -> F { + self.entries[i][j] + } + + /// Returns self as a list of rows + pub(crate) fn rows(&self) -> Vec> { + self.entries.clone() + } + + /// Returns self as a list of columns + pub(crate) fn cols(&self) -> Vec> { + (0..self.m) + .map(|col| (0..self.n).map(|row| self.entries[row][col]).collect()) + .collect() + } + /// Returns the product v * self, where v is interpreted as a row vector. In other words, /// it returns a linear combination of the rows of self with coefficients given by v. /// @@ -92,6 +149,12 @@ pub(crate) fn vector_sum(v1: &[F], v2: &[F]) -> Vec { .collect() } +#[inline] +#[cfg(test)] +pub(crate) fn to_field(v: Vec) -> Vec { + v.iter().map(|x| F::from(*x)).collect::>() +} + // TODO: replace by https://github.com/arkworks-rs/crypto-primitives/issues/112. #[cfg(test)] use ark_crypto_primitives::sponge::poseidon::PoseidonSponge; @@ -127,3 +190,80 @@ pub(crate) fn test_sponge() -> PoseidonSponge { let config = PoseidonConfig::new(full_rounds, partial_rounds, alpha, mds, v, 2, 1); PoseidonSponge::new(&config) } + +#[cfg(test)] +pub(crate) mod tests { + use super::*; + use ark_bls12_377::Fr; + + #[test] + fn test_matrix_constructor_flat() { + let entries: Vec = to_field(vec![10, 100, 4, 67, 44, 50]); + let mat = Matrix::new_from_flat(2, 3, &entries); + assert_eq!(mat.entry(1, 2), Fr::from(50)); + } + + #[test] + fn test_matrix_constructor_flat_square() { + let entries: Vec = to_field(vec![10, 100, 4, 67]); + let mat = Matrix::new_from_flat(2, 2, &entries); + assert_eq!(mat.entry(1, 1), Fr::from(67)); + } + + #[test] + #[should_panic(expected = "dimensions are 2 x 3 but entry vector has 5 entries")] + fn test_matrix_constructor_flat_panic() { + let entries: Vec = to_field(vec![10, 100, 4, 67, 44]); + Matrix::new_from_flat(2, 3, &entries); + } + + #[test] + fn test_matrix_constructor_rows() { + let rows: Vec> = vec![ + to_field(vec![10, 100, 4]), + to_field(vec![23, 1, 0]), + to_field(vec![55, 58, 9]), + ]; + let mat = Matrix::new_from_rows(rows); + assert_eq!(mat.entry(2, 0), Fr::from(55)); + } + + #[test] + #[should_panic(expected = "not all rows have the same length")] + fn test_matrix_constructor_rows_panic() { + let rows: Vec> = vec![ + to_field(vec![10, 100, 4]), + to_field(vec![23, 1, 0]), + to_field(vec![55, 58]), + ]; + Matrix::new_from_rows(rows); + } + + #[test] + fn test_cols() { + let rows: Vec> = vec![ + to_field(vec![4, 76]), + to_field(vec![14, 92]), + to_field(vec![17, 89]), + ]; + + let mat = Matrix::new_from_rows(rows); + + assert_eq!(mat.cols()[1], to_field(vec![76, 92, 89])); + } + + #[test] + fn test_row_mul() { + let rows: Vec> = vec![ + to_field(vec![10, 100, 4]), + to_field(vec![23, 1, 0]), + to_field(vec![55, 58, 9]), + ]; + + let mat = Matrix::new_from_rows(rows); + let v: Vec = to_field(vec![12, 41, 55]); + // by giving the result in the integers and then converting to Fr + // we ensure the test will still pass even if Fr changes + assert_eq!(mat.row_mul(&v), to_field::(vec![4088, 4431, 543])); + } +}