diff --git a/benches/Cargo.toml b/benches/Cargo.toml
index de60e03..bf27185 100644
--- a/benches/Cargo.toml
+++ b/benches/Cargo.toml
@@ -7,19 +7,19 @@ edition = "2018"
 # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
 
 [dev-dependencies]
-ark-ec = { version = "0.3", features = [ "parallel" ] }
-ark-ff = { version = "0.3", features = [ "parallel" ] }
-ark-poly = { version = "0.3", features = [ "parallel" ] }
-ark-std = { version = "0.3", features = [ "parallel" ] }
-ark-groth16 = { version = "0.3", features = [ "parallel", "r1cs" ] }
-ark-crypto-primitives = { version = "0.3", features = [ "parallel", "r1cs" ] }
-ark-bls12-381 = { version = "0.3", features = [ "curve" ] }
-ark-ed-on-bls12-381 = "0.3"
-ark-bls12-377 = { version = "0.3", features = [ "curve", "r1cs" ] }
-ark-bw6-761 = "0.3"
-
-ark-relations = "0.3"
-ark-r1cs-std = "0.3"
+ark-ec = { version = "0.4", features = [ "parallel" ] }
+ark-ff = { version = "0.4", features = [ "parallel" ] }
+ark-poly = { version = "0.4", features = [ "parallel" ] }
+ark-std = { version = "0.4", features = [ "parallel" ] }
+ark-groth16 = { version = "0.4", features = [ "parallel", "r1cs" ] }
+ark-crypto-primitives = { version = "0.4", features = [ "parallel", "r1cs", "prf" ] }
+ark-bls12-381 = { version = "0.4", features = [ "curve" ] }
+ark-ed-on-bls12-381 = "0.4"
+ark-bls12-377 = { version = "0.4", features = [ "curve", "r1cs" ] }
+ark-bw6-761 = "0.4"
+
+ark-relations = "0.4"
+ark-r1cs-std = "0.4"
 
 digest = "0.9"
 blake2 = "0.9"
diff --git a/benches/benches/gipa.rs b/benches/benches/gipa.rs
index 0851de9..e66a4b7 100644
--- a/benches/benches/gipa.rs
+++ b/benches/benches/gipa.rs
@@ -5,11 +5,9 @@ use ark_dh_commitments::{
     pedersen::PedersenCommitment,
     DoublyHomomorphicCommitment,
 };
-use ark_ec::PairingEngine;
+use ark_ec::pairing::{Pairing, PairingOutput};
 use ark_ff::UniformRand;
-use ark_inner_products::{
-    ExtensionFieldElement, InnerProduct, MultiexponentiationInnerProduct, PairingInnerProduct,
-};
+use ark_inner_products::{InnerProduct, MultiexponentiationInnerProduct, PairingInnerProduct};
 use ark_ip_proofs::gipa::GIPA;
 
 use ark_std::rand::{rngs::StdRng, Rng, SeedableRng};
@@ -70,7 +68,7 @@ fn main() {
     const LEN: usize = 16;
     type GC1 = AFGHOCommitmentG1<Bls12_381>;
     type GC2 = AFGHOCommitmentG2<Bls12_381>;
-    type SC1 = PedersenCommitment<<Bls12_381 as PairingEngine>::G1Projective>;
+    type SC1 = PedersenCommitment<<Bls12_381 as Pairing>::G1>;
     let mut rng = StdRng::seed_from_u64(0u64);
 
     println!("Benchmarking GIPA with vector length: {}", LEN);
@@ -80,20 +78,17 @@
         PairingInnerProduct<Bls12_381>,
         GC1,
         GC2,
-        IdentityCommitment<ExtensionFieldElement<Bls12_381>, <Bls12_381 as PairingEngine>::Fr>,
+        IdentityCommitment<PairingOutput<Bls12_381>, <Bls12_381 as Pairing>::ScalarField>,
         Blake2b,
         StdRng,
     >(&mut rng, LEN);
 
     println!("2) Multiexponentiation G1 inner product...");
     bench_gipa::<
-        MultiexponentiationInnerProduct<<Bls12_381 as PairingEngine>::G1Projective>,
+        MultiexponentiationInnerProduct<<Bls12_381 as Pairing>::G1>,
         GC1,
         SC1,
-        IdentityCommitment<
-            <Bls12_381 as PairingEngine>::G1Projective,
-            <Bls12_381 as PairingEngine>::Fr,
-        >,
+        IdentityCommitment<<Bls12_381 as Pairing>::G1, <Bls12_381 as Pairing>::ScalarField>,
         Blake2b,
         StdRng,
     >(&mut rng, LEN);
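Note: the renames above recur throughout this patch. `PairingEngine` becomes `ark_ec::pairing::Pairing`, the projective types `G1Projective`/`G2Projective` become the associated types `G1`/`G2`, the scalar field `Fr` becomes `ScalarField`, and the local target-group wrapper `ExtensionFieldElement<P>` is replaced by the upstream `ark_ec::pairing::PairingOutput<P>`. A minimal sketch of the pattern — illustrative only, not part of the patch (function names are invented):

    // arkworks 0.3
    fn scale_03<E: PairingEngine>(g: E::G1Projective, s: E::Fr) -> E::G1Projective {
        g.mul(s.into_repr())
    }

    // arkworks 0.4: group elements multiply directly by scalar references
    fn scale_04<E: Pairing>(g: E::G1, s: E::ScalarField) -> E::G1 {
        g * s
    }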
diff --git a/benches/benches/groth16_aggregation/bench.rs b/benches/benches/groth16_aggregation/bench.rs
index fc1fec4..fe708df 100644
--- a/benches/benches/groth16_aggregation/bench.rs
+++ b/benches/benches/groth16_aggregation/bench.rs
@@ -1,7 +1,4 @@
-use ark_bls12_377::{
-    constraints::PairingVar as BLS12PairingVar, Bls12_377, Fr as BLS12Fr,
-    FrParameters as BLS12FrParameters,
-};
+use ark_bls12_377::{constraints::PairingVar as BLS12PairingVar, Bls12_377, Fr as BLS12Fr};
 use ark_bw6_761::{Fr as BW6Fr, BW6_761};
 use ark_crypto_primitives::{
     prf::{
@@ -11,10 +8,11 @@ use ark_crypto_primitives::{
     },
     snark::*,
 };
-use ark_ec::{AffineCurve, PairingEngine, ProjectiveCurve};
-use ark_ff::{
-    biginteger::BigInteger, FftParameters, One, PrimeField, ToConstraintField, UniformRand,
+use ark_ec::{
+    pairing::{MillerLoopOutput, Pairing},
+    CurveGroup,
 };
+use ark_ff::{One, PrimeField, ToConstraintField, UniformRand};
 use ark_groth16::{constraints::*, Groth16, PreparedVerifyingKey, Proof, VerifyingKey};
 use ark_r1cs_std::prelude::*;
 use ark_relations::r1cs::{ConstraintSynthesizer, ConstraintSystemRef, SynthesisError};
@@ -97,7 +95,7 @@ impl ConstraintSynthesizer<BW6Fr> for AggregateBlake2SCircuitVerificationCircuit
             .iter()
             .map(|bls_fr| {
                 bls_fr
-                    .into_repr()
+                    .into_bigint()
                     .as_ref()
                     .iter()
                     .map(|bls_fr_int| bls_fr_int.to_le_bytes().to_vec())
@@ -116,8 +114,7 @@
             // Now split BW6-761 byte representation back to iterator over BLS12-377 field element byte representations
             .iter()
             .map(|h_as_bls_fr_bytes| {
-                let bls_field_element_size_in_bytes =
-                    <BLS12FrParameters as FftParameters>::BigInt::NUM_LIMBS * 8;
+                let bls_field_element_size_in_bytes = (BLS12Fr::MODULUS_BIT_SIZE as usize + 7) / 8;
                 h_as_bls_fr_bytes
                     .chunks(bls_field_element_size_in_bytes)
                     .map(|bls_field_element_chunk| bls_field_element_chunk.to_vec())
@@ -182,7 +179,7 @@ impl ToConstraintField<BW6Fr> for AggregateBlake2SCircuitVerificationCircuitInpu
             .iter()
             .map(|bls_fr| {
                 bls_fr
-                    .into_repr()
+                    .into_bigint()
                     .as_ref()
                     .iter()
                     .map(|bls_fr_int| bls_fr_int.to_le_bytes().to_vec())
@@ -322,7 +319,7 @@ fn main() {
             &hash_outputs
                 .iter()
                 .map(|h| h.to_field_elements())
-                .collect::<Option<Vec<Vec<<Bls12_377 as PairingEngine>::Fr>>>>()
+                .collect::<Option<Vec<Vec<<Bls12_377 as Pairing>::ScalarField>>>>()
                 .unwrap(),
             &proofs,
         )
@@ -381,7 +378,7 @@ fn main() {
             &hash_outputs
                 .iter()
                 .map(|h| h.to_field_elements())
-                .collect::<Option<Vec<Vec<<Bls12_377 as PairingEngine>::Fr>>>>()
+                .collect::<Option<Vec<Vec<<Bls12_377 as Pairing>::ScalarField>>>>()
                 .unwrap(),
             &aggregate_proof,
         )
@@ -528,15 +525,15 @@ fn main() {
     }
 }
 
-pub fn batch_verify_proof<E: PairingEngine>(
+pub fn batch_verify_proof<E: Pairing>(
     pvk: &PreparedVerifyingKey<E>,
-    public_inputs: &[Vec<E::Fr>],
+    public_inputs: &[Vec<E::ScalarField>],
     proofs: &[Proof<E>],
 ) -> Result<bool, SynthesisError> {
     let mut rng = StdRng::seed_from_u64(0u64);
     let mut r_powers = Vec::with_capacity(proofs.len());
     for _ in 0..proofs.len() {
-        let challenge: E::Fr = u128::rand(&mut rng).into();
+        let challenge: E::ScalarField = u128::rand(&mut rng).into();
         r_powers.push(challenge);
     }
@@ -544,48 +541,55 @@ pub fn batch_verify_proof<E: PairingEngine>(
         .iter()
         .zip(&r_powers)
         .map(|(input, r)| {
-            let mut g_ic = pvk.vk.gamma_abc_g1[0].into_projective();
-            for (i, b) in input.iter().zip(pvk.vk.gamma_abc_g1.iter().skip(1)) {
-                g_ic += &b.mul(i.into_repr());
+            let mut g_ic: E::G1 = pvk.vk.gamma_abc_g1[0].into();
+            for (&i, &b) in input.iter().zip(pvk.vk.gamma_abc_g1.iter().skip(1)) {
+                g_ic += b * i;
             }
-            g_ic.mul(r.into_repr())
+            g_ic * r
         })
-        .sum::<E::G1Projective>()
-        .into_affine();
+        .sum::<E::G1>()
+        .into();
 
     let combined_proof_a_s = proofs
         .iter()
         .zip(&r_powers)
-        .map(|(proof, r)| proof.a.mul(*r))
+        .map(|(proof, r)| proof.a * r)
         .collect::<Vec<_>>();
-    let combined_proof_a_s = E::G1Projective::batch_normalization_into_affine(&combined_proof_a_s);
-    let ml_inputs = proofs
-        .iter()
-        .zip(&combined_proof_a_s)
-        .map(|(proof, a)| ((*a).into(), proof.b.into()))
+    let combined_proof_a_s = E::G1::normalize_batch(&combined_proof_a_s);
+    let combined_proof_a_s = combined_proof_a_s
+        .into_iter()
+        .map(E::G1Prepared::from)
         .collect::<Vec<_>>();
-    let a_r_times_b = E::miller_loop(ml_inputs.iter());
+    let combined_proof_b_s = proofs
+        .into_iter()
+        .map(|proof| proof.b.into())
+        .collect::<Vec<_>>();
+    let a_r_times_b = E::multi_miller_loop(combined_proof_a_s, combined_proof_b_s);
 
     let combined_c_s = proofs
         .iter()
        .zip(&r_powers)
-        .map(|(proof, r)| proof.c.mul(*r))
-        .sum::<E::G1Projective>()
+        .map(|(proof, r)| proof.c * r)
+        .sum::<E::G1>()
         .into_affine();
 
-    let sum_of_rs = (&r_powers).iter().copied().sum::<E::Fr>();
-    let combined_alpha = (-pvk.vk.alpha_g1.mul(sum_of_rs)).into_affine();
-    let qap = E::miller_loop(
+    let sum_of_rs = (&r_powers).iter().copied().sum::<E::ScalarField>();
+    let combined_alpha = (-(pvk.vk.alpha_g1 * sum_of_rs)).into_affine();
+    let qap = E::multi_miller_loop(
+        [
+            E::G1Prepared::from(combined_alpha),
+            combined_inputs.into(),
+            combined_c_s.into(),
+        ],
         [
-            (combined_alpha.into(), pvk.vk.beta_g2.into()),
-            (combined_inputs.into(), pvk.gamma_g2_neg_pc.clone()),
-            (combined_c_s.into(), pvk.delta_g2_neg_pc.clone()),
-        ]
-        .iter(),
+            E::G2Prepared::from(pvk.vk.beta_g2),
+            pvk.gamma_g2_neg_pc.clone().into(),
+            pvk.delta_g2_neg_pc.clone().into(),
+        ],
     );
 
-    let test =
-        E::final_exponentiation(&(qap * &a_r_times_b)).ok_or(SynthesisError::UnexpectedIdentity)?;
+    let test = E::final_exponentiation(MillerLoopOutput(qap.0 * a_r_times_b.0))
+        .ok_or(SynthesisError::UnexpectedIdentity)?;
 
-    Ok(test == E::Fqk::one())
+    Ok(test.0 == E::TargetField::one())
 }
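Note on the `batch_verify_proof` rewrite above: in arkworks 0.4 the Miller loop returns the `MillerLoopOutput<E>` newtype and `final_exponentiation` returns `Option<PairingOutput<E>>`, so a batched check multiplies Miller-loop outputs in the target field and pays for only one final exponentiation. A hedged sketch of that structure for two pairs (the function name is illustrative, not from the patch):

    use ark_ec::pairing::{MillerLoopOutput, Pairing};
    use ark_ff::One;

    /// Checks e(a1, b1) * e(a2, b2) == 1 with a single final exponentiation.
    fn pairing_product_is_one<E: Pairing>(
        a1: E::G1Affine,
        b1: E::G2Affine,
        a2: E::G1Affine,
        b2: E::G2Affine,
    ) -> bool {
        let ml1 = E::miller_loop(a1, b1);
        let ml2 = E::miller_loop(a2, b2);
        // Combine in the target field, then exponentiate once
        match E::final_exponentiation(MillerLoopOutput(ml1.0 * ml2.0)) {
            Some(out) => out.0 == E::TargetField::one(),
            None => false,
        }
    }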
diff --git a/benches/benches/inner_products.rs b/benches/benches/inner_products.rs
index 54838ed..f900894 100644
--- a/benches/benches/inner_products.rs
+++ b/benches/benches/inner_products.rs
@@ -1,5 +1,5 @@
 use ark_bls12_381::Bls12_381;
-use ark_ec::PairingEngine;
+use ark_ec::pairing::Pairing;
 use ark_ff::UniformRand;
 use ark_inner_products::{InnerProduct, MultiexponentiationInnerProduct, PairingInnerProduct};
 
@@ -33,14 +33,12 @@ fn main() {
     bench_inner_product::<PairingInnerProduct<Bls12_381>, StdRng>(&mut rng, LEN);
 
     println!("2) Multiexponentiation G1 inner product...");
-    bench_inner_product::<
-        MultiexponentiationInnerProduct<<Bls12_381 as PairingEngine>::G1Projective>,
-        StdRng,
-    >(&mut rng, LEN);
+    bench_inner_product::<MultiexponentiationInnerProduct<<Bls12_381 as Pairing>::G1>, StdRng>(
+        &mut rng, LEN,
+    );
 
     println!("3) Multiexponentiation G2 inner product...");
-    bench_inner_product::<
-        MultiexponentiationInnerProduct<<Bls12_381 as PairingEngine>::G2Projective>,
-        StdRng,
-    >(&mut rng, LEN);
+    bench_inner_product::<MultiexponentiationInnerProduct<<Bls12_381 as Pairing>::G2>, StdRng>(
+        &mut rng, LEN,
+    );
 }
diff --git a/benches/benches/poly_commit.rs b/benches/benches/poly_commit.rs
index 90facbd..770b216 100644
--- a/benches/benches/poly_commit.rs
+++ b/benches/benches/poly_commit.rs
@@ -1,12 +1,12 @@
 use ark_bls12_381::Bls12_381;
-use ark_ec::PairingEngine;
+use ark_ec::pairing::Pairing;
 use ark_ff::UniformRand;
 use ark_ip_proofs::applications::poly_commit::{
     transparent::UnivariatePolynomialCommitment as TransparentIPA,
     UnivariatePolynomialCommitment as IPA, KZG,
 };
 use ark_poly::polynomial::{
-    univariate::DensePolynomial as UnivariatePolynomial, Polynomial, UVPolynomial,
+    univariate::DensePolynomial as UnivariatePolynomial, DenseUVPolynomial, Polynomial,
 };
 
 use ark_std::rand::{rngs::StdRng, SeedableRng};
@@ -67,7 +67,7 @@ fn main() {
     csv_writer.flush().unwrap();
     for i in 1..num_trials + 1 {
         let polynomial = UnivariatePolynomial::rand(degree, &mut rng);
-        let point = <Bls12_381 as PairingEngine>::Fr::rand(&mut rng);
+        let point = <Bls12_381 as Pairing>::ScalarField::rand(&mut rng);
         let eval = polynomial.evaluate(&point);
 
         // Commit
@@ -139,7 +139,7 @@ fn main() {
     csv_writer.flush().unwrap();
     for i in 1..num_trials + 1 {
         let polynomial = UnivariatePolynomial::rand(degree, &mut rng);
-        let point = <Bls12_381 as PairingEngine>::Fr::rand(&mut rng);
+        let point = <Bls12_381 as Pairing>::ScalarField::rand(&mut rng);
         let eval = polynomial.evaluate(&point);
 
         // Commit
@@ -213,7 +213,7 @@ fn main() {
     csv_writer.flush().unwrap();
     for i in 1..num_trials + 1 {
         let polynomial = UnivariatePolynomial::rand(degree, &mut rng);
-        let point = <Bls12_381 as PairingEngine>::Fr::rand(&mut rng);
+        let point = <Bls12_381 as Pairing>::ScalarField::rand(&mut rng);
         let eval = polynomial.evaluate(&point);
 
         // Commit
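The `UVPolynomial` trait these benchmarks previously imported is named `DenseUVPolynomial` in ark-poly 0.4; `rand` and `evaluate` behave as before. A small sketch under that assumption:

    use ark_bls12_381::Fr;
    use ark_poly::polynomial::univariate::DensePolynomial;
    use ark_poly::{DenseUVPolynomial, Polynomial};
    use ark_std::rand::{rngs::StdRng, SeedableRng};

    fn main() {
        let mut rng = StdRng::seed_from_u64(0u64);
        // `rand` comes from DenseUVPolynomial, `evaluate` from Polynomial
        let polynomial: DensePolynomial<Fr> = DensePolynomial::rand(16, &mut rng);
        let point = Fr::from(3u64);
        let _eval = polynomial.evaluate(&point);
    }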
diff --git a/benches/benches/tipa.rs b/benches/benches/tipa.rs
index 0ae6709..9f49227 100644
--- a/benches/benches/tipa.rs
+++ b/benches/benches/tipa.rs
@@ -5,11 +5,12 @@ use ark_dh_commitments::{
     pedersen::PedersenCommitment,
     DoublyHomomorphicCommitment,
 };
-use ark_ec::{group::Group, PairingEngine};
-use ark_ff::{Field, UniformRand};
-use ark_inner_products::{
-    ExtensionFieldElement, InnerProduct, MultiexponentiationInnerProduct, PairingInnerProduct,
+use ark_ec::{
+    pairing::{Pairing, PairingOutput},
+    Group,
 };
+use ark_ff::{Field, UniformRand};
+use ark_inner_products::{InnerProduct, MultiexponentiationInnerProduct, PairingInnerProduct};
 use ark_ip_proofs::tipa::{
     structured_scalar_message::{structured_scalar_power, TIPAWithSSM},
     TIPACompatibleSetup, TIPA,
@@ -24,23 +25,22 @@ use std::{ops::MulAssign, time::Instant};
 fn bench_tipa<IP, LMC, RMC, IPC, P, D, R: Rng>(rng: &mut R, len: usize)
 where
     D: Digest,
-    P: PairingEngine,
+    P: Pairing,
     IP: InnerProduct<
         LeftMessage = LMC::Message,
         RightMessage = RMC::Message,
         Output = IPC::Message,
     >,
-    LMC: DoublyHomomorphicCommitment<Scalar = P::Fr> + TIPACompatibleSetup,
-    RMC: DoublyHomomorphicCommitment<Scalar = LMC::Scalar, Key = LMC::Message>
-        + TIPACompatibleSetup,
+    LMC: DoublyHomomorphicCommitment<Scalar = P::ScalarField> + TIPACompatibleSetup,
+    RMC: DoublyHomomorphicCommitment<Scalar = LMC::Scalar> + TIPACompatibleSetup,
     IPC: DoublyHomomorphicCommitment<Scalar = LMC::Scalar>,
-    LMC::Message: MulAssign<P::Fr>,
-    RMC::Message: MulAssign<P::Fr>,
-    IPC::Message: MulAssign<P::Fr>,
-    IPC::Key: MulAssign<P::Fr>,
-    LMC::Output: MulAssign<P::Fr>,
-    RMC::Output: MulAssign<P::Fr>,
-    IPC::Output: MulAssign<P::Fr>,
+    LMC::Message: MulAssign<P::ScalarField>,
+    RMC::Message: MulAssign<P::ScalarField>,
+    IPC::Message: MulAssign<P::ScalarField>,
+    IPC::Key: MulAssign<P::ScalarField>,
+    LMC::Output: MulAssign<P::ScalarField>,
+    RMC::Output: MulAssign<P::ScalarField>,
+    IPC::Output: MulAssign<P::ScalarField>,
     IPC::Output: MulAssign<LMC::Scalar>,
     IP::LeftMessage: UniformRand,
     IP::RightMessage: UniformRand,
@@ -74,23 +74,22 @@
 fn bench_tipa_srs_shift<IP, LMC, RMC, IPC, P, D, R: Rng>(rng: &mut R, len: usize)
 where
     D: Digest,
-    P: PairingEngine,
+    P: Pairing,
     IP: InnerProduct<
         LeftMessage = LMC::Message,
         RightMessage = RMC::Message,
         Output = IPC::Message,
     >,
-    LMC: DoublyHomomorphicCommitment<Scalar = P::Fr> + TIPACompatibleSetup,
-    RMC: DoublyHomomorphicCommitment<Scalar = LMC::Scalar, Key = LMC::Message>
-        + TIPACompatibleSetup,
+    LMC: DoublyHomomorphicCommitment<Scalar = P::ScalarField> + TIPACompatibleSetup,
+    RMC: DoublyHomomorphicCommitment<Scalar = LMC::Scalar> + TIPACompatibleSetup,
     IPC: DoublyHomomorphicCommitment<Scalar = LMC::Scalar>,
-    LMC::Message: MulAssign<P::Fr> + Group,
-    RMC::Message: MulAssign<P::Fr> + Group,
-    IPC::Message: MulAssign<P::Fr>,
-    IPC::Key: MulAssign<P::Fr>,
-    LMC::Output: MulAssign<P::Fr>,
-    RMC::Output: MulAssign<P::Fr>,
-    IPC::Output: MulAssign<P::Fr>,
+    LMC::Message: MulAssign<P::ScalarField> + Group,
+    RMC::Message: MulAssign<P::ScalarField> + Group,
+    IPC::Message: MulAssign<P::ScalarField>,
+    IPC::Key: MulAssign<P::ScalarField>,
+    LMC::Output: MulAssign<P::ScalarField>,
+    RMC::Output: MulAssign<P::ScalarField>,
+    IPC::Output: MulAssign<P::ScalarField>,
     IPC::Output: MulAssign<LMC::Scalar>,
     IP::LeftMessage: UniformRand + Group,
     IP::RightMessage: UniformRand + Group,
@@ -107,17 +106,17 @@
     let v_srs = srs.get_verifier_key();
     let com_l = LMC::commit(&ck_l, &l).unwrap();
     let com_r = RMC::commit(&ck_r, &r).unwrap();
-    let a_scalar = <P::Fr>::rand(rng);
+    let a_scalar = <P::ScalarField>::rand(rng);
     let r_vec = structured_scalar_power(len, &a_scalar);
     let l_a = l
         .iter()
         .zip(&r_vec)
-        .map(|(a, r)| a.mul(r))
+        .map(|(&a, r)| a * r)
         .collect::<Vec<LMC::Message>>();
     let ck_l_a = ck_l
         .iter()
         .zip(&r_vec)
-        .map(|(ck, r)| ck.mul(&r.inverse().unwrap()))
+        .map(|(&ck, r)| ck * r.inverse().unwrap())
         .collect::<Vec<LMC::Key>>();
     let t = vec![IP::inner_product(&l_a, &r).unwrap()];
 
@@ -148,15 +147,15 @@
 fn bench_tipa_ssm<IP, LMC, IPC, P, D, R: Rng>(rng: &mut R, len: usize)
 where
     D: Digest,
-    P: PairingEngine,
+    P: Pairing,
     IP: InnerProduct<LeftMessage = LMC::Message, RightMessage = LMC::Scalar, Output = IPC::Message>,
-    LMC: DoublyHomomorphicCommitment<Scalar = P::Fr> + TIPACompatibleSetup,
+    LMC: DoublyHomomorphicCommitment<Scalar = P::ScalarField> + TIPACompatibleSetup,
     IPC: DoublyHomomorphicCommitment<Scalar = LMC::Scalar>,
-    LMC::Message: MulAssign<P::Fr>,
-    IPC::Message: MulAssign<P::Fr>,
-    IPC::Key: MulAssign<P::Fr>,
-    LMC::Output: MulAssign<P::Fr>,
-    IPC::Output: MulAssign<P::Fr>,
+    LMC::Message: MulAssign<P::ScalarField>,
+    IPC::Message: MulAssign<P::ScalarField>,
+    IPC::Key: MulAssign<P::ScalarField>,
+    LMC::Output: MulAssign<P::ScalarField>,
+    IPC::Output: MulAssign<P::ScalarField>,
     IPC::Output: MulAssign<LMC::Scalar>,
     IP::LeftMessage: UniformRand,
     IP::RightMessage: UniformRand,
@@ -165,7 +164,7 @@ where
 {
     let mut l = Vec::new();
     for _ in 0..len {
         l.push(<IP::LeftMessage>::rand(rng));
     }
-    let scalar = <P::Fr>::rand(rng);
+    let scalar = <P::ScalarField>::rand(rng);
     let r = structured_scalar_power(len, &scalar);
 
     let (srs, ck_t) = TIPAWithSSM::<IP, LMC, IPC, P, D>::setup(rng, len).unwrap();
@@ -200,7 +199,7 @@ fn main() {
     const LEN: usize = 16;
     type GC1 = AFGHOCommitmentG1<Bls12_381>;
     type GC2 = AFGHOCommitmentG2<Bls12_381>;
-    type SC1 = PedersenCommitment<<Bls12_381 as PairingEngine>::G1Projective>;
+    type SC1 = PedersenCommitment<<Bls12_381 as Pairing>::G1>;
     let mut rng = StdRng::seed_from_u64(0u64);
 
     println!("Benchmarking TIPA with vector length: {}", LEN);
@@ -210,7 +209,7 @@ fn main() {
         PairingInnerProduct<Bls12_381>,
         GC1,
         GC2,
-        IdentityCommitment<ExtensionFieldElement<Bls12_381>, <Bls12_381 as PairingEngine>::Fr>,
+        IdentityCommitment<PairingOutput<Bls12_381>, <Bls12_381 as Pairing>::ScalarField>,
         Bls12_381,
         Blake2b,
         StdRng,
@@ -218,13 +217,10 @@ fn main() {
 
     println!("2) Multiexponentiation G1 inner product...");
     bench_tipa::<
-        MultiexponentiationInnerProduct<<Bls12_381 as PairingEngine>::G1Projective>,
+        MultiexponentiationInnerProduct<<Bls12_381 as Pairing>::G1>,
         GC1,
         SC1,
-        IdentityCommitment<
-            <Bls12_381 as PairingEngine>::G1Projective,
-            <Bls12_381 as PairingEngine>::Fr,
-        >,
+        IdentityCommitment<<Bls12_381 as Pairing>::G1, <Bls12_381 as Pairing>::ScalarField>,
         Bls12_381,
         Blake2b,
         StdRng,
@@ -235,7 +231,7 @@ fn main() {
         PairingInnerProduct<Bls12_381>,
         GC1,
         GC2,
-        IdentityCommitment<ExtensionFieldElement<Bls12_381>, <Bls12_381 as PairingEngine>::Fr>,
+        IdentityCommitment<PairingOutput<Bls12_381>, <Bls12_381 as Pairing>::ScalarField>,
         Bls12_381,
         Blake2b,
         StdRng,
@@ -243,12 +239,9 @@ fn main() {
 
     println!("4) Multiexponentiation G1 inner product with structured scalar message...");
     bench_tipa_ssm::<
-        MultiexponentiationInnerProduct<<Bls12_381 as PairingEngine>::G1Projective>,
+        MultiexponentiationInnerProduct<<Bls12_381 as Pairing>::G1>,
        GC1,
-        IdentityCommitment<
-            <Bls12_381 as PairingEngine>::G1Projective,
-            <Bls12_381 as PairingEngine>::Fr,
-        >,
+        IdentityCommitment<<Bls12_381 as Pairing>::G1, <Bls12_381 as Pairing>::ScalarField>,
         Bls12_381,
         Blake2b,
         StdRng,
diff --git a/dh_commitments/Cargo.toml b/dh_commitments/Cargo.toml
index 3ac6a1b..b9a5d36 100644
--- a/dh_commitments/Cargo.toml
+++ b/dh_commitments/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "ark-dh-commitments"
-version = "0.3.0"
+version = "0.4.0"
 authors = [
     "Benedikt Bünz",
     "Mary Maller",
@@ -14,16 +14,16 @@ repository = "https://github.com/arkworks-rs/ripp"
 documentation = "https://docs.rs/ark-dh-commitments/"
 
 [dependencies]
-ark-ff = "0.3"
-ark-ec = "0.3"
-ark-serialize = { version = "0.3", features = [ "derive" ] }
-ark-std = "0.3"
+ark-ff = "0.4"
+ark-ec = "0.4"
+ark-serialize = { version = "0.4", features = [ "derive" ] }
+ark-std = "0.4"
 
 ark-inner-products = { path = "../inner_products" }
 
 [dev-dependencies]
-ark-bls12-381 = { version = "0.3", features = [ "curve" ] }
-ark-ed-on-bls12-381 = "0.3"
+ark-bls12-381 = { version = "0.4", features = [ "curve" ] }
+ark-ed-on-bls12-381 = "0.4"
 
 [features]
 default = [ "parallel" ]
diff --git a/dh_commitments/src/afgho16/mod.rs b/dh_commitments/src/afgho16/mod.rs
index 354b31b..612bc12 100644
--- a/dh_commitments/src/afgho16/mod.rs
+++ b/dh_commitments/src/afgho16/mod.rs
@@ -1,27 +1,27 @@
-use ark_ec::PairingEngine;
+use ark_ec::pairing::{Pairing, PairingOutput};
 use ark_std::rand::Rng;
 use std::marker::PhantomData;
 
 use crate::{random_generators, DoublyHomomorphicCommitment, Error};
-use ark_inner_products::{ExtensionFieldElement, InnerProduct, PairingInnerProduct};
+use ark_inner_products::{InnerProduct, PairingInnerProduct};
 
 #[derive(Clone)]
-pub struct AFGHOCommitment<P: PairingEngine> {
+pub struct AFGHOCommitment<P: Pairing> {
     _pair: PhantomData<P>,
 }
 
 #[derive(Clone)]
-pub struct AFGHOCommitmentG1<P: PairingEngine>(AFGHOCommitment<P>);
+pub struct AFGHOCommitmentG1<P: Pairing>(AFGHOCommitment<P>);
 
 #[derive(Clone)]
-pub struct AFGHOCommitmentG2<P: PairingEngine>(AFGHOCommitment<P>);
+pub struct AFGHOCommitmentG2<P: Pairing>(AFGHOCommitment<P>);
 
-impl<P: PairingEngine> DoublyHomomorphicCommitment for AFGHOCommitmentG1<P> {
-    type Scalar = P::Fr;
-    type Message = P::G1Projective;
-    type Key = P::G2Projective;
-    type Output = ExtensionFieldElement<P>;
+impl<P: Pairing> DoublyHomomorphicCommitment for AFGHOCommitmentG1<P> {
+    type Scalar = P::ScalarField;
+    type Message = P::G1;
+    type Key = P::G2;
+    type Output = PairingOutput<P>;
 
     fn setup<R: Rng>(rng: &mut R, size: usize) -> Result<Vec<Self::Key>, Error> {
         Ok(random_generators(rng, size))
@@ -32,11 +32,11 @@ impl<P: PairingEngine> DoublyHomomorphicCommitment for AFGHOCommitmentG1<P> {
     }
 }
 
-impl<P: PairingEngine> DoublyHomomorphicCommitment for AFGHOCommitmentG2<P> {
-    type Scalar = P::Fr;
-    type Message = P::G2Projective;
-    type Key = P::G1Projective;
-    type Output = ExtensionFieldElement<P>;
+impl<P: Pairing> DoublyHomomorphicCommitment for AFGHOCommitmentG2<P> {
+    type Scalar = P::ScalarField;
+    type Message = P::G2;
+    type Key = P::G1;
+    type Output = PairingOutput<P>;
 
     fn setup<R: Rng>(rng: &mut R, size: usize) -> Result<Vec<Self::Key>, Error> {
         Ok(random_generators(rng, size))
@@ -65,13 +65,13 @@ mod tests {
         let mut message = Vec::new();
         let mut wrong_message = Vec::new();
         for _ in 0..TEST_SIZE {
-            message.push(<Bls12_381 as PairingEngine>::G1Projective::rand(&mut rng));
-            wrong_message.push(<Bls12_381 as PairingEngine>::G1Projective::rand(&mut rng));
+            message.push(<Bls12_381 as Pairing>::G1::rand(&mut rng));
+            wrong_message.push(<Bls12_381 as Pairing>::G1::rand(&mut rng));
         }
         let com = C1::commit(&commit_keys, &message).unwrap();
         assert!(C1::verify(&commit_keys, &message, &com).unwrap());
         assert!(!C1::verify(&commit_keys, &wrong_message, &com).unwrap());
-        message.push(<Bls12_381 as PairingEngine>::G1Projective::rand(&mut rng));
+        message.push(<Bls12_381 as Pairing>::G1::rand(&mut rng));
         assert!(C1::verify(&commit_keys, &message, &com).is_err());
     }
 
@@ -82,13 +82,13 @@ mod tests {
         let mut message = Vec::new();
         let mut wrong_message = Vec::new();
         for _ in 0..TEST_SIZE {
-            message.push(<Bls12_381 as PairingEngine>::G2Projective::rand(&mut rng));
-            wrong_message.push(<Bls12_381 as PairingEngine>::G2Projective::rand(&mut rng));
+            message.push(<Bls12_381 as Pairing>::G2::rand(&mut rng));
+            wrong_message.push(<Bls12_381 as Pairing>::G2::rand(&mut rng));
         }
         let com = C2::commit(&commit_keys, &message).unwrap();
         assert!(C2::verify(&commit_keys, &message, &com).unwrap());
         assert!(!C2::verify(&commit_keys, &wrong_message, &com).unwrap());
-        message.push(<Bls12_381 as PairingEngine>::G2Projective::rand(&mut rng));
+        message.push(<Bls12_381 as Pairing>::G2::rand(&mut rng));
         assert!(C2::verify(&commit_keys, &message, &com).is_err());
     }
 }
diff --git a/dh_commitments/src/identity/mod.rs b/dh_commitments/src/identity/mod.rs
index 627fb8b..0ec793e 100644
--- a/dh_commitments/src/identity/mod.rs
+++ b/dh_commitments/src/identity/mod.rs
@@ -1,8 +1,7 @@
-use ark_ff::{bytes::ToBytes, fields::PrimeField};
-use ark_serialize::{CanonicalDeserialize, CanonicalSerialize, SerializationError};
+use ark_ff::fields::PrimeField;
+use ark_serialize::{CanonicalDeserialize, CanonicalSerialize};
 use ark_std::rand::Rng;
 use std::{
-    io::{Read, Result as IoResult, Write},
     marker::PhantomData,
     ops::{Add, MulAssign},
 };
@@ -18,12 +17,6 @@ pub struct IdentityCommitment<T, F: PrimeField> {
 #[derive(CanonicalSerialize, CanonicalDeserialize, Clone, Default, Eq, PartialEq)]
 pub struct HomomorphicPlaceholderValue;
 
-impl ToBytes for HomomorphicPlaceholderValue {
-    fn write<W: Write>(&self, _writer: W) -> IoResult<()> {
-        Ok(())
-    }
-}
-
 impl Add for HomomorphicPlaceholderValue {
     type Output = Self;
 
@@ -41,15 +34,6 @@ pub struct IdentityOutput<T>(pub Vec<T>)
 where
     T: CanonicalSerialize + CanonicalDeserialize + Clone + Default + Eq;
 
-impl<T> ToBytes for IdentityOutput<T>
-where
-    T: ToBytes + CanonicalSerialize + CanonicalDeserialize + Clone + Default + Eq,
-{
-    fn write<W: Write>(&self, mut writer: W) -> IoResult<()> {
-        self.0.write(&mut writer)
-    }
-}
-
 impl<T> Add for IdentityOutput<T>
 where
     T: Add<T, Output = T> + CanonicalSerialize + CanonicalDeserialize + Clone + Default + Eq,
@@ -79,8 +63,7 @@
 
 impl<T, F> DoublyHomomorphicCommitment for IdentityCommitment<T, F>
 where
-    T: ToBytes
-        + CanonicalSerialize
+    T: CanonicalSerialize
         + CanonicalDeserialize
         + Clone
         + Default
diff --git a/dh_commitments/src/lib.rs b/dh_commitments/src/lib.rs
index ec7aeaa..97676b5 100644
--- a/dh_commitments/src/lib.rs
+++ b/dh_commitments/src/lib.rs
@@ -1,5 +1,5 @@
-use ark_ec::group::Group;
-use ark_ff::{bytes::ToBytes, fields::PrimeField};
+use ark_ec::Group;
+use ark_ff::fields::PrimeField;
 use ark_serialize::{CanonicalDeserialize, CanonicalSerialize};
 use ark_std::rand::Rng;
 use std::{
@@ -19,8 +19,7 @@ pub type Error = Box<dyn ErrorTrait>;
 
 pub trait DoublyHomomorphicCommitment: Clone {
     type Scalar: PrimeField;
-    type Message: ToBytes
-        + CanonicalSerialize
+    type Message: CanonicalSerialize
         + CanonicalDeserialize
         + Clone
        + Default
@@ -29,8 +28,7 @@ pub trait DoublyHomomorphicCommitment: Clone {
         + Sync
         + Add<Self::Message, Output = Self::Message>
         + MulAssign<Self::Scalar>;
-    type Key: ToBytes
-        + CanonicalSerialize
+    type Key: CanonicalSerialize
         + CanonicalDeserialize
         + Clone
         + Default
@@ -39,8 +37,7 @@ pub trait DoublyHomomorphicCommitment: Clone {
         + Sync
         + Add<Self::Key, Output = Self::Key>
         + MulAssign<Self::Scalar>;
-    type Output: ToBytes
-        + CanonicalSerialize
+    type Output: CanonicalSerialize
         + CanonicalDeserialize
         + Clone
         + Default
diff --git a/dh_commitments/src/pedersen/mod.rs b/dh_commitments/src/pedersen/mod.rs
index 9a73a09..55b04ae 100644
--- a/dh_commitments/src/pedersen/mod.rs
+++ b/dh_commitments/src/pedersen/mod.rs
@@ -1,4 +1,4 @@
-use ark_ec::ProjectiveCurve;
+use ark_ec::CurveGroup;
 use ark_std::rand::Rng;
 use std::marker::PhantomData;
 
@@ -7,11 +7,11 @@ use crate::{random_generators, DoublyHomomorphicCommitment, Error};
 use ark_inner_products::{InnerProduct, MultiexponentiationInnerProduct};
 
 #[derive(Clone)]
-pub struct PedersenCommitment<G: ProjectiveCurve> {
+pub struct PedersenCommitment<G: CurveGroup> {
     _group: PhantomData<G>,
 }
 
-impl<G: ProjectiveCurve> DoublyHomomorphicCommitment for PedersenCommitment<G> {
+impl<G: CurveGroup> DoublyHomomorphicCommitment for PedersenCommitment<G> {
     type Scalar = G::ScalarField;
     type Message = G::ScalarField;
     type Key = G;
@@ -29,7 +29,7 @@ impl<G: ProjectiveCurve> DoublyHomomorphicCommitment for PedersenCommitment<G> {
 #[cfg(test)]
 mod tests {
     use super::*;
-    use ark_ed_on_bls12_381::EdwardsProjective as JubJub;
+    use ark_ed_on_bls12_381::{EdwardsProjective as JubJub, Fr};
     use ark_ff::UniformRand;
     use ark_std::rand::{rngs::StdRng, SeedableRng};
 
@@ -43,13 +43,13 @@ mod tests {
         let mut message = Vec::new();
         let mut wrong_message = Vec::new();
         for _ in 0..TEST_SIZE {
-            message.push(<JubJub as ProjectiveCurve>::ScalarField::rand(&mut rng));
-            wrong_message.push(<JubJub as ProjectiveCurve>::ScalarField::rand(&mut rng));
+            message.push(Fr::rand(&mut rng));
+            wrong_message.push(Fr::rand(&mut rng));
         }
         let com = C::commit(&commit_keys, &message).unwrap();
         assert!(C::verify(&commit_keys, &message, &com).unwrap());
         assert!(!C::verify(&commit_keys, &wrong_message, &com).unwrap());
-        message.push(<JubJub as ProjectiveCurve>::ScalarField::rand(&mut rng));
+        message.push(Fr::rand(&mut rng));
         assert!(C::verify(&commit_keys, &message, &com).is_err());
     }
 }
diff --git a/inner_products/Cargo.toml b/inner_products/Cargo.toml
index 2352ac7..306d36f 100644
--- a/inner_products/Cargo.toml
+++ b/inner_products/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "ark-inner-products"
-version = "0.3.0"
+version = "0.4.0"
 authors = [
     "Benedikt Bünz",
     "Mary Maller",
@@ -16,10 +16,10 @@ documentation = "https://docs.rs/ark-inner-products/"
 # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
 
 [dependencies]
-ark-ff = "0.3"
-ark-ec = "0.3"
-ark-std = "0.3"
-ark-serialize = { version = "0.3", features = [ "derive" ] }
+ark-ff = "0.4"
+ark-ec = "0.4"
+ark-std = "0.4"
+ark-serialize = { version = "0.4", features = [ "derive" ] }
 rayon = { version = "1", optional = true }
 
 [features]
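Note: the deletions above track the removal of `ark_ff::bytes::ToBytes` in arkworks 0.4. Commitment types now only need `CanonicalSerialize`/`CanonicalDeserialize`, and code that used to feed `to_bytes![...]` into a hash digest serializes into the byte buffer instead, as the Fiat-Shamir loops later in this patch do. A hedged sketch of the replacement pattern (the helper name is invented for illustration):

    use ark_serialize::{CanonicalSerialize, SerializationError};

    /// 0.3 wrote `hash_input.extend_from_slice(&to_bytes![value]?)`;
    /// 0.4 serializes directly into the buffer (Vec<u8> implements io::Write).
    fn absorb<T: CanonicalSerialize>(
        hash_input: &mut Vec<u8>,
        value: &T,
    ) -> Result<(), SerializationError> {
        value.serialize_uncompressed(&mut *hash_input)
    }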
diff --git a/inner_products/src/lib.rs b/inner_products/src/lib.rs
index 9009d72..6fabc06 100644
--- a/inner_products/src/lib.rs
+++ b/inner_products/src/lib.rs
@@ -1,13 +1,13 @@
-use ark_ec::{msm::VariableBaseMSM, PairingEngine, ProjectiveCurve};
-use ark_ff::{bytes::ToBytes, Field, PrimeField};
-use ark_serialize::{CanonicalDeserialize, CanonicalSerialize, Read, SerializationError};
-use ark_std::{cfg_into_iter, cfg_iter};
+use ark_ec::{
+    pairing::{MillerLoopOutput, Pairing, PairingOutput},
+    CurveGroup,
+};
+use ark_ff::Field;
+use ark_std::cfg_iter;
 use std::{
     error::Error as ErrorTrait,
     fmt::{Display, Formatter, Result as FmtResult},
-    io::{Result as IoResult, Write},
     marker::PhantomData,
-    ops::{Add, Mul, MulAssign},
 };
 
 #[cfg(feature = "parallel")]
@@ -49,14 +49,14 @@ pub trait InnerProduct: Copy {
 }
 
 #[derive(Copy, Clone)]
-pub struct PairingInnerProduct<P: PairingEngine> {
+pub struct PairingInnerProduct<P: Pairing> {
     _pair: PhantomData<P>,
 }
 
-impl<P: PairingEngine> InnerProduct for PairingInnerProduct<P> {
-    type LeftMessage = P::G1Projective;
-    type RightMessage = P::G2Projective;
-    type Output = ExtensionFieldElement<P>;
+impl<P: Pairing> InnerProduct for PairingInnerProduct<P> {
+    type LeftMessage = P::G1;
+    type RightMessage = P::G2;
+    type Output = PairingOutput<P>;
 
     fn inner_product(
         left: &[Self::LeftMessage],
@@ -68,22 +68,59 @@ impl<P: PairingEngine> InnerProduct for PairingInnerProduct<P> {
                 right.len(),
             )));
         };
-        let aff_left = P::G1Projective::batch_normalization_into_affine(left);
-        let aff_right = P::G2Projective::batch_normalization_into_affine(right);
-        let aff_pairs = cfg_into_iter!(aff_left)
-            .zip(aff_right)
-            .map(|(a, b)| (P::G1Prepared::from(a), P::G2Prepared::from(b)))
-            .collect::<Vec<_>>();
-        Ok(ExtensionFieldElement(P::product_of_pairings(&aff_pairs)))
+
+        Ok(cfg_multi_pairing(left, right).unwrap())
     }
 }
 
+/// Equivalent to `P::multi_pairing`, but with more parallelism (if enabled)
+pub fn cfg_multi_pairing<P: Pairing>(left: &[P::G1], right: &[P::G2]) -> Option<PairingOutput<P>> {
+    // We make the input affine, then convert to prepared. We do this for speed, since the
+    // conversion from projective to prepared always goes through affine.
+    let aff_left = P::G1::normalize_batch(left);
+    let aff_right = P::G2::normalize_batch(right);
+
+    let left = cfg_iter!(aff_left)
+        .map(P::G1Prepared::from)
+        .collect::<Vec<_>>();
+    let right = cfg_iter!(aff_right)
+        .map(P::G2Prepared::from)
+        .collect::<Vec<_>>();
+
+    // We want to process N chunks in parallel where N is the number of threads available
+    #[cfg(feature = "parallel")]
+    let num_chunks = rayon::current_num_threads();
+    #[cfg(not(feature = "parallel"))]
+    let num_chunks = 1;
+
+    let chunk_size = if num_chunks <= left.len() {
+        left.len() / num_chunks
+    } else {
+        // More threads than elements. Just do it all in parallel
+        1
+    };
+
+    #[cfg(feature = "parallel")]
+    let (left_chunks, right_chunks) = (left.par_chunks(chunk_size), right.par_chunks(chunk_size));
+    #[cfg(not(feature = "parallel"))]
+    let (left_chunks, right_chunks) = (left.chunks(chunk_size), right.chunks(chunk_size));
+
+    // Compute all the (partial) pairings and take the product. We have to take the product over
+    // P::TargetField because MillerLoopOutput doesn't impl Product
+    let ml_result = left_chunks
+        .zip(right_chunks)
+        .map(|(aa, bb)| P::multi_miller_loop(aa.iter().cloned(), bb.iter().cloned()).0)
+        .product();
+
+    P::final_exponentiation(MillerLoopOutput(ml_result))
+}
+
 #[derive(Copy, Clone)]
-pub struct MultiexponentiationInnerProduct<G: ProjectiveCurve> {
+pub struct MultiexponentiationInnerProduct<G: CurveGroup> {
     _projective: PhantomData<G>,
 }
 
-impl<G: ProjectiveCurve> InnerProduct for MultiexponentiationInnerProduct<G> {
+impl<G: CurveGroup> InnerProduct for MultiexponentiationInnerProduct<G> {
     type LeftMessage = G;
     type RightMessage = G::ScalarField;
     type Output = G;
@@ -98,11 +135,9 @@ impl<G: ProjectiveCurve> InnerProduct for MultiexponentiationInnerProduct<G> {
                 right.len(),
             )));
         };
-        let right_bigints = cfg_iter!(right).map(|b| b.into_repr()).collect::<Vec<_>>();
-        Ok(VariableBaseMSM::multi_scalar_mul(
-            &G::batch_normalization_into_affine(left),
-            &right_bigints,
-        ))
+
+        // Can unwrap because we did the length check above
+        Ok(G::msm(&G::normalize_batch(left), right).unwrap())
     }
 }
 
@@ -129,43 +164,3 @@ impl<F: Field> InnerProduct for ScalarInnerProduct<F> {
         Ok(cfg_iter!(left).zip(right).map(|(x, y)| *x * y).sum())
     }
 }
-
-// Helper wrapper type around target group commitment output in order to implement MulAssign (needed for dh_commitments)
-//TODO: PairingEngine provides target group GT implementing Group for prime order P::Fr
-
-#[derive(CanonicalSerialize, CanonicalDeserialize, Clone, Debug)]
-pub struct ExtensionFieldElement<P: PairingEngine>(pub P::Fqk);
-
-impl<P: PairingEngine> Default for ExtensionFieldElement<P> {
-    fn default() -> Self {
-        ExtensionFieldElement(<P::Fqk>::default())
-    }
-}
-
-impl<P: PairingEngine> PartialEq for ExtensionFieldElement<P> {
-    fn eq(&self, other: &Self) -> bool {
-        self.0.eq(&other.0)
-    }
-}
-
-impl<P: PairingEngine> Eq for ExtensionFieldElement<P> {}
-
-impl<P: PairingEngine> MulAssign<P::Fr> for ExtensionFieldElement<P> {
-    fn mul_assign(&mut self, rhs: P::Fr) {
-        *self = ExtensionFieldElement(self.0.pow(rhs.into_repr()))
-    }
-}
-
-impl<P: PairingEngine> Add for ExtensionFieldElement<P> {
-    type Output = Self;
-
-    fn add(self, rhs: Self) -> Self::Output {
-        ExtensionFieldElement(<P::Fqk as Mul>::mul(self.0, rhs.0))
-    }
-}
-
-impl<P: PairingEngine> ToBytes for ExtensionFieldElement<P> {
-    fn write<W: Write>(&self, mut writer: W) -> IoResult<()> {
-        self.0.write(&mut writer)
-    }
-}
diff --git a/ip_proofs/Cargo.toml b/ip_proofs/Cargo.toml
index a54ec4e..96f456f 100644
--- a/ip_proofs/Cargo.toml
+++ b/ip_proofs/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "ark-ip-proofs"
-version = "0.3.0"
+version = "0.4.0"
 authors = [
     "Benedikt Bünz",
     "Mary Maller",
@@ -14,12 +14,12 @@ repository = "https://github.com/arkworks-rs/ripp"
 documentation = "https://docs.rs/ark-ip-proofs/"
 
 [dependencies]
-ark-ec = "0.3"
-ark-ff = "0.3"
-ark-poly = "0.3"
-ark-serialize = { version = "0.3", features = [ "derive" ] }
-ark-std = "0.3"
-ark-groth16 = "0.3"
+ark-ec = "0.4"
+ark-ff = "0.4"
+ark-poly = "0.4"
+ark-serialize = { version = "0.4", features = [ "derive" ] }
+ark-std = "0.4"
+ark-groth16 = "0.4"
 digest = "0.9"
 num-traits = "0.2"
 itertools = "0.10"
@@ -29,11 +29,11 @@ ark-inner-products = { path = "../inner_products" }
 ark-dh-commitments = { path = "../dh_commitments" }
 
 [dev-dependencies]
-ark-bls12-381 = { version = "0.3", features = [ "curve" ] }
-ark-ed-on-bls12-381 = "0.3"
+ark-bls12-381 = { version = "0.4", features = [ "curve" ] }
+ark-ed-on-bls12-381 = "0.4"
 
-ark-relations = "0.3"
-ark-r1cs-std = "0.3"
+ark-relations = "0.4"
+ark-r1cs-std = "0.4"
 blake2 = "0.9"
 
 [features]
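Note: the new `cfg_multi_pairing` helper above mirrors `Pairing::multi_pairing`, only chunked across threads; the two should agree on any pair of equal-length slices. A hedged usage sketch (assumes the ark-bls12-381 dev-dependency is available):

    use ark_bls12_381::Bls12_381;
    use ark_ec::pairing::Pairing;
    use ark_inner_products::cfg_multi_pairing;
    use ark_std::UniformRand;

    fn main() {
        let mut rng = ark_std::test_rng();
        let left: Vec<_> = (0..8).map(|_| <Bls12_381 as Pairing>::G1::rand(&mut rng)).collect();
        let right: Vec<_> = (0..8).map(|_| <Bls12_381 as Pairing>::G2::rand(&mut rng)).collect();
        // The chunked helper and the upstream multi_pairing must agree
        let a = cfg_multi_pairing::<Bls12_381>(&left, &right).unwrap();
        let b = Bls12_381::multi_pairing(left.clone(), right.clone());
        assert_eq!(a, b);
    }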
diff --git a/ip_proofs/src/applications/groth16_aggregation.rs b/ip_proofs/src/applications/groth16_aggregation.rs
index 67d26db..91b2592 100644
--- a/ip_proofs/src/applications/groth16_aggregation.rs
+++ b/ip_proofs/src/applications/groth16_aggregation.rs
@@ -1,8 +1,7 @@
-use ark_ec::{group::Group, AffineCurve, PairingEngine};
-use ark_ff::{to_bytes, Field, One};
+use ark_ec::pairing::{Pairing, PairingOutput};
+use ark_ff::{Field, One};
 use ark_groth16::{Proof, VerifyingKey};
-
-use std::ops::AddAssign;
+use ark_serialize::CanonicalSerialize;
 
 use ark_std::rand::Rng;
 use digest::Digest;
@@ -19,15 +18,14 @@ use ark_dh_commitments::{
     identity::{HomomorphicPlaceholderValue, IdentityCommitment, IdentityOutput},
 };
 use ark_inner_products::{
-    ExtensionFieldElement, InnerProduct, MultiexponentiationInnerProduct, PairingInnerProduct,
-    ScalarInnerProduct,
+    InnerProduct, MultiexponentiationInnerProduct, PairingInnerProduct, ScalarInnerProduct,
 };
 
 type PairingInnerProductAB<P, D> = TIPA<
     PairingInnerProduct<P>,
     AFGHOCommitmentG1<P>,
     AFGHOCommitmentG2<P>,
-    IdentityCommitment<ExtensionFieldElement<P>, <P as PairingEngine>::Fr>,
+    IdentityCommitment<PairingOutput<P>, <P as Pairing>::ScalarField>,
     P,
     D,
 >;
@@ -36,40 +34,40 @@ type PairingInnerProductABProof<P, D> = TIPAProof<
     PairingInnerProduct<P>,
     AFGHOCommitmentG1<P>,
     AFGHOCommitmentG2<P>,
-    IdentityCommitment<ExtensionFieldElement<P>, <P as PairingEngine>::Fr>,
+    IdentityCommitment<PairingOutput<P>, <P as Pairing>::ScalarField>,
     P,
     D,
 >;
 
 type MultiExpInnerProductC<P, D> = TIPAWithSSM<
-    MultiexponentiationInnerProduct<<P as PairingEngine>::G1Projective>,
+    MultiexponentiationInnerProduct<<P as Pairing>::G1>,
     AFGHOCommitmentG1<P>,
-    IdentityCommitment<<P as PairingEngine>::G1Projective, <P as PairingEngine>::Fr>,
+    IdentityCommitment<<P as Pairing>::G1, <P as Pairing>::ScalarField>,
     P,
     D,
 >;
 
 type MultiExpInnerProductCProof<P, D> = TIPAWithSSMProof<
-    MultiexponentiationInnerProduct<<P as PairingEngine>::G1Projective>,
+    MultiexponentiationInnerProduct<<P as Pairing>::G1>,
     AFGHOCommitmentG1<P>,
-    IdentityCommitment<<P as PairingEngine>::G1Projective, <P as PairingEngine>::Fr>,
+    IdentityCommitment<<P as Pairing>::G1, <P as Pairing>::ScalarField>,
     P,
     D,
 >;
 
-pub struct AggregateProof<P: PairingEngine, D: Digest> {
-    com_a: ExtensionFieldElement<P>,
-    com_b: ExtensionFieldElement<P>,
-    com_c: ExtensionFieldElement<P>,
-    ip_ab: ExtensionFieldElement<P>,
-    agg_c: P::G1Projective,
+pub struct AggregateProof<P: Pairing, D: Digest> {
+    com_a: PairingOutput<P>,
+    com_b: PairingOutput<P>,
+    com_c: PairingOutput<P>,
+    ip_ab: PairingOutput<P>,
+    agg_c: P::G1,
     tipa_proof_ab: PairingInnerProductABProof<P, D>,
     tipa_proof_c: MultiExpInnerProductCProof<P, D>,
 }
 
 pub fn setup_inner_product<P, D, R: Rng>(rng: &mut R, size: usize) -> Result<SRS<P>, Error>
 where
-    P: PairingEngine,
+    P: Pairing,
     D: Digest,
 {
     let (srs, _) = PairingInnerProductAB::<P, D>::setup(rng, size)?;
@@ -81,21 +79,21 @@ pub fn aggregate_proofs<P, D>(
     proofs: &[Proof<P>],
 ) -> Result<AggregateProof<P, D>, Error>
 where
-    P: PairingEngine,
+    P: Pairing,
     D: Digest,
 {
     let a = proofs
         .iter()
-        .map(|proof| proof.a.into_projective())
-        .collect::<Vec<P::G1Projective>>();
+        .map(|proof| proof.a.into())
+        .collect::<Vec<P::G1>>();
     let b = proofs
         .iter()
-        .map(|proof| proof.b.into_projective())
-        .collect::<Vec<P::G2Projective>>();
+        .map(|proof| proof.b.into())
+        .collect::<Vec<P::G2>>();
     let c = proofs
         .iter()
-        .map(|proof| proof.c.into_projective())
-        .collect::<Vec<P::G1Projective>>();
+        .map(|proof| proof.c.into())
+        .collect::<Vec<P::G1>>();
 
     let (ck_1, ck_2) = ip_srs.get_commitment_keys();
 
@@ -108,9 +106,10 @@ where
     let r = loop {
         let mut hash_input = Vec::new();
         hash_input.extend_from_slice(&counter_nonce.to_be_bytes()[..]);
-        //TODO: Should use CanonicalSerialize instead of ToBytes
-        hash_input.extend_from_slice(&to_bytes![com_a, com_b, com_c]?);
-        if let Some(r) = <P::Fr>::from_random_bytes(&D::digest(&hash_input)) {
+        com_a.serialize_uncompressed(&mut hash_input)?;
+        com_b.serialize_uncompressed(&mut hash_input)?;
+        com_c.serialize_uncompressed(&mut hash_input)?;
+        if let Some(r) = <P::ScalarField>::from_random_bytes(&D::digest(&hash_input)) {
            break r;
        };
        counter_nonce += 1;
@@ -120,16 +119,16 @@ where
     let a_r = a
         .iter()
         .zip(&r_vec)
-        .map(|(a, r)| a.mul(r))
-        .collect::<Vec<P::G1Projective>>();
+        .map(|(&a, r)| a * r)
+        .collect::<Vec<P::G1>>();
     let ip_ab = PairingInnerProduct::<P>::inner_product(&a_r, &b)?;
-    let agg_c = MultiexponentiationInnerProduct::<P::G1Projective>::inner_product(&c, &r_vec)?;
+    let agg_c = MultiexponentiationInnerProduct::<P::G1>::inner_product(&c, &r_vec)?;
 
     let ck_1_r = ck_1
         .iter()
         .zip(&r_vec)
-        .map(|(ck, r)| ck.mul(&r.inverse().unwrap()))
-        .collect::<Vec<P::G2Projective>>();
+        .map(|(&ck, r)| ck * r.inverse().unwrap())
+        .collect::<Vec<P::G2>>();
 
     assert_eq!(
         com_a,
@@ -163,11 +162,11 @@ where
 pub fn verify_aggregate_proof<P, D>(
     ip_verifier_srs: &VerifierSRS<P>,
     vk: &VerifyingKey<P>,
-    public_inputs: &Vec<Vec<P::Fr>>, //TODO: Should use ToConstraintField instead
+    public_inputs: &Vec<Vec<P::ScalarField>>, //TODO: Should use ToConstraintField instead
     proof: &AggregateProof<P, D>,
 ) -> Result<bool, Error>
 where
-    P: PairingEngine,
+    P: Pairing,
     D: Digest,
 {
     // Random linear combination of proofs
@@ -175,9 +174,10 @@ where
     let r = loop {
         let mut hash_input = Vec::new();
         hash_input.extend_from_slice(&counter_nonce.to_be_bytes()[..]);
-        //TODO: Should use CanonicalSerialize instead of ToBytes
-        hash_input.extend_from_slice(&to_bytes![proof.com_a, proof.com_b, proof.com_c]?);
-        if let Some(r) = <P::Fr>::from_random_bytes(&D::digest(&hash_input)) {
+        proof.com_a.serialize_uncompressed(&mut hash_input)?;
+        proof.com_b.serialize_uncompressed(&mut hash_input)?;
+        proof.com_c.serialize_uncompressed(&mut hash_input)?;
+        if let Some(r) = <P::ScalarField>::from_random_bytes(&D::digest(&hash_input)) {
             break r;
         };
         counter_nonce += 1;
@@ -205,28 +205,27 @@ where
 
     // Check aggregate pairing product equation
 
-    let r_sum =
-        (r.pow(&[public_inputs.len() as u64]) - &<P::Fr>::one()) / &(r.clone() - &<P::Fr>::one());
-    let p1 = P::pairing(vk.alpha_g1.into_projective().mul(&r_sum), vk.beta_g2);
+    let r_sum = (r.pow(&[public_inputs.len() as u64]) - &<P::ScalarField>::one())
+        / &(r.clone() - &<P::ScalarField>::one());
+    let p1 = P::pairing(P::G1::from(vk.alpha_g1) * r_sum, vk.beta_g2);
 
     assert_eq!(vk.gamma_abc_g1.len(), public_inputs[0].len() + 1);
     let r_vec = structured_scalar_power(public_inputs.len(), &r);
-    let mut g_ic = vk.gamma_abc_g1[0].into_projective().mul(&r_sum);
+    let mut g_ic = P::G1::from(vk.gamma_abc_g1[0]) * r_sum;
     for (i, b) in vk.gamma_abc_g1.iter().skip(1).enumerate() {
-        g_ic.add_assign(
-            &b.into_projective().mul(&ScalarInnerProduct::inner_product(
+        g_ic += P::G1::from(*b)
+            * &ScalarInnerProduct::inner_product(
                 &public_inputs
                     .iter()
                     .map(|inputs| inputs[i].clone())
-                    .collect::<Vec<P::Fr>>(),
+                    .collect::<Vec<P::ScalarField>>(),
                 &r_vec,
-            )?),
-        );
+            )?;
     }
     let p2 = P::pairing(g_ic, vk.gamma_g2);
     let p3 = P::pairing(proof.agg_c, vk.delta_g2);
 
-    let ppe_valid = proof.ip_ab.0 == (p1 * &p2) * &p3;
+    let ppe_valid = proof.ip_ab == p1 + p2 + p3;
 
     Ok(tipa_proof_ab_valid && tipa_proof_c_valid && ppe_valid)
 }
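Note: the `p1 + p2 + p3` form above works because `PairingOutput<P>` in arkworks 0.4 is a `Group` written additively, whereas 0.3 exposed the raw `Fqk` element with multiplicative notation; `proof.ip_ab == p1 + p2 + p3` expresses the same pairing-product equation as `ip_ab.0 == (p1 * p2) * p3` did before. A hedged sketch of bilinearity in the 0.4 notation (assumes ark-bls12-381):

    use ark_bls12_381::Bls12_381;
    use ark_ec::pairing::Pairing;
    use ark_std::UniformRand;

    fn main() {
        let mut rng = ark_std::test_rng();
        let g = <Bls12_381 as Pairing>::G1::rand(&mut rng);
        let h = <Bls12_381 as Pairing>::G2::rand(&mut rng);
        // e(2g, h) = e(g, h) + e(g, h) in additive PairingOutput notation
        let lhs = Bls12_381::pairing(g + g, h);
        let rhs = Bls12_381::pairing(g, h) + Bls12_381::pairing(g, h);
        assert_eq!(lhs, rhs);
    }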
diff --git a/ip_proofs/src/applications/poly_commit/mod.rs b/ip_proofs/src/applications/poly_commit/mod.rs
index a0c3fc8..0b045d0 100644
--- a/ip_proofs/src/applications/poly_commit/mod.rs
+++ b/ip_proofs/src/applications/poly_commit/mod.rs
@@ -1,7 +1,11 @@
-use ark_ec::{msm::VariableBaseMSM, PairingEngine, ProjectiveCurve};
-use ark_ff::{Field, One, PrimeField, UniformRand, Zero};
+use ark_ec::{
+    pairing::{Pairing, PairingOutput},
+    scalar_mul::variable_base::VariableBaseMSM,
+    CurveGroup, Group,
+};
+use ark_ff::{Field, One, UniformRand, Zero};
 use ark_poly::polynomial::{
-    univariate::DensePolynomial as UnivariatePolynomial, Polynomial, UVPolynomial,
+    univariate::DensePolynomial as UnivariatePolynomial, DenseUVPolynomial, Polynomial,
 };
 use ark_std::{end_timer, start_timer};
 
@@ -23,101 +27,93 @@ use ark_dh_commitments::{
     identity::{HomomorphicPlaceholderValue, IdentityCommitment, IdentityOutput},
     DoublyHomomorphicCommitment,
 };
-use ark_inner_products::{ExtensionFieldElement, MultiexponentiationInnerProduct};
+use ark_inner_products::MultiexponentiationInnerProduct;
 
 pub mod transparent;
 
 type PolynomialEvaluationSecondTierIPA<P, D> = TIPAWithSSM<
-    MultiexponentiationInnerProduct<<P as PairingEngine>::G1Projective>,
+    MultiexponentiationInnerProduct<<P as Pairing>::G1>,
     AFGHOCommitmentG1<P>,
-    IdentityCommitment<<P as PairingEngine>::G1Projective, <P as PairingEngine>::Fr>,
+    IdentityCommitment<<P as Pairing>::G1, <P as Pairing>::ScalarField>,
     P,
     D,
 >;
 
 type PolynomialEvaluationSecondTierIPAProof<P, D> = TIPAWithSSMProof<
-    MultiexponentiationInnerProduct<<P as PairingEngine>::G1Projective>,
+    MultiexponentiationInnerProduct<<P as Pairing>::G1>,
     AFGHOCommitmentG1<P>,
-    IdentityCommitment<<P as PairingEngine>::G1Projective, <P as PairingEngine>::Fr>,
+    IdentityCommitment<<P as Pairing>::G1, <P as Pairing>::ScalarField>,
     P,
     D,
 >;
 
-pub struct KZG<P: PairingEngine> {
+pub struct KZG<P: Pairing> {
     _pairing: PhantomData<P>,
 }
 
 // Simple implementation of KZG polynomial commitment scheme
-impl<P: PairingEngine> KZG<P> {
+impl<P: Pairing> KZG<P> {
     pub fn setup<R: Rng>(
         rng: &mut R,
         degree: usize,
     ) -> Result<(Vec<P::G1Affine>, VerifierSRS<P>), Error> {
-        let alpha = <P::Fr>::rand(rng);
-        let beta = <P::Fr>::rand(rng);
-        let g = <P::G1Projective>::prime_subgroup_generator();
-        let h = <P::G2Projective>::prime_subgroup_generator();
+        let alpha = <P::ScalarField>::rand(rng);
+        let beta = <P::ScalarField>::rand(rng);
+        let g = <P::G1>::generator();
+        let h = <P::G2>::generator();
         let g_alpha_powers = structured_generators_scalar_power(degree + 1, &g, &alpha);
         Ok((
-            <P as PairingEngine>::G1Projective::batch_normalization_into_affine(&g_alpha_powers),
+            <P as Pairing>::G1::normalize_batch(&g_alpha_powers),
             VerifierSRS {
                 g: g.clone(),
                 h: h.clone(),
-                g_beta: g.mul(beta.into_repr()),
-                h_alpha: h.mul(alpha.into_repr()),
+                g_beta: g * beta,
+                h_alpha: h * alpha,
             },
         ))
     }
 
     pub fn commit(
         powers: &[P::G1Affine],
-        polynomial: &UnivariatePolynomial<P::Fr>,
-    ) -> Result<P::G1Projective, Error> {
+        polynomial: &UnivariatePolynomial<P::ScalarField>,
+    ) -> Result<P::G1, Error> {
         assert!(powers.len() >= polynomial.degree() + 1);
 
         let mut coeffs = polynomial.coeffs.to_vec();
-        coeffs.resize(powers.len(), <P::Fr>::zero());
+        coeffs.resize(powers.len(), <P::ScalarField>::zero());
 
-        Ok(VariableBaseMSM::multi_scalar_mul(
-            powers,
-            &coeffs.iter().map(|b| b.into_repr()).collect::<Vec<_>>(),
-        ))
+        // Can unwrap because coeffs.len() is guaranteed to be equal to powers.len()
+        Ok(P::G1::msm(powers, &coeffs).unwrap())
     }
 
     pub fn open(
         powers: &[P::G1Affine],
-        polynomial: &UnivariatePolynomial<P::Fr>,
-        point: &P::Fr,
-    ) -> Result<P::G1Projective, Error> {
+        polynomial: &UnivariatePolynomial<P::ScalarField>,
+        point: &P::ScalarField,
+    ) -> Result<P::G1, Error> {
         assert!(powers.len() >= polynomial.degree() + 1);
 
         // Trick to calculate (p(x) - p(z)) / (x - z) as p(x) / (x - z) ignoring remainder p(z)
         let quotient_polynomial = polynomial
-            / &UnivariatePolynomial::from_coefficients_vec(vec![-point.clone(), P::Fr::one()]);
+            / &UnivariatePolynomial::from_coefficients_vec(vec![
+                -point.clone(),
+                P::ScalarField::one(),
+            ]);
         let mut quotient_coeffs = quotient_polynomial.coeffs.to_vec();
-        quotient_coeffs.resize(powers.len(), <P::Fr>::zero());
-        Ok(VariableBaseMSM::multi_scalar_mul(
-            powers,
-            &quotient_coeffs
-                .iter()
-                .map(|b| b.into_repr())
-                .collect::<Vec<_>>(),
-        ))
+        quotient_coeffs.resize(powers.len(), <P::ScalarField>::zero());
+
+        // Can unwrap because quotient_coeffs.len() is guaranteed to be equal to powers.len()
+        Ok(P::G1::msm(powers, &quotient_coeffs).unwrap())
     }
 
     pub fn verify(
         v_srs: &VerifierSRS<P>,
-        com: &P::G1Projective,
-        point: &P::Fr,
-        eval: &P::Fr,
-        proof: &P::G1Projective,
+        com: &P::G1,
+        point: &P::ScalarField,
+        eval: &P::ScalarField,
+        proof: &P::G1,
     ) -> Result<bool, Error> {
-        Ok(P::pairing(
-            com.clone() - &v_srs.g.mul(eval.into_repr()),
-            v_srs.h.clone(),
-        ) == P::pairing(
-            proof.clone(),
-            v_srs.h_alpha.clone() - &v_srs.h.mul(point.into_repr()),
-        ))
+        Ok(P::pairing(com.clone() - v_srs.g * eval, v_srs.h.clone())
+            == P::pairing(proof.clone(), v_srs.h_alpha.clone() - v_srs.h * point))
     }
 }
 
@@ -142,43 +138,45 @@ impl<F: Field> BivariatePolynomial<F> {
     }
 }
 
-pub struct OpeningProof<P: PairingEngine, D: Digest> {
+pub struct OpeningProof<P: Pairing, D: Digest> {
     ip_proof: PolynomialEvaluationSecondTierIPAProof<P, D>,
-    y_eval_comm: P::G1Projective,
-    kzg_proof: P::G1Projective,
+    y_eval_comm: P::G1,
+    kzg_proof: P::G1,
 }
 
-pub struct BivariatePolynomialCommitment<P: PairingEngine, D: Digest> {
+pub struct BivariatePolynomialCommitment<P: Pairing, D: Digest> {
     _pairing: PhantomData<P>,
     _digest: PhantomData<D>,
 }
 
-impl<P: PairingEngine, D: Digest> BivariatePolynomialCommitment<P, D> {
+impl<P: Pairing, D: Digest> BivariatePolynomialCommitment<P, D> {
     pub fn setup<R: Rng>(
         rng: &mut R,
         x_degree: usize,
         y_degree: usize,
     ) -> Result<(SRS<P>, Vec<P::G1Affine>), Error> {
-        let alpha = <P::Fr>::rand(rng);
-        let beta = <P::Fr>::rand(rng);
-        let g = <P::G1Projective>::prime_subgroup_generator();
-        let h = <P::G2Projective>::prime_subgroup_generator();
-        let kzg_srs = <P as PairingEngine>::G1Projective::batch_normalization_into_affine(
-            &structured_generators_scalar_power(y_degree + 1, &g, &alpha),
-        );
+        let alpha = <P::ScalarField>::rand(rng);
+        let beta = <P::ScalarField>::rand(rng);
+        let g = <P::G1>::generator();
+        let h = <P::G2>::generator();
+        let kzg_srs = <P as Pairing>::G1::normalize_batch(&structured_generators_scalar_power(
+            y_degree + 1,
+            &g,
+            &alpha,
+        ));
         let srs = SRS {
             g_alpha_powers: vec![g.clone()],
             h_beta_powers: structured_generators_scalar_power(2 * x_degree + 1, &h, &beta),
-            g_beta: g.mul(beta.into_repr()),
-            h_alpha: h.mul(alpha.into_repr()),
+            g_beta: g * beta,
+            h_alpha: h * alpha,
         };
         Ok((srs, kzg_srs))
     }
 
     pub fn commit(
         srs: &(SRS<P>, Vec<P::G1Affine>),
-        bivariate_polynomial: &BivariatePolynomial<P::Fr>,
-    ) -> Result<(ExtensionFieldElement<P>, Vec<P::G1Projective>), Error> {
+        bivariate_polynomial: &BivariatePolynomial<P::ScalarField>,
+    ) -> Result<(PairingOutput<P>, Vec<P::G1>), Error> {
         let (ip_srs, kzg_srs) = srs;
         let (ck, _) = ip_srs.get_commitment_keys();
         assert!(ck.len() >= bivariate_polynomial.y_polynomials.len());
@@ -190,7 +188,7 @@ impl<P: PairingEngine, D: Digest> BivariatePolynomialCommitment<P, D> {
             .chain(vec![UnivariatePolynomial::zero()].iter().cycle())
             .take(ck.len())
             .map(|y_polynomial| KZG::<P>::commit(kzg_srs, y_polynomial))
-            .collect::<Result<Vec<P::G1Projective>, Error>>()?;
+            .collect::<Result<Vec<P::G1>, Error>>()?;
 
         // Create AFGHO commitment to Y polynomial commitments
         Ok((
@@ -201,9 +199,9 @@ impl<P: PairingEngine, D: Digest> BivariatePolynomialCommitment<P, D> {
 
     pub fn open(
         srs: &(SRS<P>, Vec<P::G1Affine>),
-        bivariate_polynomial: &BivariatePolynomial<P::Fr>,
-        y_polynomial_comms: &Vec<P::G1Projective>,
-        point: &(P::Fr, P::Fr),
+        bivariate_polynomial: &BivariatePolynomial<P::ScalarField>,
+        y_polynomial_comms: &Vec<P::G1>,
+        point: &(P::ScalarField, P::ScalarField),
     ) -> Result<OpeningProof<P, D>, Error> {
         let (x, y) = point;
         let (ip_srs, kzg_srs) = srs;
@@ -212,7 +210,7 @@ impl<P: PairingEngine, D: Digest> BivariatePolynomialCommitment<P, D> {
 
         let precomp_time = start_timer!(|| "Computing coefficients and KZG commitment");
         let mut powers_of_x = vec![];
-        let mut cur = P::Fr::one();
+        let mut cur = P::ScalarField::one();
         for _ in 0..(ck_1.len()) {
             powers_of_x.push(cur);
             cur *= x;
@@ -225,24 +223,19 @@ impl<P: PairingEngine, D: Digest> BivariatePolynomialCommitment<P, D> {
             .take(ck_1.len())
             .map(|y_polynomial| {
                 let mut c = y_polynomial.coeffs.to_vec();
-                c.resize(kzg_srs.len(), <P::Fr>::zero());
+                c.resize(kzg_srs.len(), <P::ScalarField>::zero());
                 c
             })
-            .collect::<Vec<Vec<P::Fr>>>();
+            .collect::<Vec<Vec<P::ScalarField>>>();
         let y_eval_coeffs = (0..kzg_srs.len())
             .map(|j| {
                 (0..ck_1.len())
                     .map(|i| powers_of_x[i].clone() * &coeffs[i][j])
                     .sum()
             })
-            .collect::<Vec<P::Fr>>();
-        let y_eval_comm = VariableBaseMSM::multi_scalar_mul(
-            kzg_srs,
-            &y_eval_coeffs
-                .iter()
-                .map(|b| b.into_repr())
-                .collect::<Vec<_>>(),
-        );
+            .collect::<Vec<P::ScalarField>>();
+        // Can unwrap because y_eval_coeffs.len() is guaranteed to be equal to kzg_srs.len()
+        let y_eval_comm = P::G1::msm(kzg_srs, &y_eval_coeffs).unwrap();
         end_timer!(precomp_time);
 
         let ipa_time = start_timer!(|| "Computing IPA proof");
@@ -270,9 +263,9 @@ impl<P: PairingEngine, D: Digest> BivariatePolynomialCommitment<P, D> {
 
     pub fn verify(
         v_srs: &VerifierSRS<P>,
-        com: &ExtensionFieldElement<P>,
-        point: &(P::Fr, P::Fr),
-        eval: &P::Fr,
+        com: &PairingOutput<P>,
+        point: &(P::ScalarField, P::ScalarField),
+        eval: &P::ScalarField,
         proof: &OpeningProof<P, D>,
     ) -> Result<bool, Error> {
         let (x, y) = point;
@@ -290,12 +283,12 @@ impl<P: PairingEngine, D: Digest> BivariatePolynomialCommitment<P, D> {
     }
 }
 
-pub struct UnivariatePolynomialCommitment<P: PairingEngine, D: Digest> {
+pub struct UnivariatePolynomialCommitment<P: Pairing, D: Digest> {
     _pairing: PhantomData<P>,
     _digest: PhantomData<D>,
 }
 
-impl<P: PairingEngine, D: Digest> UnivariatePolynomialCommitment<P, D> {
+impl<P: Pairing, D: Digest> UnivariatePolynomialCommitment<P, D> {
     fn bivariate_degrees(univariate_degree: usize) -> (usize, usize) {
         //(((univariate_degree + 1) as f64).sqrt().ceil() as usize).next_power_of_two() - 1;
         let sqrt = (((univariate_degree + 1) as f64).sqrt().ceil() as usize).next_power_of_two();
@@ -312,10 +305,10 @@ impl<P: PairingEngine, D: Digest> UnivariatePolynomialCommitment<P, D> {
 
     fn bivariate_form(
         bivariate_degrees: (usize, usize),
-        polynomial: &UnivariatePolynomial<P::Fr>,
-    ) -> BivariatePolynomial<P::Fr> {
+        polynomial: &UnivariatePolynomial<P::ScalarField>,
+    ) -> BivariatePolynomial<P::ScalarField> {
         let (x_degree, y_degree) = bivariate_degrees;
-        let default_zero = vec![P::Fr::zero()];
+        let default_zero = vec![P::ScalarField::zero()];
         let mut coeff_iter = polynomial
             .coeffs
             .iter()
@@ -342,8 +335,8 @@ impl<P: PairingEngine, D: Digest> UnivariatePolynomialCommitment<P, D> {
 
     pub fn commit(
         srs: &(SRS<P>, Vec<P::G1Affine>),
-        polynomial: &UnivariatePolynomial<P::Fr>,
-    ) -> Result<(ExtensionFieldElement<P>, Vec<P::G1Projective>), Error> {
+        polynomial: &UnivariatePolynomial<P::ScalarField>,
+    ) -> Result<(PairingOutput<P>, Vec<P::G1>), Error> {
         let bivariate_degrees = Self::parse_bivariate_degrees_from_srs(srs);
         BivariatePolynomialCommitment::<P, D>::commit(
             srs,
@@ -353,9 +346,9 @@ impl<P: PairingEngine, D: Digest> UnivariatePolynomialCommitment<P, D> {
 
     pub fn open(
         srs: &(SRS<P>, Vec<P::G1Affine>),
-        polynomial: &UnivariatePolynomial<P::Fr>,
-        y_polynomial_comms: &Vec<P::G1Projective>,
-        point: &P::Fr,
+        polynomial: &UnivariatePolynomial<P::ScalarField>,
+        y_polynomial_comms: &Vec<P::G1>,
+        point: &P::ScalarField,
     ) -> Result<OpeningProof<P, D>, Error> {
         let (x_degree, y_degree) = Self::parse_bivariate_degrees_from_srs(srs);
         let y = point.clone();
@@ -371,9 +364,9 @@ impl<P: PairingEngine, D: Digest> UnivariatePolynomialCommitment<P, D> {
     pub fn verify(
         v_srs: &VerifierSRS<P>,
         max_degree: usize,
-        com: &ExtensionFieldElement<P>,
-        point: &P::Fr,
-        eval: &P::Fr,
+        com: &PairingOutput<P>,
+        point: &P::ScalarField,
+        eval: &P::ScalarField,
         proof: &OpeningProof<P, D>,
     ) -> Result<bool, Error> {
         let (_, y_degree) = Self::bivariate_degrees(max_degree);
@@ -411,7 +404,7 @@ mod tests {
         for _ in 0..BIVARIATE_X_DEGREE + 1 {
             let mut y_polynomial_coeffs = vec![];
             for _ in 0..BIVARIATE_Y_DEGREE + 1 {
-                y_polynomial_coeffs.push(<Bls12_381 as PairingEngine>::Fr::rand(&mut rng));
+                y_polynomial_coeffs.push(<Bls12_381 as Pairing>::ScalarField::rand(&mut rng));
             }
             y_polynomials.push(UnivariatePolynomial::from_coefficients_slice(
                 &y_polynomial_coeffs,
@@ -450,7 +443,7 @@ mod tests {
 
         let mut polynomial_coeffs = vec![];
         for _ in 0..UNIVARIATE_DEGREE + 1 {
-            polynomial_coeffs.push(<Bls12_381 as PairingEngine>::Fr::rand(&mut rng));
+            polynomial_coeffs.push(<Bls12_381 as Pairing>::ScalarField::rand(&mut rng));
         }
         let polynomial = UnivariatePolynomial::from_coefficients_slice(&polynomial_coeffs);
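Note: the `KZG::commit` and `KZG::open` rewrites above rely on `VariableBaseMSM::msm`, which in arkworks 0.4 returns `Err` when the bases and scalars have different lengths — hence the explicit `resize` of the coefficient vector before each `unwrap`. A hedged sketch of that length contract (assumes ark-bls12-381):

    use ark_bls12_381::{Fr, G1Projective};
    use ark_ec::{scalar_mul::variable_base::VariableBaseMSM, CurveGroup};
    use ark_ff::Zero;
    use ark_std::UniformRand;

    fn main() {
        let mut rng = ark_std::test_rng();
        let bases = G1Projective::normalize_batch(
            &(0..4).map(|_| G1Projective::rand(&mut rng)).collect::<Vec<_>>(),
        );
        let mut scalars: Vec<Fr> = (0..3).map(|_| Fr::rand(&mut rng)).collect();
        assert!(G1Projective::msm(&bases, &scalars).is_err()); // 4 bases vs 3 scalars
        scalars.resize(bases.len(), Fr::zero()); // pad, as the patch does
        assert!(G1Projective::msm(&bases, &scalars).is_ok());
    }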
diff --git a/ip_proofs/src/applications/poly_commit/transparent.rs b/ip_proofs/src/applications/poly_commit/transparent.rs
index 71f11c6..edd1935 100644
--- a/ip_proofs/src/applications/poly_commit/transparent.rs
+++ b/ip_proofs/src/applications/poly_commit/transparent.rs
@@ -1,7 +1,7 @@
-use ark_ec::PairingEngine;
+use ark_ec::pairing::{Pairing, PairingOutput};
 use ark_ff::{Field, Zero};
 use ark_poly::polynomial::{
-    univariate::DensePolynomial as UnivariatePolynomial, Polynomial, UVPolynomial,
+    univariate::DensePolynomial as UnivariatePolynomial, DenseUVPolynomial, Polynomial,
 };
 use ark_std::{end_timer, start_timer};
 
@@ -23,37 +23,35 @@ use ark_dh_commitments::{
     pedersen::PedersenCommitment,
     DoublyHomomorphicCommitment,
 };
-use ark_inner_products::{
-    ExtensionFieldElement, MultiexponentiationInnerProduct, ScalarInnerProduct,
-};
+use ark_inner_products::{MultiexponentiationInnerProduct, ScalarInnerProduct};
 
 type PolynomialEvaluationSecondTierIPA<P, D> = GIPAWithSSM<
-    MultiexponentiationInnerProduct<<P as PairingEngine>::G1Projective>,
+    MultiexponentiationInnerProduct<<P as Pairing>::G1>,
     AFGHOCommitmentG1<P>,
-    IdentityCommitment<<P as PairingEngine>::G1Projective, <P as PairingEngine>::Fr>,
+    IdentityCommitment<<P as Pairing>::G1, <P as Pairing>::ScalarField>,
     D,
 >;
 
 type PolynomialEvaluationSecondTierIPAProof<P, D> = GIPAProof<
-    MultiexponentiationInnerProduct<<P as PairingEngine>::G1Projective>,
+    MultiexponentiationInnerProduct<<P as Pairing>::G1>,
     AFGHOCommitmentG1<P>,
-    SSMPlaceholderCommitment<<P as PairingEngine>::Fr>,
-    IdentityCommitment<<P as PairingEngine>::G1Projective, <P as PairingEngine>::Fr>,
+    SSMPlaceholderCommitment<<P as Pairing>::ScalarField>,
+    IdentityCommitment<<P as Pairing>::G1, <P as Pairing>::ScalarField>,
     D,
 >;
 
 type PolynomialEvaluationFirstTierIPA<P, D> = GIPAWithSSM<
-    ScalarInnerProduct<<P as PairingEngine>::Fr>,
-    PedersenCommitment<<P as PairingEngine>::G1Projective>,
-    IdentityCommitment<<P as PairingEngine>::Fr, <P as PairingEngine>::Fr>,
+    ScalarInnerProduct<<P as Pairing>::ScalarField>,
+    PedersenCommitment<<P as Pairing>::G1>,
+    IdentityCommitment<<P as Pairing>::ScalarField, <P as Pairing>::ScalarField>,
     D,
 >;
 
 type PolynomialEvaluationFirstTierIPAProof<P, D> = GIPAProof<
-    ScalarInnerProduct<<P as PairingEngine>::Fr>,
-    PedersenCommitment<<P as PairingEngine>::G1Projective>,
-    SSMPlaceholderCommitment<<P as PairingEngine>::Fr>,
-    IdentityCommitment<<P as PairingEngine>::Fr, <P as PairingEngine>::Fr>,
+    ScalarInnerProduct<<P as Pairing>::ScalarField>,
+    PedersenCommitment<<P as Pairing>::G1>,
+    SSMPlaceholderCommitment<<P as Pairing>::ScalarField>,
+    IdentityCommitment<<P as Pairing>::ScalarField, <P as Pairing>::ScalarField>,
     D,
 >;
 
@@ -78,32 +76,32 @@ impl<F: Field> BivariatePolynomial<F> {
     }
 }
 
-pub struct OpeningProof<P: PairingEngine, D: Digest> {
+pub struct OpeningProof<P: Pairing, D: Digest> {
     second_tier_ip_proof: PolynomialEvaluationSecondTierIPAProof<P, D>,
-    y_eval_comm: P::G1Projective,
+    y_eval_comm: P::G1,
     first_tier_ip_proof: PolynomialEvaluationFirstTierIPAProof<P, D>,
 }
 
-pub struct BivariatePolynomialCommitment<P: PairingEngine, D: Digest> {
+pub struct BivariatePolynomialCommitment<P: Pairing, D: Digest> {
     _pairing: PhantomData<P>,
     _digest: PhantomData<D>,
 }
 
-impl<P: PairingEngine, D: Digest> BivariatePolynomialCommitment<P, D> {
+impl<P: Pairing, D: Digest> BivariatePolynomialCommitment<P, D> {
     pub fn setup<R: Rng>(
         rng: &mut R,
         x_degree: usize,
         y_degree: usize,
-    ) -> Result<(Vec<P::G1Projective>, Vec<P::G2Projective>), Error> {
+    ) -> Result<(Vec<P::G1>, Vec<P::G2>), Error> {
         let first_tier_ck = PolynomialEvaluationFirstTierIPA::<P, D>::setup(rng, y_degree + 1)?.0;
         let second_tier_ck = PolynomialEvaluationSecondTierIPA::<P, D>::setup(rng, x_degree + 1)?.0;
         Ok((first_tier_ck, second_tier_ck))
     }
 
     pub fn commit(
-        ck: &(Vec<P::G1Projective>, Vec<P::G2Projective>),
-        bivariate_polynomial: &BivariatePolynomial<P::Fr>,
-    ) -> Result<(ExtensionFieldElement<P>, Vec<P::G1Projective>), Error> {
+        ck: &(Vec<P::G1>, Vec<P::G2>),
+        bivariate_polynomial: &BivariatePolynomial<P::ScalarField>,
+    ) -> Result<(PairingOutput<P>, Vec<P::G1>), Error> {
         let (first_tier_ck, second_tier_ck) = ck;
         assert!(second_tier_ck.len() >= bivariate_polynomial.y_polynomials.len());
 
@@ -116,13 +114,10 @@ impl<P: PairingEngine, D: Digest> BivariatePolynomialCommitment<P, D> {
             .map(|y_polynomial| {
                 let mut coeffs = y_polynomial.coeffs.to_vec();
                 assert!(first_tier_ck.len() >= coeffs.len());
-                coeffs.resize(first_tier_ck.len(), <P::Fr>::zero());
-                PedersenCommitment::<<P as PairingEngine>::G1Projective>::commit(
-                    first_tier_ck,
-                    &coeffs,
-                )
+                coeffs.resize(first_tier_ck.len(), <P::ScalarField>::zero());
+                PedersenCommitment::<<P as Pairing>::G1>::commit(first_tier_ck, &coeffs)
             })
-            .collect::<Result<Vec<P::G1Projective>, Error>>()?;
+            .collect::<Result<Vec<P::G1>, Error>>()?;
 
         // Create AFGHO commitment to Y polynomial commitments
         Ok((
@@ -132,10 +127,10 @@ impl<P: PairingEngine, D: Digest> BivariatePolynomialCommitment<P, D> {
     }
 
     pub fn open(
-        ck: &(Vec<P::G1Projective>, Vec<P::G2Projective>),
-        bivariate_polynomial: &BivariatePolynomial<P::Fr>,
-        y_polynomial_comms: &Vec<P::G1Projective>,
-        point: &(P::Fr, P::Fr),
+        ck: &(Vec<P::G1>, Vec<P::G2>),
+        bivariate_polynomial: &BivariatePolynomial<P::ScalarField>,
+        y_polynomial_comms: &Vec<P::G1>,
+        point: &(P::ScalarField, P::ScalarField),
     ) -> Result<OpeningProof<P, D>, Error> {
         let (x, y) = point;
         let (first_tier_ck, second_tier_ck) = ck;
@@ -151,21 +146,19 @@ impl<P: PairingEngine, D: Digest> BivariatePolynomialCommitment<P, D> {
             .take(second_tier_ck.len())
             .map(|y_polynomial| {
                 let mut c = y_polynomial.coeffs.to_vec();
-                c.resize(first_tier_ck.len(), <P::Fr>::zero());
+                c.resize(first_tier_ck.len(), <P::ScalarField>::zero());
                 c
             })
-            .collect::<Vec<Vec<P::Fr>>>();
+            .collect::<Vec<Vec<P::ScalarField>>>();
         let y_eval_coeffs = (0..first_tier_ck.len())
             .map(|j| {
                 (0..second_tier_ck.len())
                     .map(|i| powers_of_x[i].clone() * &coeffs[i][j])
                     .sum()
             })
-            .collect::<Vec<P::Fr>>();
-        let y_eval_comm = PedersenCommitment::<<P as PairingEngine>::G1Projective>::commit(
-            first_tier_ck,
-            &y_eval_coeffs,
-        )?;
+            .collect::<Vec<P::ScalarField>>();
+        let y_eval_comm =
+            PedersenCommitment::<<P as Pairing>::G1>::commit(first_tier_ck, &y_eval_coeffs)?;
         end_timer!(precomp_time);
 
         let ipa_time = start_timer!(|| "Computing second tier IPA opening proof");
@@ -193,10 +186,10 @@ impl<P: PairingEngine, D: Digest> BivariatePolynomialCommitment<P, D> {
     }
 
     pub fn verify(
-        ck: &(Vec<P::G1Projective>, Vec<P::G2Projective>),
-        com: &ExtensionFieldElement<P>,
-        point: &(P::Fr, P::Fr),
-        eval: &P::Fr,
+        ck: &(Vec<P::G1>, Vec<P::G2>),
+        com: &PairingOutput<P>,
+        point: &(P::ScalarField, P::ScalarField),
+        eval: &P::ScalarField,
         proof: &OpeningProof<P, D>,
     ) -> Result<bool, Error> {
         let (first_tier_ck, second_tier_ck) = ck;
@@ -219,12 +212,12 @@ impl<P: PairingEngine, D: Digest> BivariatePolynomialCommitment<P, D> {
     }
 }
 
-pub struct UnivariatePolynomialCommitment<P: PairingEngine, D: Digest> {
+pub struct UnivariatePolynomialCommitment<P: Pairing, D: Digest> {
     _pairing: PhantomData<P>,
     _digest: PhantomData<D>,
 }
 
-impl<P: PairingEngine, D: Digest> UnivariatePolynomialCommitment<P, D> {
+impl<P: Pairing, D: Digest> UnivariatePolynomialCommitment<P, D> {
     fn bivariate_degrees(univariate_degree: usize) -> (usize, usize) {
         //(((univariate_degree + 1) as f64).sqrt().ceil() as usize).next_power_of_two() - 1;
         let sqrt = (((univariate_degree + 1) as f64).sqrt().ceil() as usize).next_power_of_two();
@@ -233,9 +226,7 @@ impl<P: PairingEngine, D: Digest> UnivariatePolynomialCommitment<P, D> {
         (sqrt / skew_factor - 1, sqrt * skew_factor - 1)
     }
 
-    fn parse_bivariate_degrees_from_ck(
-        ck: &(Vec<P::G1Projective>, Vec<P::G2Projective>),
-    ) -> (usize, usize) {
+    fn parse_bivariate_degrees_from_ck(ck: &(Vec<P::G1>, Vec<P::G2>)) -> (usize, usize) {
         let x_degree = ck.1.len() - 1;
         let y_degree = ck.0.len() - 1;
         (x_degree, y_degree)
@@ -243,10 +234,10 @@ impl<P: PairingEngine, D: Digest> UnivariatePolynomialCommitment<P, D> {
 
     fn bivariate_form(
         bivariate_degrees: (usize, usize),
-        polynomial: &UnivariatePolynomial<P::Fr>,
-    ) -> BivariatePolynomial<P::Fr> {
+        polynomial: &UnivariatePolynomial<P::ScalarField>,
+    ) -> BivariatePolynomial<P::ScalarField> {
         let (x_degree, y_degree) = bivariate_degrees;
-        let default_zero = vec![P::Fr::zero()];
+        let default_zero = vec![P::ScalarField::zero()];
         let mut coeff_iter = polynomial
             .coeffs
             .iter()
@@ -266,18 +257,15 @@ impl<P: PairingEngine, D: Digest> UnivariatePolynomialCommitment<P, D> {
         BivariatePolynomial { y_polynomials }
     }
 
-    pub fn setup<R: Rng>(
-        rng: &mut R,
-        degree: usize,
-    ) -> Result<(Vec<P::G1Projective>, Vec<P::G2Projective>), Error> {
+    pub fn setup<R: Rng>(rng: &mut R, degree: usize) -> Result<(Vec<P::G1>, Vec<P::G2>), Error> {
         let (x_degree, y_degree) = Self::bivariate_degrees(degree);
         BivariatePolynomialCommitment::<P, D>::setup(rng, x_degree, y_degree)
     }
 
     pub fn commit(
-        ck: &(Vec<P::G1Projective>, Vec<P::G2Projective>),
-        polynomial: &UnivariatePolynomial<P::Fr>,
-    ) -> Result<(ExtensionFieldElement<P>, Vec<P::G1Projective>), Error> {
+        ck: &(Vec<P::G1>, Vec<P::G2>),
+        polynomial: &UnivariatePolynomial<P::ScalarField>,
+    ) -> Result<(PairingOutput<P>, Vec<P::G1>), Error> {
         let bivariate_degrees = Self::parse_bivariate_degrees_from_ck(ck);
         BivariatePolynomialCommitment::<P, D>::commit(
             ck,
@@ -286,10 +274,10 @@ impl<P: PairingEngine, D: Digest> UnivariatePolynomialCommitment<P, D> {
     }
 
     pub fn open(
-        ck: &(Vec<P::G1Projective>, Vec<P::G2Projective>),
-        polynomial: &UnivariatePolynomial<P::Fr>,
-        y_polynomial_comms: &Vec<P::G1Projective>,
-        point: &P::Fr,
+        ck: &(Vec<P::G1>, Vec<P::G2>),
+        polynomial: &UnivariatePolynomial<P::ScalarField>,
+        y_polynomial_comms: &Vec<P::G1>,
+        point: &P::ScalarField,
     ) -> Result<OpeningProof<P, D>, Error> {
         let (x_degree, y_degree) = Self::parse_bivariate_degrees_from_ck(ck);
         let y = point.clone();
@@ -303,10 +291,10 @@ impl<P: PairingEngine, D: Digest> UnivariatePolynomialCommitment<P, D> {
     }
 
     pub fn verify(
-        ck: &(Vec<P::G1Projective>, Vec<P::G2Projective>),
-        com: &ExtensionFieldElement<P>,
-        point: &P::Fr,
-        eval: &P::Fr,
+        ck: &(Vec<P::G1>, Vec<P::G2>),
+        com: &PairingOutput<P>,
+        point: &P::ScalarField,
+        eval: &P::ScalarField,
         proof: &OpeningProof<P, D>,
     ) -> Result<bool, Error> {
         let (_, y_degree) = Self::parse_bivariate_degrees_from_ck(ck);
@@ -320,7 +308,6 @@ impl<P: PairingEngine, D: Digest> UnivariatePolynomialCommitment<P, D> {
 mod tests {
     use super::*;
     use ark_bls12_381::Bls12_381;
-    use ark_ec::PairingEngine;
     use ark_ff::UniformRand;
     use ark_std::rand::{rngs::StdRng, SeedableRng};
     use blake2::Blake2b;
@@ -345,7 +332,7 @@ mod tests {
         for _ in 0..BIVARIATE_X_DEGREE + 1 {
             let mut y_polynomial_coeffs = vec![];
             for _ in 0..BIVARIATE_Y_DEGREE + 1 {
-                y_polynomial_coeffs.push(<Bls12_381 as PairingEngine>::Fr::rand(&mut rng));
+                y_polynomial_coeffs.push(<Bls12_381 as Pairing>::ScalarField::rand(&mut rng));
             }
             y_polynomials.push(UnivariatePolynomial::from_coefficients_slice(
                 &y_polynomial_coeffs,
@@ -383,7 +370,7 @@ mod tests {
 
         let mut polynomial_coeffs = vec![];
         for _ in 0..UNIVARIATE_DEGREE + 1 {
-            polynomial_coeffs.push(<Bls12_381 as PairingEngine>::Fr::rand(&mut rng));
+            polynomial_coeffs.push(<Bls12_381 as Pairing>::ScalarField::rand(&mut rng));
         }
         let polynomial = UnivariatePolynomial::from_coefficients_slice(&polynomial_coeffs);
diff --git a/ip_proofs/src/gipa.rs b/ip_proofs/src/gipa.rs
index 7b5df29..fceb9dd 100644
--- a/ip_proofs/src/gipa.rs
+++ b/ip_proofs/src/gipa.rs
@@ -1,5 +1,5 @@
-use ark_ff::{to_bytes, Field, One};
-use ark_serialize::{CanonicalDeserialize, CanonicalSerialize, Read, SerializationError, Write};
+use ark_ff::{Field, One};
+use ark_serialize::{CanonicalDeserialize, CanonicalSerialize};
 use ark_std::rand::Rng;
 use ark_std::{end_timer, start_timer};
 use digest::Digest;
@@ -45,7 +45,9 @@ where
         (LMC::Output, RMC::Output, IPC::Output),
     )>,
     pub(crate) r_base: (LMC::Message, RMC::Message),
-    _gipa: PhantomData<GIPA<IP, LMC, RMC, IPC, D>>,
+    // The fn() is here because PhantomData<T> is Sync iff T is Sync, and these types are not all
+    // Sync
+    _gipa: PhantomData<fn() -> GIPA<IP, LMC, RMC, IPC, D>>,
 }
 
 #[derive(Clone)]
@@ -189,7 +191,7 @@ where
         let (mut m_a, mut m_b) = values;
         let (mut ck_a, mut ck_b, ck_t) = ck;
         let mut r_commitment_steps = Vec::new();
-        let mut r_transcript = Vec::new();
+        let mut r_transcript: Vec<LMC::Scalar> = Vec::new();
         assert!(m_a.len().is_power_of_two());
         let (m_base, ck_base) = 'recurse: loop {
             let recurse = start_timer!(|| format!("Recurse round size {}", m_a.len()));
@@ -236,10 +238,13 @@ where
                 let (c, c_inv) = 'challenge: loop {
                     let mut hash_input = Vec::new();
                     hash_input.extend_from_slice(&counter_nonce.to_be_bytes()[..]);
-                    //TODO: Should use CanonicalSerialize instead of ToBytes
-                    hash_input.extend_from_slice(&to_bytes![
-                        transcript, com_1.0, com_1.1, com_1.2, com_2.0, com_2.1, com_2.2
-                    ]?);
+                    transcript.serialize_uncompressed(&mut hash_input)?;
+                    com_1.0.serialize_uncompressed(&mut hash_input)?;
+                    com_1.1.serialize_uncompressed(&mut hash_input)?;
+                    com_1.2.serialize_uncompressed(&mut hash_input)?;
+                    com_2.0.serialize_uncompressed(&mut hash_input)?;
+                    com_2.1.serialize_uncompressed(&mut hash_input)?;
+                    com_2.2.serialize_uncompressed(&mut hash_input)?;
                     let c: LMC::Scalar = u128::from_be_bytes(
                         D::digest(&hash_input).as_slice()[0..16].try_into().unwrap(),
                     )
@@ -319,7 +324,7 @@ where
         proof: &GIPAProof<IP, LMC, RMC, IPC, D>,
     ) -> Result<((LMC::Output, RMC::Output, IPC::Output), Vec<LMC::Scalar>), Error> {
         let (mut com_a, mut com_b, mut com_t) = com;
-        let mut r_transcript = Vec::new();
+        let mut r_transcript: Vec<LMC::Scalar> = Vec::new();
         for (com_1, com_2) in proof.r_commitment_steps.iter().rev() {
             // Fiat-Shamir challenge
             let mut counter_nonce: usize = 0;
@@ -328,9 +333,13 @@ where
             let (c, c_inv) = 'challenge: loop {
                 let mut hash_input = Vec::new();
                 hash_input.extend_from_slice(&counter_nonce.to_be_bytes()[..]);
-                hash_input.extend_from_slice(&to_bytes![
-                    transcript, com_1.0,
com_1.1, com_1.2, com_2.0, com_2.1, com_2.2 - ]?); + transcript.serialize_uncompressed(&mut hash_input)?; + com_1.0.serialize_uncompressed(&mut hash_input)?; + com_1.1.serialize_uncompressed(&mut hash_input)?; + com_1.2.serialize_uncompressed(&mut hash_input)?; + com_2.0.serialize_uncompressed(&mut hash_input)?; + com_2.1.serialize_uncompressed(&mut hash_input)?; + com_2.2.serialize_uncompressed(&mut hash_input)?; let c: LMC::Scalar = u128::from_be_bytes( D::digest(&hash_input).as_slice()[0..16].try_into().unwrap(), ) @@ -437,7 +446,7 @@ where mod tests { use super::*; use ark_bls12_381::Bls12_381; - use ark_ec::PairingEngine; + use ark_ec::pairing::{Pairing, PairingOutput}; use ark_ff::UniformRand; use ark_std::rand::{rngs::StdRng, SeedableRng}; use blake2::Blake2b; @@ -449,21 +458,20 @@ mod tests { random_generators, }; use ark_inner_products::{ - ExtensionFieldElement, InnerProduct, MultiexponentiationInnerProduct, PairingInnerProduct, - ScalarInnerProduct, + InnerProduct, MultiexponentiationInnerProduct, PairingInnerProduct, ScalarInnerProduct, }; type GC1 = AFGHOCommitmentG1; type GC2 = AFGHOCommitmentG2; - type SC1 = PedersenCommitment<::G1Projective>; - type SC2 = PedersenCommitment<::G2Projective>; + type SC1 = PedersenCommitment<::G1>; + type SC2 = PedersenCommitment<::G2>; const TEST_SIZE: usize = 8; #[test] fn pairing_inner_product_test() { type IP = PairingInnerProduct; type IPC = - IdentityCommitment, ::Fr>; + IdentityCommitment, ::ScalarField>; type PairingGIPA = GIPA; let mut rng = StdRng::seed_from_u64(0u64); @@ -489,11 +497,9 @@ mod tests { #[test] fn multiexponentiation_inner_product_test() { - type IP = MultiexponentiationInnerProduct<::G1Projective>; - type IPC = IdentityCommitment< - ::G1Projective, - ::Fr, - >; + type IP = MultiexponentiationInnerProduct<::G1>; + type IPC = + IdentityCommitment<::G1, ::ScalarField>; type MultiExpGIPA = GIPA; let mut rng = StdRng::seed_from_u64(0u64); @@ -501,7 +507,7 @@ mod tests { let m_a = random_generators(&mut rng, TEST_SIZE); let mut m_b = Vec::new(); for _ in 0..TEST_SIZE { - m_b.push(::Fr::rand(&mut rng)); + m_b.push(::ScalarField::rand(&mut rng)); } let com_a = GC1::commit(&ck_a, &m_a).unwrap(); let com_b = SC1::commit(&ck_b, &m_b).unwrap(); @@ -522,9 +528,11 @@ mod tests { #[test] fn scalar_inner_product_test() { - type IP = ScalarInnerProduct<::Fr>; - type IPC = - IdentityCommitment<::Fr, ::Fr>; + type IP = ScalarInnerProduct<::ScalarField>; + type IPC = IdentityCommitment< + ::ScalarField, + ::ScalarField, + >; type ScalarGIPA = GIPA; let mut rng = StdRng::seed_from_u64(0u64); @@ -532,8 +540,8 @@ mod tests { let mut m_a = Vec::new(); let mut m_b = Vec::new(); for _ in 0..TEST_SIZE { - m_a.push(::Fr::rand(&mut rng)); - m_b.push(::Fr::rand(&mut rng)); + m_a.push(::ScalarField::rand(&mut rng)); + m_b.push(::ScalarField::rand(&mut rng)); } let com_a = SC2::commit(&ck_a, &m_a).unwrap(); let com_b = SC2::commit(&ck_b, &m_b).unwrap(); diff --git a/ip_proofs/src/tipa/mod.rs b/ip_proofs/src/tipa/mod.rs index 4b9cefc..d1731ec 100644 --- a/ip_proofs/src/tipa/mod.rs +++ b/ip_proofs/src/tipa/mod.rs @@ -1,7 +1,7 @@ -use ark_ec::{msm::FixedBaseMSM, PairingEngine, ProjectiveCurve}; -use ark_ff::{to_bytes, Field, One, PrimeField, UniformRand, Zero}; -use ark_poly::polynomial::{univariate::DensePolynomial, UVPolynomial}; -use ark_serialize::{CanonicalDeserialize, CanonicalSerialize, Read, SerializationError, Write}; +use ark_ec::{pairing::Pairing, scalar_mul::fixed_base::FixedBase, CurveGroup, Group}; +use ark_ff::{Field, One, PrimeField, 
UniformRand, Zero}; +use ark_poly::polynomial::{univariate::DensePolynomial, DenseUVPolynomial}; +use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; use ark_std::rand::Rng; use ark_std::{end_timer, start_timer}; use digest::Digest; @@ -24,9 +24,9 @@ pub mod structured_scalar_message; //TODO: Could generalize: Don't need TIPA over G1 and G2, would work with G1 and G1 or over different pairing engines pub trait TIPACompatibleSetup {} -impl TIPACompatibleSetup for PedersenCommitment {} -impl TIPACompatibleSetup for AFGHOCommitmentG1

{} -impl TIPACompatibleSetup for AFGHOCommitmentG2

{} +impl TIPACompatibleSetup for PedersenCommitment {} +impl TIPACompatibleSetup for AFGHOCommitmentG1

{} +impl TIPACompatibleSetup for AFGHOCommitmentG2

{} //TODO: May need to add "reverse" MultiexponentiationInnerProduct to allow for MIP with G2 messages (because TIP hard-coded G1 left and G2 right) pub struct TIPA { @@ -42,7 +42,7 @@ pub struct TIPA { pub struct TIPAProof where D: Digest, - P: PairingEngine, + P: Pairing, IP: InnerProduct< LeftMessage = LMC::Message, RightMessage = RMC::Message, @@ -60,14 +60,14 @@ where { gipa_proof: GIPAProof, final_ck: (LMC::Key, RMC::Key), - final_ck_proof: (P::G2Projective, P::G1Projective), + final_ck_proof: (P::G2, P::G1), _pair: PhantomData

, } impl Clone for TIPAProof where D: Digest, - P: PairingEngine, + P: Pairing, IP: InnerProduct< LeftMessage = LMC::Message, RightMessage = RMC::Message, @@ -94,24 +94,24 @@ where } #[derive(Clone)] -pub struct SRS { - pub g_alpha_powers: Vec, - pub h_beta_powers: Vec, - pub g_beta: P::G1Projective, - pub h_alpha: P::G2Projective, +pub struct SRS { + pub g_alpha_powers: Vec, + pub h_beta_powers: Vec, + pub g_beta: P::G1, + pub h_alpha: P::G2, } #[derive(Clone)] -pub struct VerifierSRS { - pub g: P::G1Projective, - pub h: P::G2Projective, - pub g_beta: P::G1Projective, - pub h_alpha: P::G2Projective, +pub struct VerifierSRS { + pub g: P::G1, + pub h: P::G2, + pub g_beta: P::G1, + pub h_alpha: P::G2, } //TODO: Change SRS to return reference iterator - requires changes to TIPA and GIPA signatures -impl SRS

{ - pub fn get_commitment_keys(&self) -> (Vec, Vec) { +impl SRS

{ + pub fn get_commitment_keys(&self) -> (Vec, Vec) { let ck_1 = self.h_beta_powers.iter().step_by(2).cloned().collect(); let ck_2 = self.g_alpha_powers.iter().step_by(2).cloned().collect(); (ck_1, ck_2) @@ -130,35 +130,34 @@ impl SRS

{ impl TIPA where D: Digest, - P: PairingEngine, + P: Pairing, IP: InnerProduct< LeftMessage = LMC::Message, RightMessage = RMC::Message, Output = IPC::Message, >, - LMC: DoublyHomomorphicCommitment + TIPACompatibleSetup, - RMC: DoublyHomomorphicCommitment - + TIPACompatibleSetup, + LMC: DoublyHomomorphicCommitment + TIPACompatibleSetup, + RMC: DoublyHomomorphicCommitment + TIPACompatibleSetup, IPC: DoublyHomomorphicCommitment, - LMC::Message: MulAssign, - RMC::Message: MulAssign, - IPC::Message: MulAssign, - IPC::Key: MulAssign, - LMC::Output: MulAssign, - RMC::Output: MulAssign, - IPC::Output: MulAssign, + LMC::Message: MulAssign, + RMC::Message: MulAssign, + IPC::Message: MulAssign, + IPC::Key: MulAssign, + LMC::Output: MulAssign, + RMC::Output: MulAssign, + IPC::Output: MulAssign, { pub fn setup(rng: &mut R, size: usize) -> Result<(SRS

, IPC::Key), Error> { - let alpha = ::rand(rng); - let beta = ::rand(rng); - let g = ::prime_subgroup_generator(); - let h = ::prime_subgroup_generator(); + let alpha = ::rand(rng); + let beta = ::rand(rng); + let g = ::generator(); + let h = ::generator(); Ok(( SRS { g_alpha_powers: structured_generators_scalar_power(2 * size - 1, &g, &alpha), h_beta_powers: structured_generators_scalar_power(2 * size - 1, &h, &beta), - g_beta: g.mul(beta.into_repr()), - h_alpha: h.mul(alpha.into_repr()), + g_beta: g * beta, + h_alpha: h * alpha, }, IPC::setup(rng, 1)?.pop().unwrap(), )) @@ -169,7 +168,7 @@ where values: (&[IP::LeftMessage], &[IP::RightMessage]), ck: (&[LMC::Key], &[RMC::Key], &IPC::Key), ) -> Result, Error> { - Self::prove_with_srs_shift(srs, values, ck, &::one()) + Self::prove_with_srs_shift(srs, values, ck, &::one()) } // Shifts KZG proof for left message by scalar r (used for efficient composition with aggregation protocols) @@ -178,7 +177,7 @@ where srs: &SRS

, values: (&[IP::LeftMessage], &[IP::RightMessage]), ck: (&[LMC::Key], &[RMC::Key], &IPC::Key), - r_shift: &P::Fr, + r_shift: &P::ScalarField, ) -> Result, Error> { // Run GIPA let (proof, aux) = >::prove_with_aux( @@ -197,12 +196,12 @@ where let c = loop { let mut hash_input = Vec::new(); hash_input.extend_from_slice(&counter_nonce.to_be_bytes()[..]); - //TODO: Should use CanonicalSerialize instead of ToBytes - hash_input.extend_from_slice(&to_bytes![ - transcript.first().unwrap(), - ck_a_final, - ck_b_final - ]?); + transcript + .first() + .unwrap() + .serialize_uncompressed(&mut hash_input)?; + ck_a_final.serialize_uncompressed(&mut hash_input)?; + ck_b_final.serialize_uncompressed(&mut hash_input)?; if let Some(c) = LMC::Scalar::from_random_bytes(&D::digest(&hash_input)) { break c; }; @@ -219,7 +218,7 @@ where let ck_b_kzg_opening = prove_commitment_key_kzg_opening( &srs.g_alpha_powers, &transcript, - &::one(), + &::one(), &c, )?; @@ -237,7 +236,7 @@ where com: (&LMC::Output, &RMC::Output, &IPC::Output), proof: &TIPAProof, ) -> Result { - Self::verify_with_srs_shift(v_srs, ck_t, com, proof, &::one()) + Self::verify_with_srs_shift(v_srs, ck_t, com, proof, &::one()) } pub fn verify_with_srs_shift( @@ -245,7 +244,7 @@ where ck_t: &IPC::Key, com: (&LMC::Output, &RMC::Output, &IPC::Output), proof: &TIPAProof, - r_shift: &P::Fr, + r_shift: &P::ScalarField, ) -> Result { let (base_com, transcript) = GIPA::verify_recursive_challenge_transcript(com, &proof.gipa_proof)?; @@ -260,12 +259,12 @@ where let c = loop { let mut hash_input = Vec::new(); hash_input.extend_from_slice(&counter_nonce.to_be_bytes()[..]); - //TODO: Should use CanonicalSerialize instead of ToBytes - hash_input.extend_from_slice(&to_bytes![ - transcript.first().unwrap(), - ck_a_final, - ck_b_final - ]?); + transcript + .first() + .unwrap() + .serialize_uncompressed(&mut hash_input)?; + ck_a_final.serialize_uncompressed(&mut hash_input)?; + ck_b_final.serialize_uncompressed(&mut hash_input)?; if let Some(c) = LMC::Scalar::from_random_bytes(&D::digest(&hash_input)) { break c; }; @@ -285,7 +284,7 @@ where &ck_b_final, &ck_b_proof, &transcript, - &::one(), + &::one(), &c, )?; @@ -302,7 +301,7 @@ where } } -pub fn prove_commitment_key_kzg_opening( +pub fn prove_commitment_key_kzg_opening( srs_powers: &Vec, transcript: &Vec, r_shift: &G::ScalarField, @@ -338,45 +337,39 @@ pub fn prove_commitment_key_kzg_opening( } //TODO: Figure out how to avoid needing two separate methods for verification of opposite groups -pub fn verify_commitment_key_g2_kzg_opening( +pub fn verify_commitment_key_g2_kzg_opening( v_srs: &VerifierSRS

, - ck_final: &P::G2Projective, - ck_opening: &P::G2Projective, - transcript: &Vec, - r_shift: &P::Fr, - kzg_challenge: &P::Fr, + ck_final: &P::G2, + ck_opening: &P::G2, + transcript: &Vec, + r_shift: &P::ScalarField, + kzg_challenge: &P::ScalarField, ) -> Result { let ck_polynomial_c_eval = polynomial_evaluation_product_form_from_transcript(transcript, kzg_challenge, r_shift); - Ok(P::pairing( - v_srs.g, - *ck_final - &v_srs.h.mul(ck_polynomial_c_eval.into_repr()), - ) == P::pairing( - v_srs.g_beta - &v_srs.g.mul(kzg_challenge.into_repr()), - *ck_opening, - )) + Ok( + P::pairing(v_srs.g, *ck_final - v_srs.h * ck_polynomial_c_eval) + == P::pairing(v_srs.g_beta - v_srs.g * kzg_challenge, *ck_opening), + ) } -pub fn verify_commitment_key_g1_kzg_opening( +pub fn verify_commitment_key_g1_kzg_opening( v_srs: &VerifierSRS

, - ck_final: &P::G1Projective, - ck_opening: &P::G1Projective, - transcript: &Vec, - r_shift: &P::Fr, - kzg_challenge: &P::Fr, + ck_final: &P::G1, + ck_opening: &P::G1, + transcript: &Vec, + r_shift: &P::ScalarField, + kzg_challenge: &P::ScalarField, ) -> Result { let ck_polynomial_c_eval = polynomial_evaluation_product_form_from_transcript(transcript, kzg_challenge, r_shift); - Ok(P::pairing( - *ck_final - &v_srs.g.mul(ck_polynomial_c_eval.into_repr()), - v_srs.h, - ) == P::pairing( - *ck_opening, - v_srs.h_alpha - &v_srs.h.mul(kzg_challenge.into_repr()), - )) + Ok( + P::pairing(*ck_final - v_srs.g * ck_polynomial_c_eval, v_srs.h) + == P::pairing(*ck_opening, v_srs.h_alpha - v_srs.h * kzg_challenge), + ) } -pub fn structured_generators_scalar_power( +pub fn structured_generators_scalar_power( num: usize, g: &G, s: &G::ScalarField, @@ -389,12 +382,11 @@ pub fn structured_generators_scalar_power( pow_s *= s; } - let window_size = FixedBaseMSM::get_mul_window_size(num); + let window_size = FixedBase::get_mul_window_size(num); - let scalar_bits = G::ScalarField::size_in_bits(); - let g_table = FixedBaseMSM::get_window_table(scalar_bits, window_size, g.clone()); - let powers_of_g = - FixedBaseMSM::multi_scalar_mul::(scalar_bits, window_size, &g_table, &powers_of_scalar); + let scalar_bits = G::ScalarField::MODULUS_BIT_SIZE as usize; + let g_table = FixedBase::get_window_table(scalar_bits, window_size, g.clone()); + let powers_of_g = FixedBase::msm::(scalar_bits, window_size, &g_table, &powers_of_scalar); powers_of_g } @@ -433,6 +425,7 @@ fn polynomial_coefficients_from_transcript(transcript: &Vec, r_shif mod tests { use super::*; use ark_bls12_381::Bls12_381; + use ark_ec::pairing::PairingOutput; use ark_std::rand::{rngs::StdRng, SeedableRng}; use blake2::Blake2b; @@ -444,14 +437,13 @@ mod tests { random_generators, }; use ark_inner_products::{ - ExtensionFieldElement, InnerProduct, MultiexponentiationInnerProduct, PairingInnerProduct, - ScalarInnerProduct, + InnerProduct, MultiexponentiationInnerProduct, PairingInnerProduct, ScalarInnerProduct, }; type GC1 = AFGHOCommitmentG1; type GC2 = AFGHOCommitmentG2; - type SC1 = PedersenCommitment<::G1Projective>; - type SC2 = PedersenCommitment<::G2Projective>; + type SC1 = PedersenCommitment<::G1>; + type SC2 = PedersenCommitment<::G2>; const TEST_SIZE: usize = 8; @@ -459,7 +451,7 @@ mod tests { fn pairing_inner_product_test() { type IP = PairingInnerProduct; type IPC = - IdentityCommitment, ::Fr>; + IdentityCommitment, ::ScalarField>; type PairingTIPA = TIPA; let mut rng = StdRng::seed_from_u64(0u64); @@ -480,11 +472,9 @@ mod tests { #[test] fn multiexponentiation_inner_product_test() { - type IP = MultiexponentiationInnerProduct<::G1Projective>; - type IPC = IdentityCommitment< - ::G1Projective, - ::Fr, - >; + type IP = MultiexponentiationInnerProduct<::G1>; + type IPC = + IdentityCommitment<::G1, ::ScalarField>; type MultiExpTIPA = TIPA; let mut rng = StdRng::seed_from_u64(0u64); @@ -494,7 +484,7 @@ mod tests { let m_a = random_generators(&mut rng, TEST_SIZE); let mut m_b = Vec::new(); for _ in 0..TEST_SIZE { - m_b.push(::Fr::rand(&mut rng)); + m_b.push(::ScalarField::rand(&mut rng)); } let com_a = GC1::commit(&ck_a, &m_a).unwrap(); let com_b = SC1::commit(&ck_b, &m_b).unwrap(); @@ -508,9 +498,11 @@ mod tests { #[test] fn scalar_inner_product_test() { - type IP = ScalarInnerProduct<::Fr>; - type IPC = - IdentityCommitment<::Fr, ::Fr>; + type IP = ScalarInnerProduct<::ScalarField>; + type IPC = IdentityCommitment< + ::ScalarField, + ::ScalarField, 
+ >; type ScalarTIPA = TIPA; let mut rng = StdRng::seed_from_u64(0u64); @@ -520,8 +512,8 @@ mod tests { let mut m_a = Vec::new(); let mut m_b = Vec::new(); for _ in 0..TEST_SIZE { - m_a.push(::Fr::rand(&mut rng)); - m_b.push(::Fr::rand(&mut rng)); + m_a.push(::ScalarField::rand(&mut rng)); + m_b.push(::ScalarField::rand(&mut rng)); } let com_a = SC2::commit(&ck_a, &m_a).unwrap(); let com_b = SC1::commit(&ck_b, &m_b).unwrap(); @@ -537,7 +529,7 @@ mod tests { fn pairing_inner_product_with_srs_shift_test() { type IP = PairingInnerProduct; type IPC = - IdentityCommitment, ::Fr>; + IdentityCommitment, ::ScalarField>; type PairingTIPA = TIPA; let mut rng = StdRng::seed_from_u64(0u64); @@ -550,18 +542,18 @@ mod tests { let com_a = GC1::commit(&ck_a, &m_a).unwrap(); let com_b = GC2::commit(&ck_b, &m_b).unwrap(); - let r_scalar = <::Fr>::rand(&mut rng); + let r_scalar = <::ScalarField>::rand(&mut rng); let r_vec = structured_scalar_power(TEST_SIZE, &r_scalar); let m_a_r = m_a .iter() .zip(&r_vec) - .map(|(a, r)| a.mul(r.into_repr())) - .collect::::G1Projective>>(); + .map(|(&a, r)| a * r) + .collect::::G1>>(); let ck_a_r = ck_a .iter() .zip(&r_vec) - .map(|(ck, r)| ck.mul(&r.inverse().unwrap().into_repr())) - .collect::::G2Projective>>(); + .map(|(&ck, r)| ck * r.inverse().unwrap()) + .collect::::G2>>(); let t = vec![IP::inner_product(&m_a_r, &m_b).unwrap()]; let com_t = IPC::commit(&vec![ck_t.clone()], &t).unwrap(); diff --git a/ip_proofs/src/tipa/structured_scalar_message.rs b/ip_proofs/src/tipa/structured_scalar_message.rs index 4fc459e..fe1ebf9 100644 --- a/ip_proofs/src/tipa/structured_scalar_message.rs +++ b/ip_proofs/src/tipa/structured_scalar_message.rs @@ -1,6 +1,6 @@ -use ark_ec::{group::Group, PairingEngine, ProjectiveCurve}; -use ark_ff::{to_bytes, Field, One, PrimeField, UniformRand, Zero}; -use ark_serialize::{CanonicalDeserialize, CanonicalSerialize, Read, SerializationError, Write}; +use ark_ec::{pairing::Pairing, Group}; +use ark_ff::{Field, One, PrimeField, UniformRand, Zero}; +use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; use ark_std::{cfg_iter, rand::Rng}; use ark_std::{end_timer, start_timer}; use digest::Digest; @@ -139,34 +139,34 @@ pub struct TIPAWithSSM { pub struct TIPAWithSSMProof where D: Digest, - P: PairingEngine, + P: Pairing, IP: InnerProduct, - LMC: DoublyHomomorphicCommitment + TIPACompatibleSetup, + LMC: DoublyHomomorphicCommitment + TIPACompatibleSetup, IPC: DoublyHomomorphicCommitment, IPC::Message: MulAssign, IPC::Key: MulAssign, IPC::Output: MulAssign, - LMC::Message: MulAssign, - LMC::Output: MulAssign, + LMC::Message: MulAssign, + LMC::Output: MulAssign, { gipa_proof: GIPAProof, IPC, D>, final_ck: LMC::Key, - final_ck_proof: P::G2Projective, + final_ck_proof: P::G2, _pairing: PhantomData

, } impl Clone for TIPAWithSSMProof where D: Digest, - P: PairingEngine, + P: Pairing, IP: InnerProduct, - LMC: DoublyHomomorphicCommitment + TIPACompatibleSetup, + LMC: DoublyHomomorphicCommitment + TIPACompatibleSetup, IPC: DoublyHomomorphicCommitment, IPC::Message: MulAssign, IPC::Key: MulAssign, IPC::Output: MulAssign, - LMC::Message: MulAssign, - LMC::Output: MulAssign, + LMC::Message: MulAssign, + LMC::Output: MulAssign, { fn clone(&self) -> Self { Self { @@ -181,28 +181,28 @@ where impl TIPAWithSSM where D: Digest, - P: PairingEngine, + P: Pairing, IP: InnerProduct, - LMC: DoublyHomomorphicCommitment + TIPACompatibleSetup, + LMC: DoublyHomomorphicCommitment + TIPACompatibleSetup, IPC: DoublyHomomorphicCommitment, - IPC::Message: MulAssign, - IPC::Key: MulAssign, - IPC::Output: MulAssign, - LMC::Message: MulAssign, - LMC::Output: MulAssign, + IPC::Message: MulAssign, + IPC::Key: MulAssign, + IPC::Output: MulAssign, + LMC::Message: MulAssign, + LMC::Output: MulAssign, { //TODO: Don't need full TIPA SRS since only using one set of powers pub fn setup(rng: &mut R, size: usize) -> Result<(SRS

, IPC::Key), Error> { - let alpha = ::rand(rng); - let beta = ::rand(rng); - let g = ::prime_subgroup_generator(); - let h = ::prime_subgroup_generator(); + let alpha = ::rand(rng); + let beta = ::rand(rng); + let g = ::generator(); + let h = ::generator(); Ok(( SRS { g_alpha_powers: structured_generators_scalar_power(2 * size - 1, &g, &alpha), h_beta_powers: structured_generators_scalar_power(2 * size - 1, &h, &beta), - g_beta: ::mul(&g, &beta), - h_alpha: ::mul(&h, &alpha), + g_beta: g * beta, + h_alpha: h * alpha, }, IPC::setup(rng, 1)?.pop().unwrap(), )) @@ -216,7 +216,7 @@ where // Run GIPA let gipa = start_timer!(|| "GIPA"); let (proof, aux) = - , IPC, D>>::prove_with_aux( + , IPC, D>>::prove_with_aux( values, ( ck.0, @@ -239,8 +239,11 @@ where let c = loop { let mut hash_input = Vec::new(); hash_input.extend_from_slice(&counter_nonce.to_be_bytes()[..]); - //TODO: Should use CanonicalSerialize instead of ToBytes - hash_input.extend_from_slice(&to_bytes![transcript.first().unwrap(), ck_a_final]?); + transcript + .first() + .unwrap() + .serialize_uncompressed(&mut hash_input)?; + ck_a_final.serialize_uncompressed(&mut hash_input)?; if let Some(c) = LMC::Scalar::from_random_bytes(&D::digest(&hash_input)) { break c; }; @@ -251,7 +254,7 @@ where let ck_a_kzg_opening = prove_commitment_key_kzg_opening( &srs.h_beta_powers, &transcript_inverse, - &::one(), + &::one(), &c, )?; end_timer!(ck_kzg); @@ -268,7 +271,7 @@ where v_srs: &VerifierSRS

, ck_t: &IPC::Key, com: (&LMC::Output, &IPC::Output), - scalar_b: &P::Fr, + scalar_b: &P::ScalarField, proof: &TIPAWithSSMProof, ) -> Result { let (base_com, transcript) = GIPA::verify_recursive_challenge_transcript( @@ -287,8 +290,11 @@ where let c = loop { let mut hash_input = Vec::new(); hash_input.extend_from_slice(&counter_nonce.to_be_bytes()[..]); - //TODO: Should use CanonicalSerialize instead of ToBytes - hash_input.extend_from_slice(&to_bytes![transcript.first().unwrap(), ck_a_final]?); + transcript + .first() + .unwrap() + .serialize_uncompressed(&mut hash_input)?; + ck_a_final.serialize_uncompressed(&mut hash_input)?; if let Some(c) = LMC::Scalar::from_random_bytes(&D::digest(&hash_input)) { break c; }; @@ -301,7 +307,7 @@ where &ck_a_final, &ck_a_proof, &transcript_inverse, - &::one(), + &::one(), &c, )?; @@ -309,10 +315,10 @@ where let mut power_2_b = scalar_b.clone(); let mut product_form = Vec::new(); for x in transcript.iter() { - product_form.push(::one() + &(x.inverse().unwrap() * &power_2_b)); + product_form.push(::one() + &(x.inverse().unwrap() * &power_2_b)); power_2_b *= &power_2_b.clone(); } - let b_base = cfg_iter!(product_form).product::(); + let b_base = cfg_iter!(product_form).product::(); // Verify base inner product commitment let (com_a, _, com_t) = base_com; @@ -347,17 +353,15 @@ mod tests { use ark_inner_products::{InnerProduct, MultiexponentiationInnerProduct, ScalarInnerProduct}; type GC1 = AFGHOCommitmentG1; - type SC1 = PedersenCommitment<::G1Projective>; + type SC1 = PedersenCommitment<::G1>; const TEST_SIZE: usize = 8; #[test] fn tipa_ssm_multiexponentiation_inner_product_test() { - type IP = MultiexponentiationInnerProduct<::G1Projective>; - type IPC = IdentityCommitment< - ::G1Projective, - ::Fr, - >; + type IP = MultiexponentiationInnerProduct<::G1>; + type IPC = + IdentityCommitment<::G1, ::ScalarField>; type MultiExpTIPA = TIPAWithSSM; let mut rng = StdRng::seed_from_u64(0u64); @@ -365,7 +369,7 @@ mod tests { let (ck_a, _) = srs.get_commitment_keys(); let v_srs = srs.get_verifier_key(); let m_a = random_generators(&mut rng, TEST_SIZE); - let b = <::Fr>::rand(&mut rng); + let b = <::ScalarField>::rand(&mut rng); let m_b = structured_scalar_power(TEST_SIZE, &b); let com_a = GC1::commit(&ck_a, &m_a).unwrap(); let t = vec![IP::inner_product(&m_a, &m_b).unwrap()]; @@ -387,18 +391,20 @@ mod tests { #[test] fn gipa_ssm_scalar_inner_product_test() { - type IP = ScalarInnerProduct<::Fr>; - type IPC = - IdentityCommitment<::Fr, ::Fr>; + type IP = ScalarInnerProduct<::ScalarField>; + type IPC = IdentityCommitment< + ::ScalarField, + ::ScalarField, + >; type ScalarGIPA = GIPAWithSSM; let mut rng = StdRng::seed_from_u64(0u64); let (ck_a, ck_t) = ScalarGIPA::setup(&mut rng, TEST_SIZE).unwrap(); let mut m_a = Vec::new(); for _ in 0..TEST_SIZE { - m_a.push(::Fr::rand(&mut rng)); + m_a.push(::ScalarField::rand(&mut rng)); } - let b = <::Fr>::rand(&mut rng); + let b = <::ScalarField>::rand(&mut rng); let m_b = structured_scalar_power(TEST_SIZE, &b); let com_a = SC1::commit(&ck_a, &m_a).unwrap(); let t = vec![IP::inner_product(&m_a, &m_b).unwrap()]; diff --git a/sipp/Cargo.toml b/sipp/Cargo.toml index 53a3a02..87069de 100644 --- a/sipp/Cargo.toml +++ b/sipp/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ark-sipp" -version = "0.3.0" +version = "0.4.0" authors = [ "Benedikt Bünz", "Mary Maller", @@ -21,9 +21,10 @@ edition = "2018" ################################# Dependencies ################################ [dependencies] -ark-ec = "0.3" -ark-ff = "0.3" -ark-std = 
"0.3" +ark-ec = "0.4" +ark-ff = "0.4" +ark-serialize = "0.4" +ark-std = "0.4" rayon = "1" rand_core = "0.6" rand_chacha = "0.3" @@ -33,7 +34,7 @@ digest = "0.9" blake2 = "0.9" csv = "1" serde = { version = "1", features = [ "derive" ] } -ark-bls12-377 = { version = "0.3", features = [ "curve" ] } +ark-bls12-377 = { version = "0.4", features = [ "curve" ] } [features] default = [ "parallel" ] diff --git a/sipp/examples/scaling-ipp.rs b/sipp/examples/scaling-ipp.rs index 09ffeb1..f731059 100644 --- a/sipp/examples/scaling-ipp.rs +++ b/sipp/examples/scaling-ipp.rs @@ -1,6 +1,6 @@ // For benchmarking use ark_bls12_377::*; -use ark_ec::ProjectiveCurve; +use ark_ec::{CurveGroup, Group}; use ark_ff::UniformRand; use ark_sipp::{rng::FiatShamirRng, SIPP}; use ark_std::rand::seq::SliceRandom; @@ -46,8 +46,8 @@ fn main() { a_s.push(g.double()); b_s.push(h.double()); } - let mut a_s = ProjectiveCurve::batch_normalization_into_affine(&a_s); - let mut b_s = ProjectiveCurve::batch_normalization_into_affine(&b_s); + let mut a_s = G1Projective::normalize_batch(&a_s); + let mut b_s = G2Projective::normalize_batch(&b_s); let r_s = vec![Fr::rand(&mut rng); a_s.len()]; let output_file = output_directory + &format!("/ipp-{}-threads.csv", num_threads); diff --git a/sipp/src/lib.rs b/sipp/src/lib.rs index 266e348..fa47091 100644 --- a/sipp/src/lib.rs +++ b/sipp/src/lib.rs @@ -2,12 +2,19 @@ #![deny(warnings, unused, missing_docs)] #![forbid(unsafe_code)] -use ark_ec::{msm::VariableBaseMSM, AffineCurve, PairingEngine, ProjectiveCurve}; -use ark_ff::{to_bytes, Field, One, PrimeField, UniformRand}; -use digest::Digest; -use rayon::prelude::*; use std::marker::PhantomData; +use ark_ec::{ + pairing::{MillerLoopOutput, Pairing, PairingOutput}, + scalar_mul::variable_base::VariableBaseMSM, + CurveGroup, +}; +use ark_ff::{Field, One, UniformRand}; +use ark_serialize::CanonicalSerialize; +use ark_std::Zero; +use digest::{generic_array::typenum::U32, Digest}; +use rayon::prelude::*; + /// Fiat-Shamir Rng pub mod rng; @@ -15,24 +22,28 @@ use rng::FiatShamirRng; /// SIPP is a inner-pairing product proof that allows a verifier to check an /// inner-pairing product over `n` elements with only a single pairing. -pub struct SIPP { +pub struct SIPP { _engine: PhantomData, _digest: PhantomData, } /// `Proof` contains the GT elements produced by the prover. // TODO(psi): why not just make Proof an alias since there's only one field? -pub struct Proof { - gt_elems: Vec<(E::Fqk, E::Fqk)>, +pub struct Proof { + gt_elems: Vec<(PairingOutput, PairingOutput)>, } -impl SIPP { +impl SIPP +where + E: Pairing, + D: Digest, +{ /// Produce a proof of the inner pairing product. pub fn prove( a: &[E::G1Affine], b: &[E::G2Affine], - r: &[E::Fr], - value: E::Fqk, + r: &[E::ScalarField], + value: PairingOutput, ) -> Result, ()> { assert_eq!(a.len(), b.len()); // Ensure the order of the input vectors is a power of 2 @@ -42,13 +53,17 @@ impl SIPP { assert_eq!(length.count_ones(), 1); let mut proof_vec = Vec::new(); // TODO(psi): should we also input a succinct bilinear group description to the rng? 
- let mut rng = FiatShamirRng::::from_seed(&to_bytes![a, b, r, value].unwrap()); + let mut rng = { + let mut seed = Vec::new(); + (a, b, r, value).serialize_uncompressed(&mut seed).unwrap(); + FiatShamirRng::::from_seed(&seed) + }; let a = a .into_par_iter() .zip(r) - .map(|(a, r)| a.mul(*r)) + .map(|(&a, r)| a * r) .collect::>(); - let mut a = E::G1Projective::batch_normalization_into_affine(&a); + let mut a = E::G1::normalize_batch(&a); let mut b = b.to_vec(); while length != 1 { @@ -62,31 +77,27 @@ impl SIPP { let z_l = product_of_pairings::(a_r, b_l); let z_r = product_of_pairings::(a_l, b_r); proof_vec.push((z_l, z_r)); - rng.absorb(&to_bytes![z_l, z_r].unwrap()); - let x: E::Fr = u128::rand(&mut rng).into(); + { + let mut buf = Vec::new(); + (z_l, z_r).serialize_uncompressed(&mut buf).unwrap(); + rng.absorb(&buf); + } + let x: E::ScalarField = u128::rand(&mut rng).into(); let a_proj = a_l .par_iter() .zip(a_r) - .map(|(a_l, a_r)| { - let mut temp = a_r.mul(x); - temp.add_assign_mixed(a_l); - temp - }) + .map(|(a_l, &a_r)| a_r * x + a_l) .collect::>(); - a = E::G1Projective::batch_normalization_into_affine(&a_proj); + a = E::G1::normalize_batch(&a_proj); let x_inv = x.inverse().unwrap(); let b_proj = b_l .par_iter() .zip(b_r) - .map(|(b_l, b_r)| { - let mut temp = b_r.mul(x_inv); - temp.add_assign_mixed(b_l); - temp - }) + .map(|(b_l, &b_r)| b_r * x_inv + b_l) .collect::>(); - b = E::G2Projective::batch_normalization_into_affine(&b_proj); + b = E::G2::normalize_batch(&b_proj); } Ok(Proof { @@ -98,8 +109,8 @@ impl SIPP { pub fn verify( a: &[E::G1Affine], b: &[E::G2Affine], - r: &[E::Fr], - claimed_value: E::Fqk, + r: &[E::ScalarField], + claimed_value: PairingOutput, proof: &Proof, ) -> Result { // Ensure the order of the input vectors is a power of 2 @@ -112,14 +123,24 @@ impl SIPP { assert_eq!(proof_len as f32, f32::log2(length as f32)); // TODO(psi): should we also input a succinct bilinear group description to the rng? 
- let mut rng = FiatShamirRng::::from_seed(&to_bytes![a, b, r, claimed_value].unwrap()); + let mut rng = { + let mut seed = Vec::new(); + (a, b, r, claimed_value) + .serialize_uncompressed(&mut seed) + .unwrap(); + FiatShamirRng::::from_seed(&seed) + }; let x_s = proof .gt_elems .iter() .map(|(z_l, z_r)| { - rng.absorb(&to_bytes![z_l, z_r].unwrap()); - let x: E::Fr = u128::rand(&mut rng).into(); + { + let mut buf = Vec::new(); + (*z_l, *z_r).serialize_uncompressed(&mut buf).unwrap(); + rng.absorb(&buf); + } + let x: E::ScalarField = u128::rand(&mut rng).into(); x }) .collect::>(); @@ -128,18 +149,16 @@ impl SIPP { ark_ff::batch_inversion(&mut x_invs); let z_prime = claimed_value - * &proof + + proof .gt_elems .par_iter() .zip(&x_s) .zip(&x_invs) - .map(|(((z_l, z_r), x), x_inv)| { - z_l.pow(x.into_repr()) * &z_r.pow(x_inv.into_repr()) - }) - .reduce(|| E::Fqk::one(), |a, b| a * &b); + .map(|(((z_l, z_r), x), x_inv)| (*z_l * x) + (*z_r * x_inv)) + .reduce(|| PairingOutput::::zero(), |a, b| a + b); - let mut s: Vec = vec![E::Fr::one(); length]; - let mut s_invs: Vec = vec![E::Fr::one(); length]; + let mut s: Vec = vec![E::ScalarField::one(); length]; + let mut s_invs: Vec = vec![E::ScalarField::one(); length]; // TODO(psi): batch verify for (j, (x, x_inv)) in x_s.into_iter().zip(x_invs).enumerate() { for i in 0..length { @@ -150,18 +169,10 @@ impl SIPP { } } - let s = s - .into_iter() - .zip(r) - .map(|(x, r)| (x * r).into_repr()) - .collect::>(); - let s_invs = s_invs - .iter() - .map(|x_inv| x_inv.into_repr()) - .collect::>(); + let s = s.into_iter().zip(r).map(|(x, r)| x * r).collect::>(); - let a_prime = VariableBaseMSM::multi_scalar_mul(&a, &s); - let b_prime = VariableBaseMSM::multi_scalar_mul(&b, &s_invs); + let a_prime = E::G1::msm(&a, &s).unwrap(); + let b_prime = E::G2::msm(&b, &s_invs).unwrap(); let accept = E::pairing(a_prime, b_prime) == z_prime; @@ -170,39 +181,45 @@ impl SIPP { } /// Compute the product of pairings of `r_i * a_i` and `b_i`. -pub fn product_of_pairings_with_coeffs( +pub fn product_of_pairings_with_coeffs( a: &[E::G1Affine], b: &[E::G2Affine], - r: &[E::Fr], -) -> E::Fqk { + r: &[E::ScalarField], +) -> PairingOutput { let a = a .into_par_iter() .zip(r) - .map(|(a, r)| a.mul(*r)) - .collect::>(); - let a = E::G1Projective::batch_normalization_into_affine(&a); - let elements = a - .par_iter() - .zip(b) - .map(|(a, b)| (E::G1Prepared::from(*a), E::G2Prepared::from(*b))) + .map(|(&a, r)| a * r) .collect::>(); - let num_chunks = elements.len() / rayon::current_num_threads(); - let num_chunks = if num_chunks == 0 { - elements.len() + let a = E::G1::normalize_batch(&a); + + let a = a.par_iter().map(E::G1Prepared::from).collect::>(); + let b = b.par_iter().map(E::G2Prepared::from).collect::>(); + + // We want to process N chunks in parallel where N is the number of threads available + let num_chunks = rayon::current_num_threads(); + let chunk_size = if num_chunks <= a.len() { + a.len() / num_chunks } else { - num_chunks + // More threads than elements. Just do it all in parallel + 1 }; - let ml_result = elements - .par_chunks(num_chunks) - .map(E::miller_loop) + + // Compute all the (partial) pairings and take the product. 
We have to take the product over + // P::TargetField because MillerLoopOutput doesn't impl Product + let ml_result = a + .par_chunks(chunk_size) + .zip(b.par_chunks(chunk_size)) + .map(|(aa, bb)| E::multi_miller_loop(aa.iter().cloned(), bb.iter().cloned()).0) .product(); - E::final_exponentiation(&ml_result).unwrap() + + E::final_exponentiation(MillerLoopOutput(ml_result)).unwrap() } /// Compute the product of pairings of `a` and `b`. #[must_use] -pub fn product_of_pairings(a: &[E::G1Affine], b: &[E::G2Affine]) -> E::Fqk { - let r = vec![E::Fr::one(); a.len()]; +pub fn product_of_pairings(a: &[E::G1Affine], b: &[E::G2Affine]) -> PairingOutput { + let r = vec![E::ScalarField::one(); a.len()]; product_of_pairings_with_coeffs::(a, b, &r) } @@ -214,7 +231,7 @@ mod tests { #[test] fn prove_and_verify_base_case() { - let mut rng = FiatShamirRng::::from_seed(&to_bytes![b"falafel"].unwrap()); + let mut rng = FiatShamirRng::::from_seed(b"falafel"); let mut a = Vec::with_capacity(32); let mut b = Vec::with_capacity(32); let mut r = Vec::with_capacity(32); @@ -231,6 +248,7 @@ mod tests { let proof = proof.unwrap(); let accept = SIPP::::verify(&a, &b, &r, z, &proof); + assert!(accept.is_ok()); assert!(accept.unwrap()); } diff --git a/sipp/src/rng.rs b/sipp/src/rng.rs index 7e22576..531fa7b 100644 --- a/sipp/src/rng.rs +++ b/sipp/src/rng.rs @@ -1,20 +1,28 @@ -use ark_ff::{FromBytes, ToBytes}; use ark_std::rand::{RngCore, SeedableRng}; -use digest::{generic_array::GenericArray, Digest}; +use digest::{ + generic_array::{typenum::U32, GenericArray}, + Digest, +}; use rand_chacha::ChaChaRng; use std::marker::PhantomData; /// A `SeedableRng` that refreshes its seed by hashing together the previous seed /// and the new seed material. // TODO: later: re-evaluate decision about ChaChaRng -pub struct FiatShamirRng { +pub struct FiatShamirRng +where + D: Digest, +{ r: ChaChaRng, seed: GenericArray, #[doc(hidden)] digest: PhantomData, } -impl RngCore for FiatShamirRng { +impl RngCore for FiatShamirRng +where + D: Digest, +{ #[inline] fn next_u32(&mut self) -> u32 { self.r.next_u32() @@ -36,19 +44,19 @@ impl RngCore for FiatShamirRng { } } -impl FiatShamirRng { +impl FiatShamirRng +where + D: Digest, +{ /// Create a new `Self` by initialzing with a fresh seed. /// `self.seed = H(self.seed || new_seed)`. #[inline] - pub fn from_seed<'a, T: 'a + ToBytes>(seed: &'a T) -> Self { - let mut bytes = Vec::new(); - seed.write(&mut bytes).expect("failed to convert to bytes"); - let seed = D::digest(&bytes); - let r_seed: [u8; 32] = FromBytes::read(seed.as_ref()).expect("failed to get [u32; 8]"); - let r = ChaChaRng::from_seed(r_seed); + pub fn from_seed(seed: &[u8]) -> Self { + let digested_seed = D::digest(&seed); + let r = ChaChaRng::from_seed(digested_seed.into()); Self { r, - seed, + seed: digested_seed, digest: PhantomData, } } @@ -56,12 +64,10 @@ impl FiatShamirRng { /// Refresh `self.seed` with new material. Achieved by setting /// `self.seed = H(self.seed || new_seed)`. #[inline] - pub fn absorb<'a, T: 'a + ToBytes>(&mut self, seed: &'a T) { - let mut bytes = Vec::new(); - seed.write(&mut bytes).expect("failed to convert to bytes"); + pub fn absorb(&mut self, seed: &[u8]) { + let mut bytes = seed.to_vec(); bytes.extend_from_slice(&self.seed); self.seed = D::digest(&bytes); - let seed: [u8; 32] = FromBytes::read(self.seed.as_ref()).expect("failed to get [u32; 8]"); - self.r = ChaChaRng::from_seed(seed); + self.r = ChaChaRng::from_seed(self.seed.into()); } }