From ae3a252fb8359f2e021ba77059722bdac9847390 Mon Sep 17 00:00:00 2001 From: Roman Walch <9820846+rw0x0@users.noreply.github.com> Date: Thu, 26 Sep 2024 10:24:35 +0200 Subject: [PATCH] a lot of wip --- co-noir/co-ultrahonk/Cargo.toml | 1 + co-noir/co-ultrahonk/src/parse/builder.rs | 438 +++++++++++++++++- co-noir/co-ultrahonk/src/parse/types.rs | 46 ++ co-noir/ultrahonk/src/parse/acir_format.rs | 18 +- co-noir/ultrahonk/src/parse/builder.rs | 2 +- co-noir/ultrahonk/src/parse/mod.rs | 13 + co-noir/ultrahonk/src/parse/types.rs | 16 +- co-noir/ultrahonk/src/poseidon2/mod.rs | 14 - .../src/poseidon2/poseidon2_bn254.rs | 2 +- .../src/poseidon2/poseidon2_permutation.rs | 4 +- 10 files changed, 509 insertions(+), 45 deletions(-) diff --git a/co-noir/co-ultrahonk/Cargo.toml b/co-noir/co-ultrahonk/Cargo.toml index 6e07ffc6..56a24e45 100644 --- a/co-noir/co-ultrahonk/Cargo.toml +++ b/co-noir/co-ultrahonk/Cargo.toml @@ -16,3 +16,4 @@ ark-bn254.workspace = true mpc-core.workspace = true tracing.workspace = true ultrahonk.workspace = true +num-bigint.workspace = true diff --git a/co-noir/co-ultrahonk/src/parse/builder.rs b/co-noir/co-ultrahonk/src/parse/builder.rs index 2e40264a..e1b5d036 100644 --- a/co-noir/co-ultrahonk/src/parse/builder.rs +++ b/co-noir/co-ultrahonk/src/parse/builder.rs @@ -1,12 +1,18 @@ use ark_ec::pairing::Pairing; -use ark_ff::Zero; +use ark_ff::{One, Zero}; use mpc_core::traits::PrimeFieldMpcProtocol; +use num_bigint::BigUint; use std::collections::HashMap; use ultrahonk::{ - parse::types::{RamTranscript, RangeList, RomTranscript}, + parse::{ + field_from_hex_string, + types::{RamTranscript, RangeList, RomTranscript}, + }, AcirFormat, UltraCircuitBuilder, }; +use crate::parse::types::GateCounter; + #[derive(Clone, Debug)] pub enum BuilderFieldType where @@ -34,7 +40,7 @@ where pub(crate) zero_idx: u32, one_idx: u32, // pub(crate) blocks: GateBlocks, // Storage for wires and selectors for all gate types - // num_gates: usize, + num_gates: usize, 
circuit_finalized: bool, contains_recursive_proof: bool, // recursive_proof_public_input_indices: AggregationObjectPubInputIndices, @@ -99,13 +105,12 @@ where constraint_system.recursive, ); - todo!("Build constraints"); - // builder.build_constraints( - // constraint_system, - // has_valid_witness_assignments, - // honk_recursion, - // collect_gates_per_opcode, - // ); + builder.build_constraints( + constraint_system, + has_valid_witness_assignments, + honk_recursion, + collect_gates_per_opcode, + ); builder } @@ -134,7 +139,7 @@ where zero_idx: 0, one_idx: 1, // blocks: GateBlocks::default(), - // num_gates: 0, + num_gates: 0, circuit_finalized: false, contains_recursive_proof: false, // recursive_proof_public_input_indices: Default::default(), @@ -207,4 +212,415 @@ where self.real_variable_tags.push(Self::DUMMY_TAG); idx } + + fn build_constraints( + &mut self, + mut constraint_system: AcirFormat, + has_valid_witness_assignments: bool, + honk_recursion: bool, + collect_gates_per_opcode: bool, + ) { + tracing::info!("Builder build constraints"); + if collect_gates_per_opcode { + constraint_system + .gates_per_opcode + .resize(constraint_system.num_acir_opcodes as usize, 0); + } + + let mut gate_counter = GateCounter::new(collect_gates_per_opcode); + + // Add arithmetic gates + for (i, constraint) in constraint_system.poly_triple_constraints.iter().enumerate() { + self.create_poly_gate(constraint); + gate_counter.track_diff( + self, + &mut constraint_system.gates_per_opcode, + constraint_system + .original_opcode_indices + .poly_triple_constraints[i], + ); + } + for (i, constraint) in constraint_system.quad_constraints.iter().enumerate() { + self.create_big_mul_gate(constraint); + gate_counter.track_diff( + self, + &mut constraint_system.gates_per_opcode, + constraint_system.original_opcode_indices.quad_constraints[i], + ); + } + + // Add logic constraint + // for (i, constraint) in constraint_system.logic_constraints.iter().enumerate() { + // todo!("Logic 
gates"); + // } + + // Add range constraint + // for (i, constraint) in constraint_system.range_constraints.iter().enumerate() { + // todo!("range gates"); + // } + + // Add aes128 constraints + // for (i, constraint) in constraint_system.aes128_constraints.iter().enumerate() { + // todo!("aes128 gates"); + // } + + // Add sha256 constraints + // for (i, constraint) in constraint_system.sha256_constraints.iter().enumerate() { + // todo!("sha256 gates"); + // } + + // for (i, constraint) in constraint_system.sha256_compression.iter().enumerate() { + // todo!("sha256 compression gates"); + // } + + // Add schnorr constraints + // for (i, constraint) in constraint_system.schnorr_constraints.iter().enumerate() { + // todo!("schnorr gates"); + // } + + // Add ECDSA k1 constraints + // for (i, constraint) in constraint_system.ecdsa_k1_constraints.iter().enumerate() { + // todo!("ecdsa k1 gates"); + // } + + // Add ECDSA r1 constraints + // for (i, constraint) in constraint_system.ecdsa_r1_constraints.iter().enumerate() { + // todo!("ecdsa r1 gates"); + // } + + // Add blake2s constraints + // for (i, constraint) in constraint_system.blake2s_constraints.iter().enumerate() { + // todo!("blake2s gates"); + // } + + // Add blake3 constraints + // for (i, constraint) in constraint_system.blake3_constraints.iter().enumerate() { + // todo!("blake3 gates"); + // } + + // Add keccak constraints + // for (i, constraint) in constraint_system.keccak_constraints.iter().enumerate() { + // todo!("keccak gates"); + // } + + // for (i, constraint) in constraint_system.keccak_permutations.iter().enumerate() { + // todo!("keccak permutation gates"); + // } + + // Add pedersen constraints + // for (i, constraint) in constraint_system.pedersen_constraints.iter().enumerate() { + // todo!("pedersen gates"); + // } + + // for (i, constraint) in constraint_system.pedersen_hash_constraints.iter().enumerate() { + // todo!("pedersen hash gates"); + // } + + // Add poseidon2 constraints + // for (i,
constraint) in constraint_system.poseidon2_constraints.iter().enumerate() { + // todo!("poseidon2 gates"); + // } + + // Add multi scalar mul constraints + // for (i, constraint) in constraint_system.multi_scalar_mul_constraints.iter().enumerate() { + // todo!("multi scalar mul gates"); + // } + + // Add ec add constraints + // for (i, constraint) in constraint_system.ec_add_constraints.iter().enumerate() { + // todo!("ec add gates"); + // } + + // Add block constraints + for (i, constraint) in constraint_system.block_constraints.iter().enumerate() { + self.create_block_constraints(constraint, has_valid_witness_assignments); + if collect_gates_per_opcode { + let avg_gates_per_opcode = gate_counter.compute_diff(self) + / constraint_system.original_opcode_indices.block_constraints[i].len(); + for opcode_index in constraint_system.original_opcode_indices.block_constraints[i] + .iter() + .cloned() + { + constraint_system.gates_per_opcode[opcode_index] = avg_gates_per_opcode; + } + } + } + + // Add big_int constraints + // for (i, constraint) in constraint_system.bigint_from_le_bytes_constraints.iter().enumerate() { + // todo!("bigint from le bytes gates"); + // } + + // for (i, constraint) in constraint_system.bigint_operations.iter().enumerate() { + // todo!("bigint operations gates"); + // } + + // for (i, constraint) in constraint_system.bigint_to_le_bytes_constraints.iter().enumerate() { + // todo!("bigint to le bytes gates"); + // } + + // assert equals + for (i, constraint) in constraint_system.assert_equalities.iter().enumerate() { + todo!("assert equalities gates"); + } + + // RecursionConstraints + self.process_plonk_recursion_constraints( + &constraint_system, + has_valid_witness_assignments, + &mut gate_counter, + ); + self.process_honk_recursion_constraints( + &constraint_system, + has_valid_witness_assignments, + &mut gate_counter, + ); + + // If the circuit does not itself contain honk recursion constraints but is going to be + // proven with honk then 
recursively verified, add a default aggregation object + if constraint_system.honk_recursion_constraints.is_empty() + && honk_recursion + && self.is_recursive_circuit + { + // Set a default aggregation object if we don't have one. + let current_aggregation_object = self.init_default_agg_obj_indices(); + // Make sure the verification key records the public input indices of the + // final recursion output. + self.add_recursive_proof(current_aggregation_object); + } + } + + fn process_plonk_recursion_constraints( + &mut self, + constraint_system: &AcirFormat, + has_valid_witness_assignments: bool, + gate_counter: &mut GateCounter, + ) { + for (i, constraint) in constraint_system.recursion_constraints.iter().enumerate() { + todo!("Plonk recursion"); + } + } + + fn process_honk_recursion_constraints( + &mut self, + constraint_system: &AcirFormat, + has_valid_witness_assignments: bool, + gate_counter: &mut GateCounter, + ) { + { + for (i, constraint) in constraint_system + .honk_recursion_constraints + .iter() + .enumerate() + { + todo!("Honk recursion"); + } + } + } + + pub(crate) fn get_num_gates(&self) -> usize { + // if circuit finalized already added extra gates + if self.circuit_finalized { + return self.num_gates; + } + let mut count = 0; + let mut rangecount = 0; + let mut romcount = 0; + let mut ramcount = 0; + let mut nnfcount = 0; + self.get_num_gates_split_into_components( + &mut count, + &mut rangecount, + &mut romcount, + &mut ramcount, + &mut nnfcount, + ); + count + romcount + ramcount + rangecount + nnfcount + } + + pub(crate) fn get_tables_size(&self) -> usize { + let mut tables_size = 0; + for table in self.lookup_tables.iter() { + tables_size += table.len(); + } + + tables_size + } + + fn get_lookups_size(&self) -> usize { + let mut lookups_size = 0; + for table in self.lookup_tables.iter() { + lookups_size += table.lookup_gates.len(); + } + lookups_size + } + + fn get_num_gates_split_into_components( + &self, + count: &mut usize, + rangecount: &mut 
usize, + romcount: &mut usize, + ramcount: &mut usize, + nnfcount: &mut usize, + ) { + *count = self.num_gates; + + // each ROM gate adds +1 extra gate due to the rom reads being copied to a sorted list set + for rom_array in self.rom_arrays.iter() { + for state in rom_array.state.iter() { + if state[0] == Self::UNINITIALIZED_MEMORY_RECORD { + *romcount += 2; + } + } + *romcount += rom_array.records.len(); + *romcount += 1; // we add an addition gate after processing a rom array + } + + // each RAM gate adds +2 extra gates due to the ram reads being copied to a sorted list set, + // as well as an extra gate to validate timestamps + let mut ram_timestamps = Vec::with_capacity(self.ram_arrays.len()); + let mut ram_range_sizes = Vec::with_capacity(self.ram_arrays.len()); + let mut ram_range_exists = Vec::with_capacity(self.ram_arrays.len()); + for ram_array in self.ram_arrays.iter() { + for state in ram_array.state.iter() { + if *state == Self::UNINITIALIZED_MEMORY_RECORD { + *ramcount += Self::NUMBER_OF_GATES_PER_RAM_ACCESS; + } + } + *ramcount += ram_array.records.len() * Self::NUMBER_OF_GATES_PER_RAM_ACCESS; + *ramcount += Self::NUMBER_OF_ARITHMETIC_GATES_PER_RAM_ARRAY; // we add an addition gate after processing a ram array + + // there will be 'max_timestamp' number of range checks, need to calculate. + let max_timestamp = ram_array.access_count - 1; + + // if a range check of length `max_timestamp` already exists, we are double counting. + // We record `ram_timestamps` to detect and correct for this error when we process range lists.
+ ram_timestamps.push(max_timestamp); + let mut padding = (NUM_WIRES - (max_timestamp % NUM_WIRES)) % NUM_WIRES; + if max_timestamp == NUM_WIRES { + padding += NUM_WIRES; + } + let ram_range_check_list_size = max_timestamp + padding; + + let mut ram_range_check_gate_count = ram_range_check_list_size / NUM_WIRES; + ram_range_check_gate_count += 1; // we need to add 1 extra addition gates for every distinct range list + + ram_range_sizes.push(ram_range_check_gate_count); + ram_range_exists.push(false); + } + for list in self.range_lists.iter() { + let mut list_size = list.1.variable_indices.len(); + let mut padding = (NUM_WIRES - (list_size % NUM_WIRES)) % NUM_WIRES; + if list_size == NUM_WIRES { + padding += NUM_WIRES; + } + list_size += padding; + + for (time_stamp, ram_range_exist) in ram_timestamps + .iter() + .cloned() + .zip(ram_range_exists.iter_mut()) + { + if list.1.target_range as usize == time_stamp { + *ram_range_exist = true; + } + } + + *rangecount += list_size / NUM_WIRES; + *rangecount += 1; // we need to add 1 extra addition gates for every distinct range list + } + // update rangecount to include the ram range checks the composer will eventually be creating + for (ram_range_sizes, ram_range_exist) in ram_range_sizes + .into_iter() + .zip(ram_range_exists.into_iter()) + { + if !ram_range_exist { + *rangecount += ram_range_sizes; + } + } + + let mut nnf_copy = self.cached_partial_non_native_field_multiplications.clone(); + // update nnfcount + nnf_copy.sort(); + + nnf_copy.dedup(); + let num_nnf_ops = nnf_copy.len(); + *nnfcount = num_nnf_ops * Self::GATES_PER_NON_NATIVE_FIELD_MULTIPLICATION_ARITHMETIC; + } + + fn add_recursive_proof(&mut self, proof_output_witness_indices: AggregationObjectIndices) { + if self.contains_recursive_proof { + panic!("added recursive proof when one already exists"); + } + self.contains_recursive_proof = true; + + for (i, idx) in proof_output_witness_indices.into_iter().enumerate() { + self.set_public_input(idx); + 
self.recursive_proof_public_input_indices[i] = self.public_inputs.len() as u32 - 1; + } + } + + fn set_public_input(&mut self, witness_index: u32) { + for public_input in self.public_inputs.iter().cloned() { + if public_input == witness_index { + panic!("Attempted to set a public input that is already public!"); + } + } + self.public_inputs.push(witness_index); + } + + fn init_default_agg_obj_indices(&mut self) -> AggregationObjectIndices { + const NUM_LIMBS: usize = 4; + const NUM_LIMB_BITS: u32 = 68; + + let mask = (BigUint::one() << NUM_LIMB_BITS) - BigUint::one(); + + // TODO(https://github.com/AztecProtocol/barretenberg/issues/911): These are pairing points extracted from a valid + // proof. This is a workaround because we can't represent the point at infinity in biggroup yet. + let mut agg_obj_indices = AggregationObjectIndices::default(); + let x0 = field_from_hex_string::( + "0x031e97a575e9d05a107acb64952ecab75c020998797da7842ab5d6d1986846cf", + ) + .expect("x0 works"); + let y0 = field_from_hex_string::( + "0x178cbf4206471d722669117f9758a4c410db10a01750aebb5666547acf8bd5a4", + ) + .expect("y0 works"); + let x1 = field_from_hex_string::( + "0x0f94656a2ca489889939f81e9c74027fd51009034b3357f0e91b8a11e7842c38", + ) + .expect("x1 works"); + let y1 = field_from_hex_string::( + "0x1b52c2020d7464a0c80c0da527a08193fe27776f50224bd6fb128b46c1ddb67f", + ) + .expect("y1 works"); + + let mut agg_obj_indices_idx = 0; + let aggregation_object_fq_values = [x0, y0, x1, y1]; + + for val in aggregation_object_fq_values { + let mut x: BigUint = val.into(); + let x0 = &x & &mask; + x >>= NUM_LIMB_BITS; + let x1 = &x & &mask; + x >>= NUM_LIMB_BITS; + let x2 = &x & &mask; + x >>= NUM_LIMB_BITS; + let x3 = x; + + let val_limbs: [P::ScalarField; NUM_LIMBS] = [ + P::ScalarField::from(x0), + P::ScalarField::from(x1), + P::ScalarField::from(x2), + P::ScalarField::from(x3), + ]; + + for val in val_limbs { + let idx = self.add_variable(val); + agg_obj_indices[agg_obj_indices_idx] = idx; 
+ agg_obj_indices_idx += 1; + } + } + agg_obj_indices + } } diff --git a/co-noir/co-ultrahonk/src/parse/types.rs b/co-noir/co-ultrahonk/src/parse/types.rs index e69de29b..7216b1a8 100644 --- a/co-noir/co-ultrahonk/src/parse/types.rs +++ b/co-noir/co-ultrahonk/src/parse/types.rs @@ -0,0 +1,46 @@ +use crate::CoUltraCircuitBuilder; +use ark_ec::pairing::Pairing; +use mpc_core::traits::PrimeFieldMpcProtocol; + +pub(crate) struct GateCounter { + collect_gates_per_opcode: bool, + prev_gate_count: usize, +} + +impl GateCounter { + pub(crate) fn new(collect_gates_per_opcode: bool) -> Self { + Self { + collect_gates_per_opcode, + prev_gate_count: 0, + } + } + + pub(crate) fn compute_diff( + &mut self, + builder: &CoUltraCircuitBuilder, + ) -> usize + where + T: PrimeFieldMpcProtocol, + { + if !self.collect_gates_per_opcode { + return 0; + } + let new_gate_count = builder.get_num_gates(); + let diff = new_gate_count - self.prev_gate_count; + self.prev_gate_count = new_gate_count; + diff + } + + pub(crate) fn track_diff( + &mut self, + builder: &CoUltraCircuitBuilder, + gates_per_opcode: &mut [usize], + opcode_index: usize, + ) where + T: PrimeFieldMpcProtocol, + { + if self.collect_gates_per_opcode { + gates_per_opcode[opcode_index] = self.compute_diff(builder); + } + } +} diff --git a/co-noir/ultrahonk/src/parse/acir_format.rs b/co-noir/ultrahonk/src/parse/acir_format.rs index 144645c3..fc4df924 100644 --- a/co-noir/ultrahonk/src/parse/acir_format.rs +++ b/co-noir/ultrahonk/src/parse/acir_format.rs @@ -24,7 +24,7 @@ pub struct AcirFormat { // hashing in its transcript, while we still want a prove that uses Keccak for its transcript in order // to be able to verify SNARKs on Ethereum. 
pub recursive: bool, - pub(crate) num_acir_opcodes: u32, + pub num_acir_opcodes: u32, // using PolyTripleConstraint = bb::poly_triple_; pub public_inputs: Vec, // std::vector logic_constraints; @@ -44,30 +44,30 @@ pub struct AcirFormat { // std::vector poseidon2_constraints; // std::vector multi_scalar_mul_constraints; // std::vector ec_add_constraints; - pub(crate) recursion_constraints: Vec, - pub(crate) honk_recursion_constraints: Vec, + pub recursion_constraints: Vec, + pub honk_recursion_constraints: Vec, // std::vector ivc_recursion_constraints; // std::vector bigint_from_le_bytes_constraints; // std::vector bigint_to_le_bytes_constraints; // std::vector bigint_operations; - pub(crate) assert_equalities: Vec>, + pub assert_equalities: Vec>, // A standard plonk arithmetic constraint, as defined in the poly_triple struct, consists of selector values // for q_M,q_L,q_R,q_O,q_C and indices of three variables taking the role of left, right and output wire // This could be a large vector, we don't expect the blackbox implementations to be so large. - pub(crate) poly_triple_constraints: Vec>, - pub(crate) quad_constraints: Vec>, - pub(crate) block_constraints: Vec>, + pub poly_triple_constraints: Vec>, + pub quad_constraints: Vec>, + pub block_constraints: Vec>, // Number of gates added to the circuit per original opcode. // Has length equal to num_acir_opcodes. - pub(crate) gates_per_opcode: Vec, + pub gates_per_opcode: Vec, // Set of constrained witnesses pub(crate) constrained_witness: HashSet, // Indices of the original opcode that originated each constraint in AcirFormat. 
- pub(crate) original_opcode_indices: AcirFormatOriginalOpcodeIndices, + pub original_opcode_indices: AcirFormatOriginalOpcodeIndices, } impl AcirFormat { diff --git a/co-noir/ultrahonk/src/parse/builder.rs b/co-noir/ultrahonk/src/parse/builder.rs index d7ec4180..b8e2f597 100644 --- a/co-noir/ultrahonk/src/parse/builder.rs +++ b/co-noir/ultrahonk/src/parse/builder.rs @@ -10,11 +10,11 @@ use super::{ }; use crate::{ get_msb64, + parse::field_from_hex_string, parse::{ plookup::{MultiTableId, Plookup}, types::{FieldCT, GateCounter, RomRecord, RomTable, NUM_WIRES}, }, - poseidon2::field_from_hex_string, }; use ark_ec::pairing::Pairing; use ark_ff::{One, Zero}; diff --git a/co-noir/ultrahonk/src/parse/mod.rs b/co-noir/ultrahonk/src/parse/mod.rs index cabe66bd..b406e978 100644 --- a/co-noir/ultrahonk/src/parse/mod.rs +++ b/co-noir/ultrahonk/src/parse/mod.rs @@ -7,9 +7,22 @@ pub mod types; use acir::{circuit::Circuit, native_types::WitnessStack, FieldElement}; use acir_format::AcirFormat; +use ark_ff::PrimeField; +use eyre::Error; use noirc_artifacts::program::ProgramArtifact; +use num_bigint::BigUint; +use num_traits::Num; use std::io; +pub fn field_from_hex_string(str: &str) -> Result { + let tmp = match str.strip_prefix("0x") { + Some(t) => BigUint::from_str_radix(t, 16), + None => BigUint::from_str_radix(str, 16), + }; + + Ok(tmp?.into()) +} + fn read_circuit_from_file(path: &str) -> io::Result> { let program = std::fs::read_to_string(path)?; let program_artifact = serde_json::from_str::(&program)?; diff --git a/co-noir/ultrahonk/src/parse/types.rs b/co-noir/ultrahonk/src/parse/types.rs index 144f593f..0b5e9376 100644 --- a/co-noir/ultrahonk/src/parse/types.rs +++ b/co-noir/ultrahonk/src/parse/types.rs @@ -14,7 +14,7 @@ use std::hash::{Hash, Hasher}; use std::ops::{Index, IndexMut}; #[derive(Default, PartialEq, Eq)] -pub(crate) struct PolyTriple { +pub struct PolyTriple { pub(crate) a: u32, pub(crate) b: u32, pub(crate) c: u32, @@ -50,7 +50,7 @@ pub(crate) struct 
AddQuad { } #[derive(Default, PartialEq, Eq)] -pub(crate) struct MulQuad { +pub struct MulQuad { pub(crate) a: u32, pub(crate) b: u32, pub(crate) c: u32, @@ -85,7 +85,7 @@ impl Default for BlockType { } #[derive(Default)] -pub(crate) struct BlockConstraint { +pub struct BlockConstraint { pub(crate) init: Vec>, pub(crate) trace: Vec>, pub(crate) type_: BlockType, @@ -93,7 +93,7 @@ pub(crate) struct BlockConstraint { } #[derive(Default)] -pub(crate) struct AcirFormatOriginalOpcodeIndices { +pub struct AcirFormatOriginalOpcodeIndices { // pub(crate)logic_constraints: Vec, // pub(crate)range_constraints: Vec, // pub(crate)aes128_constraints: Vec, @@ -118,10 +118,10 @@ pub(crate) struct AcirFormatOriginalOpcodeIndices { // pub(crate)bigint_to_le_bytes_constraints: Vec, // pub(crate)bigint_operations: Vec, pub(crate) assert_equalities: Vec, - pub(crate) poly_triple_constraints: Vec, - pub(crate) quad_constraints: Vec, + pub poly_triple_constraints: Vec, + pub quad_constraints: Vec, // Multiple opcode indices per block: - pub(crate) block_constraints: Vec>, + pub block_constraints: Vec>, } pub(crate) struct UltraTraceBlocks { @@ -337,7 +337,7 @@ impl GateCounter { } } -pub(crate) struct RecursionConstraint { +pub struct RecursionConstraint { // An aggregation state is represented by two G1 affine elements. Each G1 point has // two field element coordinates (x, y). 
Thus, four field elements key: Vec, diff --git a/co-noir/ultrahonk/src/poseidon2/mod.rs b/co-noir/ultrahonk/src/poseidon2/mod.rs index 758de303..d408b0e4 100644 --- a/co-noir/ultrahonk/src/poseidon2/mod.rs +++ b/co-noir/ultrahonk/src/poseidon2/mod.rs @@ -1,17 +1,3 @@ pub mod poseidon2_bn254; pub mod poseidon2_params; pub mod poseidon2_permutation; - -use ark_ff::PrimeField; -use eyre::Error; -use num_bigint::BigUint; -use num_traits::Num; - -pub(super) fn field_from_hex_string(str: &str) -> Result { - let tmp = match str.strip_prefix("0x") { - Some(t) => BigUint::from_str_radix(t, 16), - None => BigUint::from_str_radix(str, 16), - }; - - Ok(tmp?.into()) -} diff --git a/co-noir/ultrahonk/src/poseidon2/poseidon2_bn254.rs b/co-noir/ultrahonk/src/poseidon2/poseidon2_bn254.rs index 50c5637d..5d398642 100644 --- a/co-noir/ultrahonk/src/poseidon2/poseidon2_bn254.rs +++ b/co-noir/ultrahonk/src/poseidon2/poseidon2_bn254.rs @@ -1,5 +1,5 @@ use super::poseidon2_params::Poseidon2Params; -use crate::poseidon2::field_from_hex_string; +use crate::parse::field_from_hex_string; use lazy_static::lazy_static; use std::sync::Arc; diff --git a/co-noir/ultrahonk/src/poseidon2/poseidon2_permutation.rs b/co-noir/ultrahonk/src/poseidon2/poseidon2_permutation.rs index c08c4d85..6599e8a4 100644 --- a/co-noir/ultrahonk/src/poseidon2/poseidon2_permutation.rs +++ b/co-noir/ultrahonk/src/poseidon2/poseidon2_permutation.rs @@ -171,7 +171,9 @@ impl FieldHash for Poseidon2< #[cfg(test)] mod test { use super::*; - use crate::poseidon2::{field_from_hex_string, poseidon2_bn254::POSEIDON2_BN254_T4_PARAMS}; + use crate::{ + parse::field_from_hex_string, poseidon2::poseidon2_bn254::POSEIDON2_BN254_T4_PARAMS, + }; use rand::thread_rng; const TESTRUNS: usize = 10;