chore: add benchmarks (#105)
Co-authored-by: Khashayar Barooti <khashayar.baroot@epfl.ch>
TomAFrench and Khashayar Barooti authored Jan 20, 2025
1 parent 35bf983 commit 2ca8b1a
Showing 11 changed files with 164 additions and 16 deletions.
49 changes: 49 additions & 0 deletions .github/workflows/benchmark.yml
@@ -0,0 +1,49 @@
name: Benchmarks

on:
push:
branches:
- main
pull_request:

jobs:
test:
name: Benchmark library
runs-on: ubuntu-latest
steps:
- name: Checkout sources
uses: actions/checkout@v4

- name: Install Nargo
uses: noir-lang/noirup@v0.1.3
with:
toolchain: 1.0.0-beta.0

- name: Install bb
run: |
npm install -g bbup
bbup -nv 1.0.0-beta.0
sudo apt install libc++-dev
- name: Build Noir benchmark programs
run: nargo export

- name: Generate gates report
run: ./scripts/build-gates-report.sh
env:
BACKEND: /home/runner/.bb/bb

- name: Compare gates reports
id: gates_diff
uses: noir-lang/noir-gates-diff@1931aaaa848a1a009363d6115293f7b7fc72bb87
with:
report: gates_report.json
summaryQuantile: 0.9 # only display the 10% most significant circuit size diffs in the summary (defaults to 20%)

- name: Add gates diff to sticky comment
if: github.event_name == 'pull_request' || github.event_name == 'pull_request_target'
uses: marocchino/sticky-pull-request-comment@v2
with:
# delete the comment in case changes no longer impact circuit sizes
delete: ${{ !steps.gates_diff.outputs.markdown }}
message: ${{ steps.gates_diff.outputs.markdown }}
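
To reproduce the CI gates report locally, the same two steps can be run by hand. A minimal sketch, assuming Nargo 1.0.0-beta.0 is installed and bb has been installed via bbup to ~/.bb/bb as in the job above:

    # Export the #[export] benchmark programs into ./export
    nargo export
    # Measure each exported program with bb and collect the results
    BACKEND=~/.bb/bb ./scripts/build-gates-report.sh
    # The report is written to gates_report.json in the repository root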
11 changes: 5 additions & 6 deletions .github/workflows/test.yml
@@ -16,10 +16,11 @@ jobs:
runs-on: ubuntu-latest
outputs:
noir_versions: ${{ steps.get_versions.outputs.versions }}

steps:
- name: Checkout sources
id: get_versions
run: |
run: |
# gh returns the Noir releases in reverse chronological order, so we keep every release from the latest back to (and including) the minimum supported version.
VERSIONS=$(gh release list -R noir-lang/noir --exclude-pre-releases --json tagName -q 'map(.tagName) | index(env.MINIMUM_NOIR_VERSION) as $index | if $index then .[0:$index+1] else [env.MINIMUM_NOIR_VERSION] end')
echo "versions=$VERSIONS"
@@ -58,13 +59,11 @@ jobs:
- name: Install Nargo
uses: noir-lang/noirup@v0.1.3
with:
toolchain: ${{ env.MINIMUM_NOIR_VERSION }}

toolchain: ${{env.MINIMUM_NOIR_VERSION}}
- name: Run formatter
run: nargo fmt --check


# This is a job which depends on all test jobs and reports the overall status.
# This is a job which depends on all test jobs and reports the overall status.
# This allows us to add/remove test jobs without having to update the required workflows.
tests-end:
name: Noir End
@@ -85,4 +84,4 @@ jobs:
fi
env:
# We treat any cancelled, skipped or failing jobs as a failure for the workflow as a whole.
FAIL: ${{ contains(needs.*.result, 'failure') || contains(needs.*.result, 'cancelled') || contains(needs.*.result, 'skipped') }}
FAIL: ${{ contains(needs.*.result, 'failure') || contains(needs.*.result, 'cancelled') || contains(needs.*.result, 'skipped') }}
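
The version-selection filter used in the matrix setup above can be exercised on its own. A minimal sketch with hypothetical release tags (in practice the list comes from gh release list -R noir-lang/noir, newest first):

    export MINIMUM_NOIR_VERSION=v0.36.0   # hypothetical minimum, for illustration only
    echo '[{"tagName":"v1.0.0-beta.1"},{"tagName":"v1.0.0-beta.0"},{"tagName":"v0.36.0"},{"tagName":"v0.35.0"}]' |
      jq -c 'map(.tagName) | index(env.MINIMUM_NOIR_VERSION) as $index | if $index then .[0:$index+1] else [env.MINIMUM_NOIR_VERSION] end'
    # prints ["v1.0.0-beta.1","v1.0.0-beta.0","v0.36.0"], i.e. everything newer than the minimum plus the minimum itself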
2 changes: 2 additions & 0 deletions .gitignore
@@ -1,2 +1,4 @@
target
.vscode/launch.json
export/*
gates_report.json
1 change: 0 additions & 1 deletion export/test_add_BN.json

This file was deleted.

1 change: 0 additions & 1 deletion info.sh

This file was deleted.

34 changes: 34 additions & 0 deletions scripts/build-gates-report.sh
@@ -0,0 +1,34 @@
#!/usr/bin/env bash
set -e

BACKEND=${BACKEND:-bb}

cd $(dirname "$0")/../

artifacts_path="./export"
artifacts=$(ls $artifacts_path)

echo "{\"programs\": [" > gates_report.json

# Number of artifacts, used to know when to write the final closing brace without a trailing comma
NUM_ARTIFACTS=$(ls -1q "$artifacts_path" | wc -l)

ITER="1"
for artifact in $artifacts; do
ARTIFACT_NAME=$(basename "$artifact")

GATES_INFO=$($BACKEND gates -b "$artifacts_path/$artifact")
MAIN_FUNCTION_INFO=$(echo $GATES_INFO | jq -r '.functions[0] | .name = "main"')
echo "{\"package_name\": \"$ARTIFACT_NAME\", \"functions\": [$MAIN_FUNCTION_INFO]" >> gates_report.json

if (($ITER == $NUM_ARTIFACTS)); then
echo "}" >> gates_report.json
else
echo "}, " >> gates_report.json
fi

ITER=$(( $ITER + 1 ))
done

echo "]}" >> gates_report.json

63 changes: 63 additions & 0 deletions src/benchmarks/bignum_benchmarks.nr
@@ -0,0 +1,63 @@
use crate::bignum::BigNum;

use crate::fields::bn254Fq::BN254_Fq_Params;
use crate::fields::U256::U256Params;

// the types we will be benchmarking

type Fq = BigNum<3, 254, BN254_Fq_Params>;
type BN256 = BigNum<3, 257, U256Params>;

// type Fq
// type BN256
// type BN381
// type BN2048
#[export]
fn bench_add_Fq(a: Fq, b: Fq) -> Fq {
a + b
}

#[export]
fn bench_sub_Fq(a: Fq, b: Fq) -> Fq {
a - b
}

#[export]
fn bench_mul_Fq(a: Fq, b: Fq) -> Fq {
a * b
}

#[export]
fn bench_div_Fq(a: Fq, b: Fq) -> Fq {
a / b
}

#[export]
fn bench_add_BN256(a: BN256, b: BN256) -> BN256 {
a + b
}

#[export]
fn bench_sub_BN256(a: BN256, b: BN256) -> BN256 {
a - b
}

#[export]
fn bench_mul_BN256(a: BN256, b: BN256) -> BN256 {
a * b
}

#[export]
fn bench_div_BN256(a: BN256, b: BN256) -> BN256 {
a / b
}

#[export]
fn bench_from_field_Fq(a: Field) -> Fq {
Fq::from(a)
}

#[export]
fn bench_from_field_BN256(a: Field) -> BN256 {
BN256::from(a)
}
1 change: 1 addition & 0 deletions src/benchmarks/mod.nr
@@ -0,0 +1 @@
mod bignum_benchmarks;
14 changes: 7 additions & 7 deletions src/fns/constrained_ops.nr
@@ -28,13 +21,6 @@ use crate::params::BigNumParams as P;
* umod
*/

/**
* @brief given an input seed, generate a pseudorandom BigNum value
* @details we hash the input seed into `modulus_bits * 2` bits of entropy,
* which is then reduced into a BigNum value
* We use a hash function that can be modelled as a random oracle
* This function *should* produce an output that is a uniformly randomly distributed value modulo BigNum::modulus()
**/
pub(crate) fn from_field<let N: u32, let MOD_BITS: u32>(
params: P<N, MOD_BITS>,
field: Field,
@@ -70,6 +63,13 @@ pub(crate) fn from_field<let N: u32, let MOD_BITS: u32>(
result
}

/**
* @brief given an input seed, generate a pseudorandom BigNum value
* @details we hash the input seed into `modulus_bits * 2` bits of entropy,
* which is then reduced into a BigNum value
* We use a hash function that can be modelled as a random oracle
* This function *should* produce an output that is a uniformly randomly distributed value modulo BigNum::modulus()
**/
pub(crate) fn derive_from_seed<let N: u32, let MOD_BITS: u32, let SeedBytes: u32>(
params: P<N, MOD_BITS>,
seed: [u8; SeedBytes],
3 changes: 3 additions & 0 deletions src/lib.nr
@@ -21,3 +21,6 @@ pub use runtime_bignum::RuntimeBigNum;

// Tests
mod tests;

// Benchmarks
mod benchmarks;
1 change: 0 additions & 1 deletion src/tests/bignum_test.nr
@@ -815,4 +815,3 @@ fn test_from_field_3_digits() {
};
assert(result == expected);
}
