Skip to content

Commit

Permalink
Update TOML field names
Browse files Browse the repository at this point in the history
  • Loading branch information
jimouris committed Feb 6, 2024
1 parent 1b7df5a commit a84e3a9
Show file tree
Hide file tree
Showing 6 changed files with 44 additions and 44 deletions.
32 changes: 16 additions & 16 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -29,30 +29,30 @@ Metrics with Prio) uses a different config. The contents that are shared between
all the config files are shown below:

```toml
data_bits = 8 # Number of bits of each string.
hist_buckets = 2          # Number of histogram buckets.
data_bits = 8 # Number of bits of each string.
hist_buckets = 2            # Number of histogram buckets.

# [mode] # Mode of operation, one of:
# [mode] # Mode of operation, one of:
# mode.weighted_heavy_hitters.threshold = 0.01
# mode.attribute_based_metrics.num_attributes = 10
# mode = "plain_metrics"

server_0 = "0.0.0.0:8000" # The `IP:port` for server 0.
server_1 = "0.0.0.0:8001" # The `IP:port` for server 1.
server_0 = "0.0.0.0:8000" # The `IP:port` for server 0.
server_1 = "0.0.0.0:8001" # The `IP:port` for server 1.

add_key_batch_size = 1000 # Size of RPC requests for transmitting keys.
flp_batch_size = 100000 # Size of RPC requests for transmitting FLPs.
add_report_share_batch_size = 1000 # Size of RPC requests for transmitting keys.
query_flp_batch_size = 100000 # Size of RPC requests for transmitting FLPs.

unique_buckets = 1000 # Zipf parameter
zipf_exponent = 1.03 # Zipf exponent
zipf_unique_buckets = 1000 # Zipf parameter
zipf_exponent = 1.03 # Zipf exponent
```

### 1. Weighted Heavy Hitters
[weighted-heavy-hitters.toml](./src/configs/weighted-heavy-hitters.toml)
```bash
...
```toml
# ...
mode.weighted_heavy_hitters.threshold = 0.01
...
# ...
```

#### Weighted Heavy Hitters: Aggregators
Expand All @@ -79,9 +79,9 @@ cargo run --release --bin driver -- --config src/configs/weighted-heavy-hitters.
### 2. Attribute-Based Metrics
[attribute-based-metrics.toml](./src/configs/attribute-based-metrics.toml)
```toml
...
# ...
mode.attribute_based_metrics.num_attributes = 10
...
# ...
```

#### Attribute-Based Metrics: Aggregators
Expand All @@ -108,9 +108,9 @@ cargo run --release --bin driver -- --config src/configs/attribute-based-metrics
### 3. Plain Metrics with Prio
[plain-metrics.toml](./src/configs/plain-metrics.toml)
```toml
...
# ...
mode = "plain_metrics"
...
# ...
```

#### Plain Metrics with Prio: Aggregators
Expand Down
30 changes: 15 additions & 15 deletions src/bin/driver.rs
Original file line number Diff line number Diff line change
Expand Up @@ -125,9 +125,9 @@ impl PlaintextReport {
}

fn generate_reports(cfg: &config::Config, mastic: &MasticHistogram) -> Vec<PlaintextReport> {
assert!(cfg.unique_buckets > 0);
assert!(cfg.zipf_unique_buckets > 0);

let reports = (0..cfg.unique_buckets)
let reports = (0..cfg.zipf_unique_buckets)
.into_par_iter()
.map(|_| {
let mut rng = thread_rng();
Expand Down Expand Up @@ -277,7 +277,7 @@ async fn add_reports(
malicious_percentage: f32,
) -> io::Result<()> {
let mut rng = rand::thread_rng();
let zipf = zipf::ZipfDistribution::new(cfg.unique_buckets, cfg.zipf_exponent).unwrap();
let zipf = zipf::ZipfDistribution::new(cfg.zipf_unique_buckets, cfg.zipf_exponent).unwrap();

let mut report_shares_0 = Vec::with_capacity(num_clients);
let mut report_shares_1 = Vec::with_capacity(num_clients);
Expand Down Expand Up @@ -344,11 +344,11 @@ async fn run_flp_queries(
client_1: &CollectorClient,
num_clients: usize,
) -> io::Result<()> {
// Receive FLP query responses in chunks of cfg.flp_batch_size to avoid having huge RPC messages.
// Receive FLP query responses in chunks of cfg.query_flp_batch_size to avoid having huge RPC messages.
let mut keep = vec![];
let mut start = 0;
while start < num_clients {
let end = std::cmp::min(num_clients, start + cfg.flp_batch_size);
let end = std::cmp::min(num_clients, start + cfg.query_flp_batch_size);

let req = RunFlpQueriesRequest { start, end };
let resp_0 = client_0.run_flp_queries(long_context(), req.clone());
Expand All @@ -372,7 +372,7 @@ async fn run_flp_queries(
.collect::<Vec<_>>(),
);

start += cfg.flp_batch_size;
start += cfg.query_flp_batch_size;
}

// Tree prune
Expand Down Expand Up @@ -493,7 +493,7 @@ async fn run_level_last(
// Receive counters in chunks to avoid having huge RPC messages.
let mut start = 0;
while start < num_clients {
let end = std::cmp::min(num_clients, start + cfg.flp_batch_size);
let end = std::cmp::min(num_clients, start + cfg.query_flp_batch_size);

let req = GetProofsRequest { start, end };
let resp_0 = client_0.get_proofs(long_context(), req.clone());
Expand All @@ -508,7 +508,7 @@ async fn run_level_last(
.all(|(&h0, &h1)| h0 == h1);
assert!(verified);

start += cfg.flp_batch_size;
start += cfg.query_flp_batch_size;
}

// Tree prune
Expand Down Expand Up @@ -544,8 +544,8 @@ async fn run_attribute_based_metrics(
attributes: &[Vec<bool>],
num_clients: usize,
) -> io::Result<()> {
for start in (0..num_clients).step_by(cfg.flp_batch_size) {
let end = std::cmp::min(num_clients, start + cfg.flp_batch_size);
for start in (0..num_clients).step_by(cfg.query_flp_batch_size) {
let end = std::cmp::min(num_clients, start + cfg.query_flp_batch_size);
let req = AttributeBasedMetricsValidateRequest {
attributes: attributes.to_vec(),
start,
Expand Down Expand Up @@ -621,8 +621,8 @@ async fn run_plain_metrics(
let chunk_length = histogram_chunk_length(mastic.input_len(), Mode::PlainMetrics);
let prio3 = Prio3::new_histogram(2, mastic.input_len(), chunk_length).unwrap();

for start in (0..num_clients).step_by(cfg.flp_batch_size) {
let end = std::cmp::min(num_clients, start + cfg.flp_batch_size);
for start in (0..num_clients).step_by(cfg.query_flp_batch_size) {
let end = std::cmp::min(num_clients, start + cfg.query_flp_batch_size);
let req = PlainMetricsValidateRequest { start, end };

// For each report, each aggregator evaluates the VIDPF on each of the attributes and returns
Expand Down Expand Up @@ -728,7 +728,7 @@ async fn main() -> io::Result<()> {
let mut responses = vec![];

for _ in 0..reqs_in_flight {
let this_batch = std::cmp::min(left_to_go, cfg.add_key_batch_size);
let this_batch = std::cmp::min(left_to_go, cfg.add_report_share_batch_size);
left_to_go -= this_batch;

if this_batch > 0 {
Expand Down Expand Up @@ -771,8 +771,8 @@ async fn main() -> io::Result<()> {
// Synthesize a set of attributes.
let attributes = {
let mut rng = rand::thread_rng();
let zipf =
zipf::ZipfDistribution::new(cfg.unique_buckets, cfg.zipf_exponent).unwrap();
let zipf = zipf::ZipfDistribution::new(cfg.zipf_unique_buckets, cfg.zipf_exponent)
.unwrap();
let mut unique_inputs = HashSet::with_capacity(num_attributes);
for _ in 0..num_attributes {
let client_index = zipf.sample(&mut rng);
Expand Down
8 changes: 4 additions & 4 deletions src/config.rs
Original file line number Diff line number Diff line change
Expand Up @@ -36,13 +36,13 @@ pub struct Config {

/// The servers will output the collection of strings that more than a `threshold` of clients
/// hold.
pub add_key_batch_size: usize,
pub add_report_share_batch_size: usize,

/// Similar to `add_key_batch_size` but with a greater threshold.
pub flp_batch_size: usize,
/// Similar to `add_report_share_batch_size` but with a greater threshold.
pub query_flp_batch_size: usize,

/// Zipf parameter: Number of distinct strings.
pub unique_buckets: usize,
pub zipf_unique_buckets: usize,

/// Mode of operation in which to test Mastic.
pub mode: Mode,
Expand Down
6 changes: 3 additions & 3 deletions src/configs/attribute-based-metrics.toml
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@ hist_buckets = 4
server_0 = "0.0.0.0:8000"
server_1 = "0.0.0.0:8001"

add_key_batch_size = 1000
flp_batch_size = 100000
unique_buckets = 1000
add_report_share_batch_size = 1000
query_flp_batch_size = 100000
zipf_unique_buckets = 1000
zipf_exponent = 1.03
6 changes: 3 additions & 3 deletions src/configs/plain-metrics.toml
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@ hist_buckets = 4
server_0 = "0.0.0.0:8000"
server_1 = "0.0.0.0:8001"

add_key_batch_size = 1000
flp_batch_size = 100000
unique_buckets = 1000
add_report_share_batch_size = 1000
query_flp_batch_size = 100000
zipf_unique_buckets = 1000
zipf_exponent = 1.03
6 changes: 3 additions & 3 deletions src/configs/weighted-heavy-hitters.toml
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@ hist_buckets = 4
server_0 = "0.0.0.0:8000"
server_1 = "0.0.0.0:8001"

add_key_batch_size = 1000
flp_batch_size = 100000
unique_buckets = 1000
add_report_share_batch_size = 1000
query_flp_batch_size = 100000
zipf_unique_buckets = 1000
zipf_exponent = 1.03

0 comments on commit a84e3a9

Please sign in to comment.