diff --git a/Cargo.lock b/Cargo.lock
index 690da91..5707ac3 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -2,6 +2,15 @@
 # It is not intended for manual editing.
 version = 3
 
+[[package]]
+name = "aho-corasick"
+version = "1.1.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916"
+dependencies = [
+ "memchr",
+]
+
 [[package]]
 name = "anstream"
 version = "0.6.15"
@@ -385,6 +394,7 @@ dependencies = [
  "d4-oxide",
  "file_diff",
  "itertools",
+ "log",
  "nom",
  "num",
  "once_cell",
@@ -404,8 +414,11 @@ name = "ddnnife_bin"
 version = "0.7.0"
 dependencies = [
  "clap",
+ "csv",
  "ddnnife",
+ "log",
  "mimalloc",
+ "pretty_env_logger",
 ]
 
 [[package]]
@@ -442,6 +455,19 @@ version = "1.13.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "60b1af1c220855b6ceac025d3f6ecdd2b7c4894bfe9cd9bda4fbb4bc7c0d4cf0"
 
+[[package]]
+name = "env_logger"
+version = "0.10.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4cd405aab171cb85d6735e5c8d9db038c17d3ca007a4d2c25f337935c3d90580"
+dependencies = [
+ "humantime",
+ "is-terminal",
+ "log",
+ "regex",
+ "termcolor",
+]
+
 [[package]]
 name = "equivalent"
 version = "1.0.1"
@@ -608,6 +634,18 @@ version = "0.5.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea"
 
+[[package]]
+name = "hermit-abi"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "fbf6a919d6cf397374f7dfeeea91d974c7c0a7221d0d0f4f20d859d329e53fcc"
+
+[[package]]
+name = "humantime"
+version = "2.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4"
+
 [[package]]
 name = "indexmap"
 version = "2.4.0"
@@ -618,6 +656,17 @@ dependencies = [
  "hashbrown",
 ]
 
+[[package]]
+name = "is-terminal"
+version = "0.4.13"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "261f68e344040fbd0edea105bef17c66edf46f984ddb1115b775ce31be948f4b"
+dependencies = [
+ "hermit-abi",
+ "libc",
+ "windows-sys 0.52.0",
+]
+
 [[package]]
 name = "is_terminal_polyfill"
 version = "1.70.1"
@@ -939,6 +988,16 @@ dependencies = [
  "termtree",
 ]
 
+[[package]]
+name = "pretty_env_logger"
+version = "0.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "865724d4dbe39d9f3dd3b52b88d859d66bcb2d6a0acfd5ea68a65fb66d4bdc1c"
+dependencies = [
+ "env_logger",
+ "log",
+]
+
 [[package]]
 name = "proc-macro2"
 version = "1.0.86"
@@ -1021,11 +1080,34 @@ dependencies = [
  "bitflags",
 ]
 
+[[package]]
+name = "regex"
+version = "1.10.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4219d74c6b67a3654a9fbebc4b419e22126d13d2f3c4a07ee0cb61ff79a79619"
+dependencies = [
+ "aho-corasick",
+ "memchr",
+ "regex-automata",
+ "regex-syntax",
+]
+
 [[package]]
 name = "regex-automata"
 version = "0.4.7"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "38caf58cc5ef2fed281f89292ef23f6365465ed9a41b7a7754eb4e26496c92df"
+dependencies = [
+ "aho-corasick",
+ "memchr",
+ "regex-syntax",
+]
+
+[[package]]
+name = "regex-syntax"
+version = "0.8.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7a66a03ae7c801facd77a29370b4faec201768915ac14a721ba36f20bc9c209b"
 
 [[package]]
 name = "rustix"
diff --git a/Cargo.toml b/Cargo.toml
index 3c1aa63..2b0693e 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -10,7 +10,9 @@
 resolver = "2"
 
 [workspace.dependencies]
 clap = { version = "4.5", features = ["derive"] }
+csv = "1.3"
 ddnnife = { path = "ddnnife" }
+log = "0.4"
 mimalloc = "0.1"
 uniffi = { version = "0.28" }
diff --git a/ddnnife/Cargo.toml b/ddnnife/Cargo.toml
index bdd758a..f090111 100644
--- a/ddnnife/Cargo.toml
+++ b/ddnnife/Cargo.toml
@@ -17,9 +17,10 @@ crate-type = ["lib", "cdylib"]
 
 [dependencies]
 bitvec = "1.0"
-csv = "1.3"
+csv = { workspace = true }
 file_diff = "1.0.0"
 itertools = "0.13"
+log = { workspace = true }
 nom = "7.1"
 num = "0.4"
 once_cell = "1.19"
diff --git a/ddnnife/src/ddnnf/anomalies.rs b/ddnnife/src/ddnnf/anomalies.rs
index 6caff85..992e88a 100644
--- a/ddnnife/src/ddnnf/anomalies.rs
+++ b/ddnnife/src/ddnnf/anomalies.rs
@@ -5,30 +5,23 @@ pub mod false_optional;
 pub mod sat;
 pub mod t_wise_sampling;
 
-use std::{
-    fs::File,
-    io::{LineWriter, Write},
-};
-
 use crate::Ddnnf;
+use std::io::Write;
 
 impl Ddnnf {
     /// Takes a d-DNNF and writes the string representation into a file with the provided name
-    pub fn write_anomalies(&mut self, path_out: &str) -> std::io::Result<()> {
-        let file = File::create(path_out)?;
-        let mut file = LineWriter::with_capacity(1000, file);
-
+    pub fn write_anomalies(&mut self, mut output: impl Write) -> std::io::Result<()> {
         // core/dead features
         let mut core = self.core.clone().into_iter().collect::<Vec<i32>>();
         core.sort();
-        file.write_all(format!("core: {core:?}\n").as_bytes())?;
+        output.write_all(format!("core: {core:?}\n").as_bytes())?;
 
         // false-optionals
 
         // atomic sets
         let mut atomic_sets = self.get_atomic_sets(None, &[], false);
         atomic_sets.sort_unstable();
-        file.write_all(format!("atomic sets: {atomic_sets:?}\n").as_bytes())?;
+        output.write_all(format!("atomic sets: {atomic_sets:?}\n").as_bytes())?;
 
         Ok(())
     }
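Note on the `write_anomalies` change above: the function no longer creates the file itself but writes into any `impl Write` the caller supplies. A minimal sketch of the new call site; the paths `model.nnf` and `anomalies.txt` are hypothetical:

```rust
use ddnnife::parser::build_ddnnf;
use std::fs::File;
use std::io::BufWriter;

fn main() -> std::io::Result<()> {
    // Hypothetical input; any d-DNNF accepted by build_ddnnf works here.
    let mut ddnnf = build_ddnnf("model.nnf", None);

    // Write into a buffered file ...
    ddnnf.write_anomalies(BufWriter::new(File::create("anomalies.txt")?))?;

    // ... or capture the output in memory, since Vec<u8> implements Write.
    let mut buffer = Vec::new();
    ddnnf.write_anomalies(&mut buffer)?;
    print!("{}", String::from_utf8_lossy(&buffer));
    Ok(())
}
```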
diff --git a/ddnnife/src/ddnnf/anomalies/t_wise_sampling.rs b/ddnnife/src/ddnnf/anomalies/t_wise_sampling.rs
index ccee64b..97c5d41 100644
--- a/ddnnife/src/ddnnf/anomalies/t_wise_sampling.rs
+++ b/ddnnife/src/ddnnf/anomalies/t_wise_sampling.rs
@@ -7,16 +7,13 @@ mod sat_wrapper;
 mod t_iterator;
 mod t_wise_sampler;
 
-use crate::util::format_vec;
 use crate::Ddnnf;
 pub use config::Config;
 pub use sample::Sample;
 use sample_merger::similarity_merger::SimilarityMerger;
 use sample_merger::zipping_merger::ZippingMerger;
-use sampling_result::SamplingResult;
+pub use sampling_result::SamplingResult;
 use sat_wrapper::SatWrapper;
-use std::path::Path;
-use std::{fs, io, iter};
 use t_wise_sampler::TWiseSampler;
 
 #[cfg_attr(feature = "uniffi", uniffi::export)]
@@ -37,33 +34,6 @@ impl Ddnnf {
     }
 }
 
-pub fn save_sample_to_file(sampling_result: &SamplingResult, file_path: &str) -> io::Result<()> {
-    let file_path = Path::new(file_path);
-    if let Some(dir) = file_path.parent() {
-        fs::create_dir_all(dir)?;
-    }
-    let mut wtr = csv::Writer::from_path(file_path)?;
-
-    match sampling_result {
-        /*
-        Writing "true" and "false" to the file does not really fit the format of the file but we
-        want to somehow distinguish between true and false sampling results.
-        True means that the feature model contains no variables and therefore an empty sample
-        covers all t-wise interactions.
-        False means that the feature model is void.
-        */
-        SamplingResult::Empty => wtr.write_record(iter::once("true"))?,
-        SamplingResult::Void => wtr.write_record(iter::once("false"))?,
-        SamplingResult::ResultWithSample(sample) => {
-            for (index, config) in sample.iter().enumerate() {
-                wtr.write_record([index.to_string(), format_vec(config.get_literals().iter())])?;
-            }
-        }
-    }
-
-    wtr.flush()
-}
-
 #[cfg(test)]
 mod test {
     use itertools::Itertools;
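With `save_sample_to_file` removed and `SamplingResult` re-exported, persisting a sample is now the caller's job (the binary does exactly this in `main.rs` below). A sketch of what a caller might look like; the input path is hypothetical:

```rust
use ddnnife::ddnnf::anomalies::t_wise_sampling::SamplingResult;
use ddnnife::parser::build_ddnnf;
use ddnnife::util::format_vec;

fn main() {
    let mut ddnnf = build_ddnnf("model.nnf", None); // hypothetical input

    match ddnnf.sample_t_wise(2) {
        // No variables: the empty sample already covers all interactions.
        SamplingResult::Empty => println!("true"),
        // The model is void: there is nothing to sample.
        SamplingResult::Void => println!("false"),
        // Otherwise emit one indexed configuration per line.
        SamplingResult::ResultWithSample(sample) => {
            for (index, config) in sample.iter().enumerate() {
                println!("{},{}", index, format_vec(config.get_literals().iter()));
            }
        }
    }
}
```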
diff --git a/ddnnife/src/ddnnf/counting/features.rs b/ddnnife/src/ddnnf/counting/features.rs
index c4fd591..4622046 100644
--- a/ddnnife/src/ddnnf/counting/features.rs
+++ b/ddnnife/src/ddnnf/counting/features.rs
@@ -1,9 +1,20 @@
-use num::{BigRational, ToPrimitive};
-use std::error::Error;
-
 use super::super::Ddnnf;
+use num::{BigInt, BigRational, ToPrimitive};
+use std::error::Error;
 
 impl Ddnnf {
+    /// Computes, for every feature, its cardinality and its ratio to the overall count.
+    pub fn card_of_each_feature(&mut self) -> impl Iterator<Item = (i32, BigInt, f64)> + '_ {
+        self.annotate_partial_derivatives();
+        let rc = self.rc();
+        (1_i32..self.number_of_variables as i32 + 1).map(move |variable| {
+            let cardinality = self.card_of_feature_with_partial_derivatives(variable);
+            let ratio = BigRational::from((cardinality.clone(), rc.clone()))
+                .to_f64()
+                .unwrap();
+            (variable, cardinality, ratio)
+        })
+    }
+
     #[inline]
     /// Computes the cardinality of features for all features in a model.
     /// The results are saved in the file_path. The .csv ending always gets added to the user input.
@@ -18,30 +29,29 @@ impl Ddnnf {
     /// // create a ddnnf
     /// // and run the queries
     /// let mut ddnnf: Ddnnf = build_ddnnf("./tests/data/small_ex_c2d.nnf", None);
-    /// ddnnf.card_of_each_feature("./tests/data/smt_out.csv")
+    /// ddnnf.card_of_each_feature_csv("./tests/data/smt_out.csv")
     ///     .unwrap_or_default();
     /// let _rm = fs::remove_file("./tests/data/smt_out.csv");
     ///
     /// ```
-    pub fn card_of_each_feature(&mut self, file_path: &str) -> Result<(), Box<dyn Error>> {
+    pub fn card_of_each_feature_csv(&mut self, file_path: &str) -> Result<(), Box<dyn Error>> {
         self.annotate_partial_derivatives();
 
         // start the csv writer with the file_path
         let mut wtr = csv::Writer::from_path(file_path)?;
 
-        for work in 1_i32..self.number_of_variables as i32 + 1 {
-            let cardinality = self.card_of_feature_with_partial_derivatives(work);
-            wtr.write_record(vec![
-                work.to_string(),
-                cardinality.to_string(),
-                format!(
-                    "{:.10e}",
-                    BigRational::from((cardinality, self.rc()))
-                        .to_f64()
-                        .expect("Failed to convert rational!")
-                ),
-            ])?;
-        }
+        self.card_of_each_feature()
+            .for_each(|(variable, cardinality, ratio)| {
+                wtr.write_record(vec![
+                    variable.to_string(),
+                    cardinality.to_string(),
+                    format!(
+                        "{:.10e}",
+                        ratio.to_f64().expect("Failed to convert rational!")
+                    ),
+                ])
+                .unwrap();
+            });
 
         Ok(())
     }
@@ -61,10 +71,14 @@ mod test {
     fn card_multi_queries() {
         let mut ddnnf: Ddnnf = build_ddnnf("./tests/data/VP9_d4.nnf", Some(42));
         ddnnf.max_worker = 1;
-        ddnnf.card_of_each_feature("./tests/data/fcs.csv").unwrap();
+        ddnnf
+            .card_of_each_feature_csv("./tests/data/fcs.csv")
+            .unwrap();
 
         ddnnf.max_worker = 4;
-        ddnnf.card_of_each_feature("./tests/data/fcm.csv").unwrap();
+        ddnnf
+            .card_of_each_feature_csv("./tests/data/fcm.csv")
+            .unwrap();
 
         let mut is_single = File::open("./tests/data/fcs.csv").unwrap();
         let mut is_multi = File::open("./tests/data/fcm.csv").unwrap();
@@ -93,7 +107,7 @@ mod test {
         let mut ddnnf: Ddnnf = build_ddnnf("./tests/data/VP9_d4.nnf", Some(42));
         ddnnf.max_worker = 1;
 
-        ddnnf.card_of_each_feature(PD_FILE).unwrap();
+        ddnnf.card_of_each_feature_csv(PD_FILE).unwrap();
 
         let mut pd: File = File::open(PD_FILE).unwrap();
         let mut should_be = File::open(SHOULD_FILE).unwrap();
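The new `card_of_each_feature` iterator decouples the computation from CSV serialization; `card_of_each_feature_csv` is now a thin wrapper over it. A sketch of consuming the iterator directly, with a hypothetical input path:

```rust
use ddnnife::parser::build_ddnnf;

fn main() {
    let mut ddnnf = build_ddnnf("model.nnf", None); // hypothetical input

    // Each item is (variable, cardinality, cardinality / overall count).
    for (variable, cardinality, ratio) in ddnnf.card_of_each_feature() {
        println!("{variable},{cardinality},{ratio:.10e}");
    }
}
```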
diff --git a/ddnnife/src/ddnnf/heuristics.rs b/ddnnife/src/ddnnf/heuristics.rs
index 4439c5b..cf4368c 100644
--- a/ddnnife/src/ddnnf/heuristics.rs
+++ b/ddnnife/src/ddnnf/heuristics.rs
@@ -1,5 +1,6 @@
 use super::{node::NodeType::*, Ddnnf};
 use crate::Node;
+use log::info;
 
 impl Ddnnf {
     /// Computes and prints some heuristics including:
@@ -31,7 +32,7 @@ impl Ddnnf {
         }
 
         let node_count: u64 = self.nodes.len() as u64;
-        println!(
+        info!(
             "\nThe d-DNNF consists out of the following node types:\n\
             \t |-> {:?} out of {:?} are And nodes (≈{:.2}% of total)\n\
             \t |-> {:?} out of {:?} are Or nodes (≈{:.2}% of total)\n\
@@ -83,7 +84,7 @@ impl Ddnnf {
         }
 
         let node_count: u64 = self.nodes.len() as u64;
-        println!(
+        info!(
             "\nThe d-DNNF has the following information regarding node count:\n\
             \t |-> The overall count of child connections is {:?}\n\
             \t |-> The overall node count is {:?}.\n\
@@ -130,7 +131,7 @@ impl Ddnnf {
 
         let s_x: f64 = (derivation / length as f64).sqrt();
 
-        println!("\nThe d-DNNF has the following length attributes:\n\
+        info!("\nThe d-DNNF has the following length attributes:\n\
             \t |-> The shortest path is {:?} units long\n\
             \t |-> The longest path is {:?} units long\n\
             \t |-> The mean path is ≈{:.2} units long\n\
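These heuristics now go through `log::info!`, so they are silent unless a logger is installed. The binary installs `pretty_env_logger` (see `main.rs` below), which is configured via the `RUST_LOG` environment variable. A minimal sketch; the CLI invocation in the comment is an assumed example:

```rust
// Without an installed logger, the info! calls above are dropped silently.
// The binary wires up pretty_env_logger, driven by RUST_LOG,
// e.g. `RUST_LOG=info ddnnife --input model.nnf count` (hypothetical invocation).
fn main() {
    pretty_env_logger::init();
    log::info!("only visible when RUST_LOG allows the info level");
}
```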
diff --git a/ddnnife/src/ddnnf/multiple_queries.rs b/ddnnife/src/ddnnf/multiple_queries.rs
index 1aea7ec..01e7835 100644
--- a/ddnnife/src/ddnnf/multiple_queries.rs
+++ b/ddnnife/src/ddnnf/multiple_queries.rs
@@ -1,14 +1,6 @@
-use std::{
-    error::Error,
-    fs::File,
-    io::{BufWriter, Write},
-    sync::mpsc,
-    thread,
-};
-
-use workctl::WorkQueue;
-
 use crate::{parser, Ddnnf};
+use std::{error::Error, io::Write, sync::mpsc, thread};
+use workctl::WorkQueue;
 
 impl Ddnnf {
     #[inline]
@@ -19,12 +11,12 @@
         &mut self,
         operation: fn(&mut Ddnnf, &[i32]) -> T,
         path_in: &str,
-        path_out: &str,
+        output: impl Write,
     ) -> Result<(), Box<dyn Error>> {
         if self.max_worker == 1 {
-            self.queries_single_thread(operation, path_in, path_out)
+            self.queries_single_thread(operation, path_in, output)
         } else {
-            self.queries_multi_thread(operation, path_in, path_out)
+            self.queries_multi_thread(operation, path_in, output)
         }
     }
 
@@ -38,12 +30,8 @@
         &mut self,
         operation: fn(&mut Ddnnf, &[i32]) -> T,
         path_in: &str,
-        path_out: &str,
+        mut output: impl Write,
     ) -> Result<(), Box<dyn Error>> {
-        // start the file writer with the file_path
-        let f = File::create(path_out).expect("Unable to create file");
-        let mut wtr = BufWriter::new(f);
-
         let work_queue: Vec<(usize, Vec<i32>)> = parser::parse_queries_file(path_in);
 
         for (_, work) in &work_queue {
@@ -52,7 +40,7 @@
                 .fold(String::new(), |acc, &num| acc + &num.to_string() + " ");
             features_str.pop();
             let data = &format!("{},{}\n", features_str, cardinality.to_string());
-            wtr.write_all(data.as_bytes())?;
+            output.write_all(data.as_bytes())?;
         }
 
         Ok(())
@@ -65,7 +53,7 @@
         &mut self,
         operation: fn(&mut Ddnnf, &[i32]) -> T,
         path_in: &str,
-        path_out: &str,
+        mut output: impl Write,
    ) -> Result<(), Box<dyn Error>> {
         let work: Vec<(usize, Vec<i32>)> = parser::parse_queries_file(path_in);
         let mut queue = WorkQueue::with_capacity(work.len());
@@ -113,8 +101,6 @@
         }
 
         // start the file writer with the file_path
-        let f = File::create(path_out).expect("Unable to create file");
-        let mut wtr = BufWriter::new(f);
         let mut results = Vec::new();
 
         // Get completed work from the channel while there's work to be done.
@@ -140,7 +126,7 @@
                 .fold(String::new(), |acc, &num| acc + &num.to_string() + " ");
             features_str.pop();
             let data = &format!("{},{}\n", features_str, result.to_string());
-            wtr.write_all(data.as_bytes())?;
+            output.write_all(data.as_bytes())?;
         }
 
         // Just make sure that all the other threads are done.
@@ -150,7 +136,7 @@
 
         // Flush everything into the file that is still in a buffer
         // Now we finished writing the csv file
-        wtr.flush()?;
+        output.flush()?;
 
         // If everything worked as expected, then we can return Ok(()) and we are happy :D
         Ok(())
@@ -159,14 +145,13 @@
 
 #[cfg(test)]
 mod test {
-    use std::{
-        fs,
-        io::{BufRead, BufReader},
-    };
-
     use file_diff::diff_files;
     use itertools::Itertools;
     use num::BigInt;
+    use std::{
+        fs::{self, File},
+        io::{BufRead, BufReader, BufWriter},
+    };
 
     use crate::parser::build_ddnnf;
 
@@ -175,22 +160,21 @@ mod test {
     #[test]
     fn card_multi_queries() {
         let mut ddnnf: Ddnnf = build_ddnnf("./tests/data/VP9_d4.nnf", Some(42));
+
+        let output =
+            BufWriter::new(File::create("./tests/data/pcs.csv").expect("Unable to create file"));
+
         ddnnf.max_worker = 1;
         ddnnf
-            .queries_multi_thread(
-                Ddnnf::execute_query,
-                "./tests/data/VP9.config",
-                "./tests/data/pcs.csv",
-            )
+            .queries_multi_thread(Ddnnf::execute_query, "./tests/data/VP9.config", output)
             .unwrap();
 
+        let output =
+            BufWriter::new(File::create("./tests/data/pcm.csv").expect("Unable to create file"));
+
         ddnnf.max_worker = 4;
         ddnnf
-            .queries_multi_thread(
-                Ddnnf::execute_query,
-                "./tests/data/VP9.config",
-                "./tests/data/pcm.csv",
-            )
+            .queries_multi_thread(Ddnnf::execute_query, "./tests/data/VP9.config", output)
             .unwrap();
 
         let mut is_single = File::open("./tests/data/pcs.csv").unwrap();
         let mut is_multi = File::open("./tests/data/pcm.csv").unwrap();
@@ -215,12 +199,12 @@ mod test {
     #[test]
     fn sat_multi_queries() {
         let mut ddnnf: Ddnnf = build_ddnnf("./tests/data/VP9_d4.nnf", Some(42));
+
+        let output =
+            BufWriter::new(File::create("./tests/data/sat.csv").expect("Unable to create file"));
+
         ddnnf
-            .operate_on_queries(
-                Ddnnf::sat,
-                "./tests/data/VP9.config",
-                "./tests/data/sat.csv",
-            )
+            .operate_on_queries(Ddnnf::sat, "./tests/data/VP9.config", output)
             .unwrap();
 
         let sat_results = File::open("./tests/data/sat.csv").unwrap();
@@ -254,28 +238,27 @@ mod test {
         let mut ddnnf: Ddnnf = build_ddnnf("./tests/data/VP9_d4.nnf", Some(42));
         ddnnf.max_worker = 1;
 
+        let output =
+            BufWriter::new(File::create("./tests/data/pcs1.csv").expect("Unable to create file"));
+
         ddnnf
-            .queries_single_thread(
-                Ddnnf::execute_query,
-                "./tests/data/VP9.config",
-                "./tests/data/pcs1.csv",
-            )
+            .queries_single_thread(Ddnnf::execute_query, "./tests/data/VP9.config", output)
             .unwrap();
 
+        let output =
+            BufWriter::new(File::create("./tests/data/pcm1.csv").expect("Unable to create file"));
+
         ddnnf
-            .queries_multi_thread(
-                Ddnnf::execute_query,
-                "./tests/data/VP9.config",
-                "./tests/data/pcm1.csv",
-            )
+            .queries_multi_thread(Ddnnf::execute_query, "./tests/data/VP9.config", output)
             .unwrap();
 
         ddnnf.max_worker = 4;
+
+        let output =
+            BufWriter::new(File::create("./tests/data/pcm4.csv").expect("Unable to create file"));
+
         ddnnf
-            .queries_multi_thread(
-                Ddnnf::execute_query,
-                "./tests/data/VP9.config",
-                "./tests/data/pcm4.csv",
-            )
+            .queries_multi_thread(Ddnnf::execute_query, "./tests/data/VP9.config", output)
            .unwrap();
 
         let mut is_single = File::open("./tests/data/pcs1.csv").unwrap();
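Queries now stream into any `impl Write`, as the updated tests show. A sketch of the new call shape against a file sink; both paths here are hypothetical, and `queries.txt` is assumed to hold one space-separated query per line:

```rust
use ddnnife::ddnnf::Ddnnf;
use ddnnife::parser::build_ddnnf;
use std::fs::File;
use std::io::BufWriter;

fn main() {
    let mut ddnnf = build_ddnnf("model.nnf", None); // hypothetical paths throughout
    ddnnf.max_worker = 4;

    let output = BufWriter::new(File::create("queries.csv").expect("Unable to create file"));
    ddnnf
        .operate_on_queries(Ddnnf::execute_query, "queries.txt", output)
        .unwrap();
}
```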
diff --git a/ddnnife/src/parser.rs b/ddnnife/src/parser.rs
index bd09f97..f797cdd 100644
--- a/ddnnife/src/parser.rs
+++ b/ddnnife/src/parser.rs
@@ -1,15 +1,14 @@
 pub mod c2d_lexer;
-use c2d_lexer::{lex_line_c2d, C2DToken, TId};
-
 pub mod d4_lexer;
-use d4_lexer::{lex_line_d4, D4Token};
-
 pub mod from_cnf;
 pub mod persisting;
 
 use crate::ddnnf::{node::Node, node::NodeType, Ddnnf};
+use c2d_lexer::{lex_line_c2d, C2DToken, TId};
 use core::panic;
+use d4_lexer::{lex_line_d4, D4Token};
+use log::{error, warn};
 use num::BigInt;
 use petgraph::{
     graph::{EdgeIndex, NodeIndex},
@@ -145,10 +144,9 @@ pub fn distribute_building(
         }
         None => {
             // unknown standard or combination -> we assume d4 and choose total_features
-            // Bold, Yellow, Foreground Color (see https://gist.github.com/fnky/458719343aabd01cfb17a3a4f7296797)
-            println!("\x1b[1;38;5;226mWARNING: The first line of the file isn't a header and the option 'total_features' is not set. \
+            warn!("The first line of the file isn't a header and the option 'total_features' is not set. \
             Hence, we can't determine the number of variables and as a result, we might not be able to construct a valid ddnnf. \
-            Nonetheless, we build a ddnnf with our limited information, but we discourage using ddnnife in this manner.\n\x1b[0m"
+            Nonetheless, we build a ddnnf with our limited information, but we discourage using ddnnife in this manner."
             );
             build_d4_ddnnf(lines, None, clauses)
         }
@@ -421,8 +419,7 @@ fn build_d4_ddnnf(
                 TId::And => ddnnf_graph.remove_edge(n_edge).unwrap(),
                 TId::Or => (),
                 // should never happen
                 _ => {
-                    // Bold, Red, Foreground Color (see https://gist.github.com/fnky/458719343aabd01cfb17a3a4f7296797)
-                    eprintln!("\x1b[1;38;5;196mERROR: Unexpected Nodetype while encoutering a True node. Only OR and AND nodes can have children. Aborting...");
+                    error!("Unexpected node type while encountering a True node. Only OR and AND nodes can have children. Aborting...");
                     process::exit(1);
                 }
             };
@@ -433,7 +430,7 @@ fn build_d4_ddnnf(
                 TId::Or => ddnnf_graph.remove_edge(n_edge).unwrap(),
                 TId::And => delete_parent_and_chain(&mut ddnnf_graph, nx),
                 _ => {
-                    eprintln!("\x1b[1;38;5;196mERROR: Unexpected Nodetype while encoutering a False node. Only OR and AND nodes can have children. Aborting...");
+                    error!("Unexpected node type while encountering a False node. Only OR and AND nodes can have children. Aborting...");
                     process::exit(1);
                 }
             };
@@ -648,7 +645,7 @@ pub fn open_file_savely(path: &str) -> File {
         Ok(x) => x,
         Err(err) => {
             // Bold, Red, Foreground Color (see https://gist.github.com/fnky/458719343aabd01cfb17a3a4f7296797)
-            eprintln!("\x1b[1;38;5;196mERROR: The following error code occured while trying to open the file \"{}\":\n{}\nAborting...", path, err);
+            error!("The following error occurred while trying to open the file {}:\n{}\nAborting...", path, err);
             process::exit(1);
         }
     }
diff --git a/ddnnife/src/parser/persisting.rs b/ddnnife/src/parser/persisting.rs
index d42596d..532ae37 100644
--- a/ddnnife/src/parser/persisting.rs
+++ b/ddnnife/src/parser/persisting.rs
@@ -84,7 +84,7 @@ fn deconstruct_children(mut str: String, children: &[usize]) -> String {
 pub fn write_as_mermaid_md(
     ddnnf: &mut Ddnnf,
     features: &[i32],
-    path_out: &str,
+    mut output: impl Write,
 ) -> std::io::Result<()> {
     for node in ddnnf.nodes.iter_mut() {
         node.temp.clone_from(&node.count);
@@ -92,9 +92,6 @@ pub fn write_as_mermaid_md(
 
     ddnnf.operate_on_partial_config_marker(features, Ddnnf::calc_count_marked_node);
 
-    let file = File::create(path_out)?;
-    let mut lw = LineWriter::with_capacity(1000, file);
-
     let config = format!(
         "```mermaid\n\t\
         graph TD
@@ -113,10 +110,10 @@ pub fn write_as_mermaid_md(
         classDef marked stroke:#d90000, stroke-width:4px\n\n",
         features
     );
-    lw.write_all(config.as_bytes()).unwrap();
+    output.write_all(config.as_bytes()).unwrap();
 
     let marking = ddnnf.get_marked_nodes_clone(features);
-    lw.write_all(mermaidify_nodes(ddnnf, &marking).as_bytes())?;
-    lw.write_all(b"```").unwrap();
+    output.write_all(mermaidify_nodes(ddnnf, &marking).as_bytes())?;
+    output.write_all(b"```").unwrap();
 
     Ok(())
 }
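`write_as_mermaid_md` likewise takes an arbitrary writer now, so the mermaid markdown can land in memory as well as in a file. A sketch; the input path and the query `[1, -2]` are hypothetical:

```rust
use ddnnife::parser::{build_ddnnf, persisting::write_as_mermaid_md};

fn main() -> std::io::Result<()> {
    let mut ddnnf = build_ddnnf("model.nnf", None); // hypothetical input

    // Render the d-DNNF, marked for the query [1, -2], into an in-memory buffer.
    let mut markdown = Vec::new();
    write_as_mermaid_md(&mut ddnnf, &[1, -2], &mut markdown)?;
    print!("{}", String::from_utf8_lossy(&markdown));
    Ok(())
}
```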
diff --git a/ddnnife/tests/cardinalities.rs b/ddnnife/tests/cardinalities.rs
index 668acaf..e50bcc9 100644
--- a/ddnnife/tests/cardinalities.rs
+++ b/ddnnife/tests/cardinalities.rs
@@ -2,14 +2,14 @@ use ddnnife::ddnnf::Ddnnf;
 use ddnnife::parser;
 use file_diff::diff_files;
 use serial_test::serial;
-use std::fs;
-use std::fs::File;
+use std::fs::{self, File};
+use std::io::BufWriter;
 
 #[test]
 fn card_of_features_c2d() {
     let c2d_out = "./tests/data/auto1_c2d_fs.csv";
     let mut ddnnf: Ddnnf = parser::build_ddnnf("./tests/data/auto1_c2d.nnf", None);
-    ddnnf.card_of_each_feature(c2d_out).unwrap_or_default();
+    ddnnf.card_of_each_feature_csv(c2d_out).unwrap_or_default();
 
     let mut should = File::open("./tests/data/auto1_sb_fs.csv").unwrap();
     let mut is = File::open(c2d_out).unwrap();
@@ -23,7 +23,7 @@ fn card_of_features_c2d() {
 fn card_of_features_d4() {
     let d4_out = "./tests/data/auto1_d4_fs.csv";
     let mut ddnnf: Ddnnf = parser::build_ddnnf("./tests/data/auto1_d4.nnf", Some(2513));
-    ddnnf.card_of_each_feature(d4_out).unwrap_or_default();
+    ddnnf.card_of_each_feature_csv(d4_out).unwrap_or_default();
 
     let mut should = File::open("./tests/data/auto1_sb_fs.csv").unwrap();
     let mut is = File::open(d4_out).unwrap();
@@ -39,7 +39,7 @@ fn card_of_features_d4() {
 fn card_of_features_cnf() {
     let cnf_out = "./tests/data/auto1_cnf_fs.csv";
     let mut ddnnf: Ddnnf = parser::build_ddnnf("./tests/data/auto1.cnf", None);
-    ddnnf.card_of_each_feature(cnf_out).unwrap_or_default();
+    ddnnf.card_of_each_feature_csv(cnf_out).unwrap_or_default();
 
     let mut should = File::open("./tests/data/auto1_sb_fs.csv").unwrap();
     let mut is = File::open(cnf_out).unwrap();
@@ -55,10 +55,12 @@ fn card_of_pc_c2d() {
     let sb_file_path = "./tests/data/auto1_sb_pc.csv";
     let config_file = "./tests/data/auto1.config";
 
+    let output =
+        BufWriter::new(File::create(c2d_out).expect("Unable to create file"));
+
     let mut ddnnf: Ddnnf = parser::build_ddnnf("tests/data/auto1_c2d.nnf", None);
     ddnnf.max_worker = 1;
     ddnnf
-        .operate_on_queries(Ddnnf::execute_query, config_file, c2d_out)
+        .operate_on_queries(Ddnnf::execute_query, config_file, output)
         .unwrap_or_default();
 
     let mut should = File::open(sb_file_path).unwrap();
@@ -75,10 +77,12 @@ fn card_of_pc_d4() {
     let sb_file_path = "./tests/data/auto1_sb_pc.csv";
     let config_file = "./tests/data/auto1.config";
 
+    let output = BufWriter::new(File::create(d4_out).expect("Unable to create file"));
+
     let mut ddnnf: Ddnnf = parser::build_ddnnf("tests/data/auto1_d4.nnf", Some(2513));
     ddnnf.max_worker = 1;
     ddnnf
-        .operate_on_queries(Ddnnf::execute_query, config_file, d4_out)
+        .operate_on_queries(Ddnnf::execute_query, config_file, output)
         .unwrap_or_default();
 
     let mut should = File::open(sb_file_path).unwrap();
@@ -97,10 +101,12 @@ fn card_of_pc_cnf() {
     let sb_file_path = "./tests/data/auto1_sb_pc.csv";
     let config_file = "./tests/data/auto1.config";
 
+    let output = BufWriter::new(File::create(cnf_out).expect("Unable to create file"));
+
     let mut ddnnf: Ddnnf = parser::build_ddnnf("tests/data/auto1.cnf", None);
     ddnnf.max_worker = 1;
     ddnnf
-        .operate_on_queries(Ddnnf::execute_query, config_file, cnf_out)
+        .operate_on_queries(Ddnnf::execute_query, config_file, output)
         .unwrap_or_default();
 
     let mut should = File::open(sb_file_path).unwrap();
diff --git a/ddnnife/tests/write_to_file.rs b/ddnnife/tests/write_to_file.rs
index 94b18ea..390158c 100644
--- a/ddnnife/tests/write_to_file.rs
+++ b/ddnnife/tests/write_to_file.rs
@@ -9,7 +9,7 @@ fn card_of_features_normal_and_reloaded_test() {
     // default way to compute card of features with a d-DNNF in d4 standard
     let d4_out = "./tests/data/auto1_d4_fs.csv";
     let mut ddnnf: Ddnnf = parser::build_ddnnf("./tests/data/auto1_d4.nnf", Some(2513));
-    ddnnf.card_of_each_feature(d4_out).unwrap_or_default();
+    ddnnf.card_of_each_feature_csv(d4_out).unwrap_or_default();
 
     // save nnf in c2d format
     let saved_nnf = "./tests/data/auto1_d4_to_c2d.nnf";
@@ -18,7 +18,9 @@ fn card_of_features_normal_and_reloaded_test() {
     // compute the cardinality of features for the saved file
     let saved_out = "./tests/data/auto1_d4_to_c2d_fs.csv";
     let mut ddnnf: Ddnnf = parser::build_ddnnf(saved_nnf, None);
-    ddnnf.card_of_each_feature(saved_out).unwrap_or_default();
+    ddnnf
+        .card_of_each_feature_csv(saved_out)
+        .unwrap_or_default();
 
     // compare the results
     let mut is_d4 = File::open(d4_out).unwrap();
diff --git a/ddnnife_bin/Cargo.toml b/ddnnife_bin/Cargo.toml
index 7bd2c02..952d83a 100644
--- a/ddnnife_bin/Cargo.toml
+++ b/ddnnife_bin/Cargo.toml
@@ -17,5 +17,8 @@ deterministic = ["ddnnife/deterministic"]
 
 [dependencies]
 clap = { workspace = true }
+csv = { workspace = true }
 ddnnife = { workspace = true }
+log = { workspace = true }
 mimalloc = { workspace = true }
+pretty_env_logger = "0.5"
diff --git a/ddnnife_bin/src/main.rs b/ddnnife_bin/src/main.rs
index 3f0946a..6208003 100644
--- a/ddnnife_bin/src/main.rs
+++ b/ddnnife_bin/src/main.rs
@@ -1,14 +1,14 @@
 use clap::{Parser, Subcommand};
-use ddnnife::ddnnf::anomalies::t_wise_sampling::save_sample_to_file;
+use ddnnife::ddnnf::anomalies::t_wise_sampling::SamplingResult;
 use ddnnife::ddnnf::Ddnnf;
 use ddnnife::parser::{
     self as dparser,
     persisting::{write_as_mermaid_md, write_ddnnf_to_file},
 };
 use ddnnife::util::format_vec;
+use log::info;
 use std::fs::File;
-use std::io::{self, BufRead, BufReader, BufWriter, Write};
-use std::path::Path;
+use std::io::{self, stdout, BufRead, BufReader, BufWriter, Write};
 use std::time::Instant;
 
 #[global_allocator]
@@ -17,19 +17,13 @@
 static GLOBAL: mimalloc::MiMalloc = mimalloc::MiMalloc;
 
 #[derive(Parser)]
 #[command(name = "ddnnife", version, arg_required_else_help(true))]
 struct Cli {
-    /// The path to either a dDNNF file in c2d or d4 format or a CNF file. The ddnnf has to be either fulfill the requirements
-    /// of the c2d format and be smooth or produced by the newest d4 compiler version to work properly!
-    #[arg(verbatim_doc_comment)]
-    file_path: Option<String>,
-
-    /// Allows to load the ddnnf via stdin.
-    /// Either 'total_features' has to be set or the file must start with a header of the form 'nnf n v e',
-    /// where v is the number of nodes, e is the number of edges,
-    /// and n is the number of variables over which the d-dnnf is defined.
-    /// Like the c2d and the d4 format specifies, each line must be defided by a new line.
-    /// Two following new lines end the reading from stdin.
+    /// Input path, stdin when not given.
     #[arg(short, long, verbatim_doc_comment)]
-    pipe_ddnnf_stdin: bool,
+    input: Option<String>,
+
+    /// Output path, stdout when not given.
+    #[arg(short, long, verbatim_doc_comment)]
+    output: Option<String>,
 
     /// Choose one of the available
     #[clap(subcommand)]
@@ -40,8 +34,7 @@ struct Cli {
     #[arg(short, long, verbatim_doc_comment)]
     total_features: Option<u32>,
 
-    /// Save the smooth ddnnf in the c2d format. Default output file is '{FILE_NAME}-saved.nnf'.
-    /// Alternatively, you can choose a name. The .nnf ending is added automatically.
+    /// Save the smooth ddnnf in the c2d format.
     #[arg(long, verbatim_doc_comment)]
     save_ddnnf: Option<String>,
 
@@ -53,7 +46,6 @@ struct Cli {
 #[derive(Debug, Clone, Subcommand)]
 enum Operation {
     /// Computes the cardinality of features for an assignment.
-    #[clap(short_flag = 's')]
     Count {
         /// The numbers of the features that should be included or excluded
         /// (positive number to include, negative to exclude). Can be one or multiple features.
         #[arg(num_args = 0.., allow_negative_numbers = true, verbatim_doc_comment)]
         features: Option<Vec<i32>>,
     },
     /// Computes the cardinality of a single feature for all features. Is single threaded.
-    #[clap(short_flag = 'c')]
-    CountFeatures {
-        /// Computes the cardinality of features for the feature model,
-        /// i.e. the cardinality iff we select one feature for all features.
-        /// Default output file is '{FILE_NAME}-features.csv'.
-        #[arg(verbatim_doc_comment)]
-        custom_output_file: Option<String>,
-    },
+    CountFeatures,
     /// Computes the cardinality of multiple (partial) configurations.
-    #[clap(short_flag = 'q')]
     CountQueries {
         /// Path to a file that may contain multiple queries.
         /// Queries are split by new rows and consist of feature numbers ∈ ℤ that can be negated.
         /// Feature numbers are separated by a space.
         #[arg(verbatim_doc_comment)]
         queries_input_file: String,
-        /// Default output file is '{FILE_NAME}-queries.csv'.
-        #[arg(verbatim_doc_comment)]
-        custom_output_file: Option<String>,
         /// Specify how many threads should be used.
         /// Possible values are between 1 and 32.
         #[arg(short, long, value_parser = clap::value_parser!(u16).range(1..=32), default_value_t = 4, verbatim_doc_comment)]
         jobs: u16,
     },
@@ -94,9 +75,6 @@ enum Operation {
         /// Feature numbers are separated by a space.
         #[arg(verbatim_doc_comment)]
         queries_input_file: String,
-        /// Default output file is '{FILE_NAME}-sat.csv'.
-        #[arg(verbatim_doc_comment)]
-        custom_output_file: Option<String>,
         /// Specify how many threads should be used.
         /// Possible values are between 1 and 32.
         #[arg(short, long, value_parser = clap::value_parser!(u16).range(1..=32), default_value_t = 4, verbatim_doc_comment)]
@@ -117,15 +95,9 @@ enum Operation {
         /// Feature numbers are separated by a space.
         #[arg(verbatim_doc_comment)]
         queries_input_file: String,
-        /// Default output file is '{FILE_NAME}-stream.csv'.
-        #[arg(verbatim_doc_comment)]
-        custom_output_file: Option<String>,
     },
     /// Computes t-wise samples
     TWise {
-        /// The default ouput file is '{FILE_NAME}-t-wise.csv'.
-        #[arg(verbatim_doc_comment)]
-        custom_output_file: Option<String>,
         /// The 't' in t-wise sampling refers to the degree of interaction
         /// or combination of input parameters to be considered in each test case.
         /// For example, 2-wise sampling (also known as pairwise testing) considers
         /// interactions between pairs of parameters.
         #[arg(verbatim_doc_comment)]
         t: usize,
     },
     /// Computes core, dead, false-optional features, and atomic sets.
-    Anomalies {
-        /// The default ouput file is '{FILE_NAME}-anomalies.csv'.
-        #[arg(verbatim_doc_comment)]
-        custom_output_file: Option<String>,
-    },
+    Anomalies,
     /// Computes all atomic sets for the feature model (under assumptions and for candidates).
     AtomicSets {
-        /// The default ouput file is '{FILE_NAME}-atomic.csv'.
-        #[arg(verbatim_doc_comment)]
-        custom_output_file: Option<String>,
         /// The numbers of the features that should be included or excluded
         /// (positive number to include, negative to exclude).
         /// Can be one or multiple. A feature f has to be ∈ ℤ
@@ -166,9 +131,6 @@ enum Operation {
     },
     /// Generates uniform random sample
     Urs {
-        /// The default ouput file is '{FILE_NAME}-urs.csv'.
-        #[arg(verbatim_doc_comment)]
-        custom_output_file: Option<String>,
         /// The numbers of the features that should be included or excluded
         /// (positive number to include, negative to exclude).
         /// Can be one or multiple. A feature f has to be ∈ ℤ
@@ -185,20 +147,10 @@ enum Operation {
     },
     /// Computes the core and dead features.
     #[clap(verbatim_doc_comment)]
-    Core {
-        /// An leading '-' indicates that the feature is dead.
-        /// Contrast to that, if the '-' is missing the feature is core.
-        /// The default ouput file is '{FILE_NAME}-core.csv'.
-        #[arg(verbatim_doc_comment)]
-        custom_output_file: Option<String>,
-    },
+    Core,
     /// Transforms the smooth d-DNNF into the mermaid.md format.
     #[clap(verbatim_doc_comment)]
     Mermaid {
-        /// Default output file is '{FILE_NAME}-mermaid.md'.
-        /// Alternatively, you can choose a name. The .md ending is added automatically.
-        #[arg(verbatim_doc_comment)]
-        custom_output_file: Option<String>,
         /// The numbers of the features that should be included or excluded
         /// (positive number to include, negative to exclude).
         /// The nodes will be annotaded with their count regarding this query
         /// while using the marking algorithm.
         #[arg(num_args = 0.., allow_negative_numbers = true, verbatim_doc_comment)]
         assumptions: Vec<i32>,
     },
 }
 
@@ -214,12 +166,16 @@ fn main() {
     let cli = Cli::parse();
 
+    pretty_env_logger::init();
+
     // create the ddnnf based of the input file that is required
     let time = Instant::now();
-    let mut ddnnf: Ddnnf;
-    if cli.pipe_ddnnf_stdin {
-        // read model line by line from stdin
+    let mut ddnnf = if let Some(path) = &cli.input {
+        // Read from file.
+        dparser::build_ddnnf(path, cli.total_features)
+    } else {
+        // Read from stdin.
         let mut input = Vec::new();
         for line in io::stdin().lock().lines() {
             let read_line = line.unwrap();
             if read_line.is_empty() {
                 break;
             }
             input.push(read_line);
         }
-        ddnnf = dparser::distribute_building(input, cli.total_features, None);
-    } else {
-        let ddnnf_path = &cli.file_path.clone().unwrap();
-        ddnnf = dparser::build_ddnnf(ddnnf_path, cli.total_features)
-    }
-
-    // file path without last extension
-    let input_file_path = String::from(
-        Path::new(&cli.file_path.unwrap_or(String::from("ddnnf.nnf")))
-            .with_extension("")
-            .file_name()
-            .unwrap()
-            .to_str()
-            .unwrap(),
-    );
-
-    // Uses the supplied file path if there is any.
-    // If there is no prefix, we switch to the default fallback.
-    let construct_ouput_path = |maybe_prefix: &Option<String>, operation: &str, file_type: &str| {
-        format!(
-            "{}-{}.{}",
-            maybe_prefix.clone().unwrap_or(input_file_path.clone()),
-            operation,
-            file_type
-        )
+        dparser::distribute_building(input, cli.total_features, None)
     };
 
     // print additional output, iff we are not in the stream mode
@@ -260,231 +193,186 @@ fn main() {
         Some(Operation::Stream { .. }) => (),
         _ => {
             let elapsed_time = time.elapsed().as_secs_f32();
-            println!(
-                "Ddnnf overall count: {:#?}\nElapsed time for parsing, and overall count in seconds: {:.3}s. \
-                (This includes compiling to dDNNF if needed)",
-                ddnnf.rc(),
+            info!("Ddnnf overall count: {:#?}", ddnnf.rc());
+            info!(
+                "Elapsed time for parsing, and overall count in seconds: {:.3}s. (This includes compiling to dDNNF if needed)",
                 elapsed_time
             );
         }
     }
 
-    if cli.operation.is_some() {
-        let operation = match cli.operation {
-            Some(op) => op,
-            None => todo!(),
-        };
-
+    if let Some(operation) = cli.operation {
         // change the number of threads used for cardinality of features and partial configurations
         match operation {
-            CountQueries { jobs, .. } | Stream { jobs } | Sat { jobs, .. } => {
+            Operation::CountQueries { jobs, .. }
+            | Operation::Stream { jobs }
+            | Operation::Sat { jobs, .. } => {
                 ddnnf.max_worker = jobs;
             }
             _ => (),
        }
 
-        // compute the output file path to which the results (if any) will be written
-        let output_file_path: String = match &operation {
-            CountFeatures {
-                custom_output_file, ..
-            } => construct_ouput_path(custom_output_file, "features", "csv"),
-            CountQueries {
-                custom_output_file, ..
-            } => construct_ouput_path(custom_output_file, "queries", "csv"),
-            Sat {
-                custom_output_file, ..
-            } => construct_ouput_path(custom_output_file, "sat", "csv"),
-            StreamQueries {
-                custom_output_file, ..
-            } => construct_ouput_path(custom_output_file, "stream", "csv"),
-            TWise {
-                custom_output_file,
-                t,
-            } => construct_ouput_path(custom_output_file, format!("{}-wise", t).as_str(), "csv"),
-            Anomalies { custom_output_file } => {
-                construct_ouput_path(custom_output_file, "anomalies", "txt")
-            }
-            AtomicSets {
-                custom_output_file, ..
-            } => construct_ouput_path(custom_output_file, "atomic", "csv"),
-            Urs {
-                custom_output_file, ..
-            } => construct_ouput_path(custom_output_file, "urs", "csv"),
-            Core { custom_output_file } => construct_ouput_path(custom_output_file, "core", "csv"),
-            Mermaid {
-                custom_output_file, ..
-            } => construct_ouput_path(custom_output_file, "mermaid", "md"),
-            _ => String::new(),
+        let mut writer: Box<dyn Write> = if let Some(path) = &cli.output {
+            Box::new(BufWriter::new(
+                File::create(path).expect("Unable to create file"),
+            ))
+        } else {
+            Box::new(BufWriter::new(stdout()))
         };
 
-        use Operation::*;
         match &operation {
-            AtomicSets {
-                custom_output_file: _,
+            Operation::AtomicSets {
                 assumptions,
                 candidates,
                 cross,
             } => {
-                let mut wtr =
-                    BufWriter::new(File::create(&output_file_path).expect("Unable to create file"));
                 for set in ddnnf.get_atomic_sets(candidates.clone(), assumptions, *cross) {
-                    wtr.write_all(format_vec(set.iter()).as_bytes()).unwrap();
-                    wtr.write_all("\n".as_bytes()).unwrap();
+                    writer.write_all(format_vec(set.iter()).as_bytes()).unwrap();
+                    writer.write_all("\n".as_bytes()).unwrap();
                 }
-                wtr.flush().unwrap();
-                println!(
-                    "\nComputed the atomic sets and saved the results in {}.",
-                    output_file_path
-                );
             }
-            Urs {
+            Operation::Urs {
                 assumptions,
                 seed,
                 number,
-                custom_output_file: _,
             } => {
-                let mut wtr =
-                    BufWriter::new(File::create(&output_file_path).expect("Unable to create file"));
                 for sample in ddnnf
                     .uniform_random_sampling(assumptions, *number, *seed)
                     .unwrap()
                 {
-                    wtr.write_all(format_vec(sample.iter()).as_bytes()).unwrap();
-                    wtr.write_all("\n".as_bytes()).unwrap();
+                    writer
+                        .write_all(format_vec(sample.iter()).as_bytes())
+                        .unwrap();
+
+                    writer.write_all("\n".as_bytes()).unwrap();
                 }
-                wtr.flush().unwrap();
-                println!(
-                    "\nComputed {} uniform random samples and saved the results in {}.",
-                    number, output_file_path
-                );
             }
-            TWise {
-                t,
-                custom_output_file: _,
-            } => {
-                let sample_result = ddnnf.sample_t_wise(*t);
-                save_sample_to_file(&sample_result, &output_file_path).unwrap();
-                println!(
-                    "\nComputed {}-wise samples and saved the results in {}.",
-                    t, output_file_path
-                );
+            Operation::TWise { t } => {
+                match ddnnf.sample_t_wise(*t) {
+                    /*
+                    Writing "true" and "false" to the file does not really fit the format of the file but we
+                    want to somehow distinguish between true and false sampling results.
+                    True means that the feature model contains no variables and therefore an empty sample
+                    covers all t-wise interactions.
+                    False means that the feature model is void.
+                    */
+                    SamplingResult::Empty => writer.write_all("true".as_bytes()).unwrap(),
+                    SamplingResult::Void => writer.write_all("false".as_bytes()).unwrap(),
+                    SamplingResult::ResultWithSample(sample) => {
+                        let mut csv_writer = csv::Writer::from_writer(writer);
+
+                        sample.iter().enumerate().for_each(|(index, config)| {
+                            csv_writer
+                                .serialize(&(index, format_vec(config.get_literals().iter())))
+                                .unwrap();
+                        });
+
+                        writer = csv_writer.into_inner().unwrap();
+                    }
+                }
             }
             // computes the cardinality for the partial configuration that can be mentioned with parameters
-            Count { features } => {
+            Operation::Count { features } => {
                 let features = features.clone().unwrap_or(vec![]);
-                println!(
-                    "\nDdnnf count for query {:?} is: {:?}",
-                    &features,
-                    ddnnf.execute_query(&features)
-                );
+                let count = ddnnf.execute_query(&features);
+
+                writer.write_all(count.to_string().as_ref()).unwrap();
+
                 let marked_nodes = ddnnf.get_marked_nodes_clone(&features);
-                println!("While computing the cardinality of the partial configuration {} out of the {} nodes were marked. \
+                info!("While computing the cardinality of the partial configuration {} out of the {} nodes were marked. \
                That are {:.2}%", marked_nodes.len(), ddnnf.nodes.len(), marked_nodes.len() as f64 / ddnnf.nodes.len() as f64 * 100.0);
             }
             // computes the cardinality of features and saves the results in a .csv file
             // the cardinalities are always sorted from lowest to highest (also for multiple threads)
-            CountFeatures { .. } => {
+            Operation::CountFeatures => {
                 let time = Instant::now();
+
+                let mut csv_writer = csv::Writer::from_writer(writer);
+
                 ddnnf
-                    .card_of_each_feature(&output_file_path)
-                    .unwrap_or_default();
+                    .card_of_each_feature()
+                    .for_each(|(variable, cardinality, ratio)| {
+                        csv_writer
+                            .write_record(vec![
+                                variable.to_string(),
+                                cardinality.to_string(),
+                                format!("{:.10e}", ratio),
+                            ])
+                            .unwrap();
+                    });
+
+                writer = csv_writer.into_inner().unwrap();
+
                 let elapsed_time = time.elapsed().as_secs_f64();
-                println!(
-                    "\nComputed the Cardinality of all features in {} and the results are saved in {}\n\
-                    It took {} seconds. That is an average of {} seconds per feature",
-                    input_file_path,
-                    output_file_path,
+                info!(
+                    "Runtime: {} seconds. That is an average of {} seconds per feature.",
                     elapsed_time,
                     elapsed_time / ddnnf.number_of_variables as f64
                 );
             }
-            CountQueries {
+            Operation::CountQueries {
                 queries_input_file, ..
             } => {
                 compute_queries(
                     &mut ddnnf,
                     queries_input_file,
-                    &output_file_path,
+                    &mut writer,
                     Ddnnf::execute_query,
                 );
             }
-            Sat {
+            Operation::Sat {
                 queries_input_file, ..
             } => {
-                compute_queries(
-                    &mut ddnnf,
-                    queries_input_file,
-                    &output_file_path,
-                    Ddnnf::sat,
-                );
+                compute_queries(&mut ddnnf, queries_input_file, &mut writer, Ddnnf::sat);
             }
-            StreamQueries {
+            Operation::StreamQueries {
                 queries_input_file, ..
             } => {
-                let mut wtr =
-                    BufWriter::new(File::create(&output_file_path).expect("Unable to create file"));
-
                 let file = dparser::open_file_savely(queries_input_file);
                 let queries = BufReader::new(file)
                     .lines()
                     .map(|line| line.expect("Unable to read line"));
 
                 for query in queries {
-                    wtr.write_all(ddnnf.handle_stream_msg(&query).as_bytes())
+                    writer
+                        .write_all(ddnnf.handle_stream_msg(&query).as_bytes())
                         .unwrap();
-                    wtr.write_all("\n".as_bytes()).unwrap();
+                    writer.write_all("\n".as_bytes()).unwrap();
                 }
-                wtr.flush().unwrap();
-                println!(
-                    "\nComputed stream queries and saved the results in {}.",
-                    output_file_path
-                );
+                writer.flush().unwrap();
             }
             // switch in the stream mode
-            Stream { .. } => {
+            Operation::Stream { .. } => {
                 ddnnf.init_stream();
             }
             // writes the anomalies of the d-DNNF to file
             // anomalies are: core, dead, false-optional features and atomic sets
-            Anomalies {
-                custom_output_file: _,
-            } => {
-                ddnnf.write_anomalies(&output_file_path).unwrap();
-                println!("\nThe anomalies of the d-DNNF (i.e. core, dead, false-optional features, and atomic sets) are written into {}.", output_file_path);
+            Operation::Anomalies => {
+                ddnnf.write_anomalies(&mut writer).unwrap();
             }
-            Core {
-                custom_output_file: _,
-            } => {
+            Operation::Core => {
                 let mut core: Vec<i32> = ddnnf.core.clone().into_iter().collect();
                 core.sort_unstable_by_key(|k| k.abs());
-                let mut wtr =
-                    BufWriter::new(File::create(&output_file_path).expect("Unable to create file"));
-                wtr.write_all(format_vec(core.iter()).as_bytes()).unwrap();
-                wtr.write_all("\n".as_bytes()).unwrap();
-                println!(
-                    "\nComputed the core / dead features and saved the results in {}.",
-                    output_file_path
-                );
+                writer
+                    .write_all(format_vec(core.iter()).as_bytes())
+                    .unwrap();
             }
-            Mermaid {
-                custom_output_file: _,
-                assumptions,
-            } => {
-                write_as_mermaid_md(&mut ddnnf, assumptions, &output_file_path).unwrap();
-                println!("The smooth d-DNNF was transformed into mermaid markdown format and was written in {}.", output_file_path);
+            Operation::Mermaid { assumptions } => {
+                write_as_mermaid_md(&mut ddnnf, assumptions, &mut writer).unwrap();
             }
         }
+
+        writer.flush().unwrap();
     }
 
     // writes the d-DNNF to file
-    if cli.save_ddnnf.is_some() {
-        let path = construct_ouput_path(&cli.save_ddnnf, "saved", "nnf");
+    if let Some(path) = cli.save_ddnnf {
         write_ddnnf_to_file(&ddnnf, &path).unwrap();
-        println!(
-            "\nThe smooth d-DNNF was written into the c2d format in {}.",
+        info!(
+            "The smooth d-DNNF was written into the c2d format in {}.",
             path
         );
     }
@@ -497,22 +385,21 @@ fn main() {
 fn compute_queries<T: ToString + Ord + Send + 'static>(
     ddnnf: &mut Ddnnf,
-    queries_file: &String,
-    output_file: &String,
+    queries_file: &str,
+    output: impl Write,
     operation: fn(&mut Ddnnf, query: &[i32]) -> T,
 ) {
     let time = Instant::now();
+
     ddnnf
-        .operate_on_queries(operation, queries_file, output_file)
+        .operate_on_queries(operation, queries_file, output)
         .unwrap_or_default();
+
     let elapsed_time = time.elapsed().as_secs_f64();
-    println!(
-        "\nComputed values of all queries in {} and the results are saved in {}\n\
-        It took {} seconds. That is an average of {} seconds per query",
-        queries_file,
-        output_file,
+    info!(
+        "Runtime: {} seconds. That is an average of {} seconds per query.",
         elapsed_time,
-        elapsed_time / dparser::parse_queries_file(queries_file.as_str()).len() as f64
+        elapsed_time / dparser::parse_queries_file(queries_file).len() as f64
     );
 }