From 12f8bd201f2949feb009ec115c15141d7054356d Mon Sep 17 00:00:00 2001
From: Will Banks
Date: Fri, 12 Apr 2024 18:44:42 +0100
Subject: [PATCH] Rethought the DiskImageInfo struct to define carving
 operations for Searchlight, and added a carve log which is written out.
 Added skeleton code for processing a carve log to produce the carved files
 from the disk image

---
 Cargo.lock                                  |   5 +-
 libsearchlight/Cargo.toml                   |   1 +
 libsearchlight/src/searchlight.rs           | 333 +++++++++++---------
 libsearchlight/src/searchlight/carve_log.rs |  48 +++
 libsearchlight/src/searchlight/config.rs    |   4 +-
 libsearchlight/src/utils/fragments_index.rs |   2 +-
 libsearchlight/src/validation.rs            |   5 +-
 searchlight/src/args.rs                     |  10 +-
 searchlight/src/main.rs                     |  71 +++--
 9 files changed, 302 insertions(+), 177 deletions(-)
 create mode 100644 libsearchlight/src/searchlight/carve_log.rs

diff --git a/Cargo.lock b/Cargo.lock
index a93fe2e..503a069 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -562,6 +562,7 @@ dependencies = [
  "memmap",
  "rio",
  "serde",
+ "serde_json",
  "strum",
  "tinyrand",
  "unicode-segmentation",
@@ -900,9 +901,9 @@ dependencies = [
 
 [[package]]
 name = "serde_json"
-version = "1.0.108"
+version = "1.0.115"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3d1c7e3eac408d115102c4c24ad393e0821bb3a5df4d506a80f85f7a742a526b"
+checksum = "12dc5c46daa8e9fdf4f5e71b6cf9a53f2487da0e86e55808e2d35539666497dd"
 dependencies = [
  "itoa",
  "ryu",
diff --git a/libsearchlight/Cargo.toml b/libsearchlight/Cargo.toml
index b92fce0..a2b3e18 100644
--- a/libsearchlight/Cargo.toml
+++ b/libsearchlight/Cargo.toml
@@ -20,6 +20,7 @@ crc32fast = "1.3.2"
 unicode-segmentation = "1.11.0"
 strum = { version = "0.26.1", features = [ "derive" ] }
 flate2 = "1.0.28" # Need for decompressing deflate-compressed ZIP file data
+serde_json = "1.0.115"
 
 # Conditionally depend on libc and rio on linux platforms
 [target.'cfg(target_os = "linux")'.dependencies]
diff --git a/libsearchlight/src/searchlight.rs b/libsearchlight/src/searchlight.rs
index 024418f..06ebdf7 100644
--- a/libsearchlight/src/searchlight.rs
+++ b/libsearchlight/src/searchlight.rs
@@ -1,221 +1,268 @@
 pub mod config;
+mod carve_log;
 
 use std::{arch::x86_64::{_mm_prefetch, _MM_HINT_T0}, collections::VecDeque, fs::{self, File}, io::{IoSlice, Write}};
 
 use log::{debug, info, log_enabled, trace, Level};
 use memmap::MmapOptions;
 
-use crate::{error::Error, utils::file_len, search::{pairing::{self, pair, MatchPart}, search_common::AcTableBuilder, Search, SearchFuture, Searcher}, utils::{estimate_cluster_size, iter::ToGappedWindows}, validation::{DelegatingValidator, FileValidationType, FileValidator}};
+use crate::{error::Error, search::{pairing::{self, pair, MatchPart}, search_common::AcTableBuilder, Search, SearchFuture, Searcher}, searchlight::carve_log::CarveLog, utils::{estimate_cluster_size, file_len, iter::ToGappedWindows}, validation::{DelegatingValidator, FileValidationType, FileValidator}};
 
 use self::config::SearchlightConfig;
 
 /// Default size of the blocks to load and search disk image data in
 pub const DEFAULT_BLOCK_SIZE: usize = 1024 * 1024;
 
-pub struct DiskImageInfo {
-	pub path: String,
-	pub cluster_size: Option<u64>
+pub enum CarveOperationInfo {
+	Image {
+		path: String,
+		config: SearchlightConfig,
+		cluster_size: Option<u64>,
+		skip_carving: bool,
+	},
+	FromLog {
+		path: String,
+	}
 }
+
+impl CarveOperationInfo {
+	pub fn path(&self) -> String {
+		match &self {
+			CarveOperationInfo::Image { path, .. } => path,
+			CarveOperationInfo::FromLog { path } => path,
+		}.to_string()
+	}
 }
 
 /// The main mediator of the library, this struct manages state
 pub struct Searchlight {
-	config: SearchlightConfig,
-	queue: VecDeque<DiskImageInfo>,
+	queue: VecDeque<CarveOperationInfo>,
 }
 
 impl Searchlight {
-	/// Creates a new `Searchlight` instance with the specified config, validating it and returning an error if it
-	/// did not successfully validate
-	pub fn new(config: SearchlightConfig) -> Result<Self, Error> {
-		match config.validate() {
-			Ok(_) => Ok(Searchlight {
-				config,
-				queue: VecDeque::new(),
-			}),
-			Err(e) => Err(e)
+	pub fn new() -> Self {
+		Searchlight {
+			queue: VecDeque::new()
 		}
 	}
 
-	/// Add a file to the queue of files to be processed
-	pub fn with_file(mut self, info: DiskImageInfo) -> Self {
-		self.add_file(info);
+	/// Add an operation to the queue of operations to be processed
+	pub fn with_operation(mut self, info: CarveOperationInfo) -> Self {
+		self.add_operation(info);
 		self
 	}
 
-	/// Add a file to the queue of files to be processed
-	pub fn add_file(&mut self, info: DiskImageInfo) {
+	/// Add an operation to the queue of operations to be processed
+	pub fn add_operation(&mut self, info: CarveOperationInfo) {
 		self.queue.push_back(info);
 	}
 
 	/// Processes the file at the front of the queue, returning true if one was processed, and false if there were none to be processed.
-	/// Returns an error if one occurred.
-	pub fn process_file(&mut self, output_dir: impl AsRef<str>) -> Result<bool, Error> {
+	/// Returns an error if one occurred. Also returns the carve operation info.
+	pub fn process_file(&mut self, output_dir: impl AsRef<str>) -> (Option<CarveOperationInfo>, Result<bool, Error>) {
 		if let Some(info) = self.queue.pop_front() {
-			let (mmap, file_len) = {
-				let mut file = File::open(&info.path)?;
+			let result = match info {
+				CarveOperationInfo::Image { ref path, ref config, cluster_size, skip_carving } => {
+					self.process_image_file(output_dir, &path, &config, cluster_size, skip_carving).map(|_| true)
+				}
+				CarveOperationInfo::FromLog { ref path } => {
+					self.process_log_file(output_dir, &path).map(|_| true)
+				}
+			};
 
-				let file_len = file_len(&mut file)?;
+			(
+				Some(info),
+				result
+			)
+		} else {
+			(None, Ok(false))
+		}
+	}
 
-				info!("Opened file {} (size: {} bytes)", &info.path, file_len);
+	pub fn process_image_file(&mut self, output_dir: impl AsRef<str>, path: &str, config: &SearchlightConfig, cluster_size: Option<u64>, skip_carving: bool) -> Result<(), Error> {
+		let (mmap, file_len) = {
+			let mut file = File::open(&path)?;
 
-				(
-					unsafe { MmapOptions::new().map(&file)? },
-					file_len
-				)
-			};
+			let file_len = file_len(&mut file)?;
 
-			assert_eq!(file_len, mmap.len() as u64);
+			info!("Opened file {} (size: {} bytes)", &path, file_len);
 
-			let (mut searcher, max_pat_len) = {
-				let ac_table = AcTableBuilder::from_config(&self.config).build();
+			(
+				unsafe { MmapOptions::new().map(&file)? },
+				file_len
+			)
+		};
-			(
-				Search::new(ac_table.clone(), false),
-				ac_table.max_pat_len as usize
-			)
-		};
+		assert_eq!(file_len, mmap.len() as u64);
+
+		let (mut searcher, max_pat_len) = {
+			let ac_table = AcTableBuilder::from_config(&config).build();
-		let block_size = searcher.max_search_size().unwrap_or(DEFAULT_BLOCK_SIZE);
+			(
+				Search::new(ac_table.clone(), false),
+				ac_table.max_pat_len as usize
+			)
+		};
-		assert!(max_pat_len < block_size);
+		let block_size = searcher.max_search_size().unwrap_or(DEFAULT_BLOCK_SIZE);
-		let num_blocks = {
-			let num_blocks = (file_len as usize - max_pat_len) / (block_size - max_pat_len);
-			if file_len % block_size as u64 != 0 {
-				num_blocks + 1
+		assert!(max_pat_len < block_size);
+
+		let num_blocks = {
+			let num_blocks = (file_len as usize - max_pat_len) / (block_size - max_pat_len);
+			if file_len % block_size as u64 != 0 {
+				num_blocks + 1
+			} else {
+				num_blocks
+			}
+		};
+
+		info!("Starting search phase, searching {} bytes in {} blocks of (at most) {} bytes each", file_len, num_blocks, block_size);
+
+		let mut matches = Vec::new();
+		let mut result_fut: Option<SearchFuture> = None;
+
+		// PERF: Perhaps use a by-block loading method when doing the sequential search and then go back to the memory map for the random-access carving.
+		// If possible, when using the GPU search impl, write directly into the vulkan-allocated host-side buffer to avoid a memcpy
+		// PERF: Queuing read operations with io_uring might have a more substantial performance improvement for HDDs, as it may be able to reduce the
+		// amount of disk rotations - but for a single file, would it be any better? Perhaps look into this
+		for (i, window) in mmap.gapped_windows(block_size, block_size - max_pat_len).enumerate() {
+			// This probably doesn't do a lot but there seems no reason to not have it
+			#[cfg(target_arch = "x86_64")]
+			unsafe { _mm_prefetch::<_MM_HINT_T0>(window.as_ptr() as *const i8) };
+
+			if let Some(prev_result) = result_fut.take() {
+				matches.append(&mut prev_result.wait().unwrap());
+			}
+			let fut = {
+				if i == 0 {
+					searcher.search(window, 0).unwrap()
 				} else {
-				num_blocks
+					searcher.search_next(window, (i * (block_size - max_pat_len)) as u64).unwrap()
 				}
 			};
+			result_fut = Some(fut);
-		info!("Starting search phase, searching {} bytes in {} blocks of (at most) {} bytes each", file_len, num_blocks, block_size);
+			if log_enabled!(Level::Info) {
+				eprint!("\rProgress: {:.2}%", (i as f32 / num_blocks as f32) * 100.0);
+			}
+		}
-		let mut matches = Vec::new();
-		let mut result_fut: Option<SearchFuture> = None;
+		if log_enabled!(Level::Info) {
+			eprintln!("\rProgress: 100.00%");
+		}
-		// PERF: Perhaps use a by-block loading method when doing the sequential search and then go back to the memory map for the random-access carving.
-		// If possible, when using the GPU search impl, write directly into the vulkan-allocated host-side buffer to avoid a memcpy
-		// PERF: Queuing read operations with io_uring might have a more substantial performance improvement for HDDs, as it may be able to reduce the
-		// amount of disk rotations - but for a single file, would it be any better? Perhaps look into this
-		for (i, window) in mmap.gapped_windows(block_size, block_size - max_pat_len).enumerate() {
-			// This probably doesn't do a lot but there seems no reason to not have it
-			#[cfg(target_arch = "x86_64")]
-			unsafe { _mm_prefetch::<_MM_HINT_T0>(window.as_ptr() as *const i8) };
+		if let Some(result) = result_fut.take() {
+			matches.append(&mut result.wait().unwrap());
+		}
-			if let Some(prev_result) = result_fut.take() {
-				matches.append(&mut prev_result.wait().unwrap());
-			}
-			let fut = {
-				if i == 0 {
-					searcher.search(window, 0).unwrap()
-				} else {
-					searcher.search_next(window, (i * (block_size - max_pat_len)) as u64).unwrap()
-				}
-			};
-			result_fut = Some(fut);
+		let num_matches = matches.len();
+
+		matches.sort_by_key(|m| m.start_idx);
-			if log_enabled!(Level::Info) {
-				eprint!("\rProgress: {:.2}", (i as f32 / num_blocks as f32) * 100.0);
+		let id_ftype_map = &pairing::preprocess_config(&config);
+
+		// Get the user-supplied cluster size or estimate it based off of headers
+		// A None for cluster size here will indicate that the headers appear to be mostly not allocated on any usual cluster boundaries, or that
+		// has been passed in as the case
+		let cluster_size = cluster_size.unwrap_or_else(|| {
+			let est = estimate_cluster_size(matches.iter().filter(|m| {
+				if let Some((_, _, part)) = id_ftype_map.get(&m.id) {
+					*part == MatchPart::Header
+				} else {
+					assert!(false);
+					panic!() // assert!(false) is not detected as a control flow terminator/does not return ! but is more semantically correct
+				}
+			})).unwrap_or(1); // A cluster size of 1 is effectively the same as not being clustered
+
+			info!("Calculated cluster size estimate: {est}");
+
+			est
+		});
 
-		if log_enabled!(Level::Trace) {
-			for m in &matches {
-				if let Some((_, ftype, part)) = id_ftype_map.get(&m.id) {
-					trace!("Match at {}, type {} ({})", m.start_idx, ftype.extension.clone().unwrap_or("".to_string()), part);
-				} else {
-					assert!(false);
-				}
+		if log_enabled!(Level::Trace) {
+			for m in &matches {
+				if let Some((_, ftype, part)) = id_ftype_map.get(&m.id) {
+					trace!("Match at {}, type {} ({})", m.start_idx, ftype.extension.clone().unwrap_or("".to_string()), part);
+				} else {
+					assert!(false);
 				}
 			}
+		}
 
-		let mut consumable_matches = matches.clone();
-		let match_pairs = pair(&mut consumable_matches, id_ftype_map, true);
+		let mut consumable_matches = matches.clone();
+		let match_pairs = pair(&mut consumable_matches, id_ftype_map, true);
 
-		info!("Searching complete: Found {} potential files ({} individual matches)", match_pairs.len(), num_matches);
+		info!("Searching complete: Found {} potential files ({} individual matches)", match_pairs.len(), num_matches);
 
-		// Create output directory, erroring if it exists already
-		fs::create_dir(output_dir.as_ref())?;
+		// Create output directory, erroring if it exists already
+		fs::create_dir(output_dir.as_ref())?;
 
-		let validator = DelegatingValidator::new();
+		let validator = DelegatingValidator::new();
 
-		let mut num_carved_files = 0;
+		let mut num_carved_files = 0;
 
-		for pot_file in match_pairs {
-			let validation = validator.validate(&mmap, &pot_file, &matches, cluster_size as usize, &self.config);
+		let mut log = CarveLog::new();
 
-			debug!("Potential file at {}-{} (type id {}) validated as: {}, with fragments {:?}", pot_file.start_idx, pot_file.end_idx + 1, pot_file.file_type.type_id, validation.validation_type, validation.fragments);
+		for pot_file in match_pairs {
+			let validation = validator.validate(&mmap, &pot_file, &matches, cluster_size as usize, &config);
 
-			if validation.validation_type != FileValidationType::Unrecognised {
-				let fragments = if validation.fragments.is_empty() {
-					vec![ (pot_file.start_idx..(pot_file.end_idx + 1)) ]
-				} else {
-					validation.fragments
-				};
+			debug!("Potential file at {}-{} (type id {}) validated as: {}, with fragments {:?}", pot_file.start_idx, pot_file.end_idx + 1, pot_file.file_type.type_id, validation.validation_type, validation.fragments);
 
-				// Get the minimum index and maximum index of all fragments and designate them the start and end idxs
-				let start_idx = fragments.iter().min_by_key(|frag| frag.start).unwrap().start; // .map_or(pot_file.start_idx, |frag| frag.start);
-				let end_idx = fragments.iter().max_by_key(|frag| frag.end).unwrap().end; // .map_or(pot_file.end_idx + 1, |frag| frag.end);
+			if validation.validation_type != FileValidationType::Unrecognised {
+				let fragments = if validation.fragments.is_empty() {
+					vec![ (pot_file.start_idx..(pot_file.end_idx + 1)) ]
+				} else {
+					validation.fragments
+				};
+				// Get the minimum index and maximum index of all fragments and designate them the start and end idxs
+				let start_idx = fragments.iter().min_by_key(|frag| frag.start).unwrap().start; // .map_or(pot_file.start_idx, |frag| frag.start);
+				let end_idx = fragments.iter().max_by_key(|frag| frag.end).unwrap().end; // .map_or(pot_file.end_idx + 1, |frag| frag.end);
+
+				// Create the file with filename <start_idx>-<end_idx>.<extension>
+				let filename = format!("{}/{}/{}-{}.{}",
+					output_dir.as_ref(),
+					validation.validation_type,
+					start_idx,
+					end_idx,
+					pot_file.file_type.extension.clone().unwrap_or("".to_string())
+				);
+
+				// Only write out the file content if the skip carving flag is false/not present
+				if !skip_carving {
 					// Create validation directory if it doesn't exist
 					fs::create_dir_all(format!("{}/{}", output_dir.as_ref(), validation.validation_type.to_string()))?;
 
-				// Create the file with filename <start_idx>-<end_idx>.<extension>
-				let mut file = File::create(
-					format!("{}/{}/{}-{}.{}",
-						output_dir.as_ref(),
-						validation.validation_type,
-						start_idx,
-						end_idx,
-						pot_file.file_type.extension.clone().unwrap_or("".to_string())
-					)
-				)?;
+					let mut file = File::create(filename.clone())?;
 
 					file.write_vectored(
 						&fragments.iter().map(|frag| IoSlice::new(&mmap[frag.start..frag.end])).collect::<Vec<_>>()
 					)?;
-
-				num_carved_files += 1;
 				}
+
+				// Add entry to log
+				log.add_entry(pot_file.file_type.type_id, filename, validation.validation_type, fragments);
+
+				num_carved_files += 1;
 			}
 		}
 
+		if !skip_carving {
 			info!("{} successfully validated files exported to {}", num_carved_files, output_dir.as_ref());
-
-			Ok(true)
-		} else {
-			Ok(false)
 		}
+
+		log.write(output_dir.as_ref())?;
+
+		info!("Carve log written to {}/log.txt", output_dir.as_ref());
+
+		Ok(())
+	}
+
+	pub fn process_log_file(&mut self, output_dir: impl AsRef<str>, path: &str) -> Result<(), Error> {
+		// TODO: Quite simply, deserialize the log file, read the image file indicated by the log file (need to add that field and other information),
+		// and carve the necessary data ranges for each file
+
+		todo!();
+	}
 }
\ No newline at end of file
diff --git a/libsearchlight/src/searchlight/carve_log.rs b/libsearchlight/src/searchlight/carve_log.rs
new file mode 100644
index 0000000..9e69d81
--- /dev/null
+++ b/libsearchlight/src/searchlight/carve_log.rs
@@ -0,0 +1,48 @@
+use std::{fs, io, path::PathBuf};
+
+use serde::Serialize;
+
+use crate::validation::{FileValidationType, Fragment};
+
+use super::config::FileTypeId;
+
+#[derive(Serialize)]
+pub struct CarveLog {
+	files: Vec<CarveLogEntry>
+}
+
+#[derive(Serialize)]
+pub struct CarveLogEntry {
+	file_type_id: FileTypeId,
+	filename: String,
+	validation: FileValidationType,
+	fragments: Vec<Fragment>
+}
+
+impl CarveLog {
+	pub fn new() -> Self {
+		CarveLog {
+			files: Vec::new()
+		}
+	}
+
+	pub fn add_entry(&mut self, file_type_id: FileTypeId, filename: String, validation: FileValidationType, fragments: Vec<Fragment>) {
+		self.files.push(CarveLogEntry {
+			file_type_id,
+			filename,
+			validation,
+			fragments
+		});
+	}
+
+	pub fn write(&self, dir_path: &str) -> Result<(), io::Error> {
+		let mut buf = Vec::new();
+		let formatter = serde_json::ser::PrettyFormatter::with_indent(b"\t");
+		let mut ser = serde_json::Serializer::with_formatter(&mut buf, formatter);
+		self.serialize(&mut ser).unwrap(); // This shouldn't fail... right??
+
+		let filename: PathBuf = [ dir_path, "log.txt" ].into_iter().collect();
+
+		fs::write(filename, buf)
+	}
+}
\ No newline at end of file
diff --git a/libsearchlight/src/searchlight/config.rs b/libsearchlight/src/searchlight/config.rs
index cb3f659..10de820 100644
--- a/libsearchlight/src/searchlight/config.rs
+++ b/libsearchlight/src/searchlight/config.rs
@@ -1,7 +1,7 @@
 use std::{collections::HashMap, fmt::Display, ops::Deref};
 
 use log::error;
-use serde::Deserialize;
+use serde::{Deserialize, Serialize};
 
 use crate::{error::Error, search::{match_id_hash_slice_u16, pairing::MatchPart}, utils::str_parse::parse_match_str};
 
@@ -75,7 +75,7 @@ impl Display for MatchString {
 	}
 }
 
-#[derive(Deserialize, Debug, PartialEq, Eq, Hash, strum::Display)]
+#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Hash, strum::Display, Clone, Copy)]
 #[serde(rename_all = "lowercase")]
 #[strum(serialize_all = "lowercase")]
 pub enum FileTypeId {
diff --git a/libsearchlight/src/utils/fragments_index.rs b/libsearchlight/src/utils/fragments_index.rs
index 528c7d2..66261ed 100644
--- a/libsearchlight/src/utils/fragments_index.rs
+++ b/libsearchlight/src/utils/fragments_index.rs
@@ -139,7 +139,7 @@ mod test {
 
 		let frags = [ 4..7, 10..15 ];
 
-		let expected = [ ];
+		let expected = [0u8; 0];
 
 		let frags_index = FragmentsIndex::new_sliced(&file_data, &frags, 4, 5);
diff --git a/libsearchlight/src/validation.rs b/libsearchlight/src/validation.rs
index 3ff7a85..7be32b2 100644
--- a/libsearchlight/src/validation.rs
+++ b/libsearchlight/src/validation.rs
@@ -4,6 +4,8 @@ pub mod zip;
 
 use std::{collections::HashMap, ops::Range};
 
+use serde::Serialize;
+
 use crate::{search::{pairing::MatchPair, Match}, searchlight::config::{FileTypeId, SearchlightConfig}};
 
 use self::{jpeg::JpegValidator, png::PngValidator, zip::ZipValidator};
@@ -36,7 +38,8 @@ impl Default for FileValidationInfo {
 	}
 }
 
-#[derive(Debug, PartialEq, strum::Display)]
+#[derive(Serialize, Debug, PartialEq, strum::Display)]
+#[serde(rename_all = "snake_case")]
 #[strum(serialize_all = "snake_case")]
 pub enum FileValidationType {
 	/// Data is recognised as completely valid for the file format
diff --git a/searchlight/src/args.rs b/searchlight/src/args.rs
index 3183d76..ae3c0da 100644
--- a/searchlight/src/args.rs
+++ b/searchlight/src/args.rs
@@ -10,10 +10,10 @@ pub struct Args { // TODO: Add a "quick search" option to only look for headers
 	pub verbose: clap_verbosity_flag::Verbosity,
 	/// Path to the input disk image file to attempt to recover data from
 	#[arg(short, long)]
-	pub input: String,
+	pub input: Option<String>,
 	/// The cluster size that the filesystem that is/was present in the disk image allocated files in, i.e. all valid non-embedded file headers will be found at multiples of this value.
 	/// Alternatively, you can specify "unaligned" or "unknown"
-	#[arg(short = 'l', long, default_value = "unknown")]
+	#[arg(short, long, default_value = "unknown")]
 	pub cluster_size: ClusterSizeArg,
 	/// The output directory to save recovered file contents in. Defaults to a timestamped directory (startup time) in the current working directory
 	#[arg(short, long)]
@@ -22,8 +22,10 @@ pub struct Args { // TODO: Add a "quick search" option to only look for headers
 	#[arg(short, long)]
 	pub skip_carving: bool,
 	/// Path to the TOML config file. Defaults to looking for "Searchlight.toml" in the current working directory
-	#[arg(short, long)]
-	pub config: Option<String>
+	#[arg(short = 'f', long)]
+	pub config: Option<String>,
+	/// If specified, will read the target log file and carve the files indicated in it. Doesn't require a config. If specified alongside input, will perform both carving operations separately
+	pub carve_log: Option<String>,
 }
 
 #[derive(Debug, Clone)]
diff --git a/searchlight/src/main.rs b/searchlight/src/main.rs
index cd6c3ff..10cb699 100644
--- a/searchlight/src/main.rs
+++ b/searchlight/src/main.rs
@@ -4,8 +4,8 @@ use std::{fs, io::Write, time::SystemTime};
 
 use args::Args;
 use clap::Parser;
-use libsearchlight::searchlight::{DiskImageInfo, Searchlight};
-use log::{debug, error};
+use libsearchlight::searchlight::{CarveOperationInfo, Searchlight};
+use log::{debug, error, info};
 
 #[cfg(not(target_pointer_width = "64"))]
 compile_error!("Target architecture is not 64-bit - This software is only supported on 64-bit platforms");
@@ -21,35 +21,58 @@ fn main() {
 	})
 	.init();
 
-	debug!("args: {:?}", args);
+	debug!("Args: {:?}", args);
 
-	args.config = Some(args.config.unwrap_or("Searchlight.toml".to_string()));
+	let mut searchlight = Searchlight::new();
 
-	let config = match fs::read_to_string(args.config.as_ref().unwrap()) {
-		Ok(config_string) => match toml::from_str(&config_string) {
-			Ok(config) => config,
+	if let Some(input) = args.input {
+		args.config = Some(args.config.unwrap_or("Searchlight.toml".to_string()));
+
+		let config = match fs::read_to_string(args.config.as_ref().unwrap()) {
+			Ok(config_string) => match toml::from_str(&config_string) {
+				Ok(config) => config,
+				Err(e) => {
+					error!("Error processing config file \"{}\": {}", args.config.unwrap(), e);
+					return;
+				}
+			},
 			Err(e) => {
-				error!("Error processing config file \"{}\": {}", args.config.unwrap(), e);
+				error!("Could not open config file \"{}\": {}", args.config.unwrap(), e);
 				return;
 			}
-		},
-		Err(e) => {
-			error!("Could not open config file \"{}\": {}", args.config.unwrap(), e);
-			return;
-		}
-	};
+		};
 
-	debug!("config: {:?}", config);
+		debug!("Config: {:?}", config);
 
-	let mut searchlight = match Searchlight::new(config) {
-		Ok(searchlight) => searchlight.with_file(DiskImageInfo { path: args.input.clone(), cluster_size: args.cluster_size.as_option() }),
-		Err(e) => {
-			error!("Failed to initialise Searchlight: {}", e);
-			return;
-		}
-	};
+		searchlight.add_operation(CarveOperationInfo::Image {
+			path: input,
+			config,
+			cluster_size: args.cluster_size.as_option(),
+			skip_carving: args.skip_carving
+		});
+	}
+
+	if let Some(log_path) = args.carve_log {
+		searchlight.add_operation(CarveOperationInfo::FromLog {
+			path: log_path
+		})
+	}
 
-	if let Err(e) = searchlight.process_file(args.out_dir.unwrap_or(humantime::format_rfc3339(SystemTime::now()).to_string())) {
-		error!("Failed to process file \"{}\": {}", args.input, e);
+	loop {
+		match searchlight.process_file(args.out_dir.clone().unwrap_or(humantime::format_rfc3339(SystemTime::now()).to_string())) {
+			(Some(info), Ok(true)) => {
+				info!("Finished processing file \"{}\"", info.path());
+			}
+			(_, Ok(false)) => {
+				info!("No files left to process, exiting");
+				break;
+			}
+			(Some(info), Err(e)) => {
+				error!("Failed to process file \"{}\": {}", info.path(), e);
+			}
+			_ => {
+				unimplemented!()
+			}
+		}
 	}
 }
\ No newline at end of file
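---
Notes (not part of the commit):

For reviewers, here is roughly what a log.txt written by CarveLog::write would look like, pretty-printed with the tab-indent formatter above. The values are invented for illustration, the "validation" variant name is a placeholder (rendered snake_case via the new Serialize derive on FileValidationType), FileTypeId renders lowercase, and this assumes Fragment is a Range type that serde serializes as start/end pairs:

	{
		"files": [
			{
				"file_type_id": "jpeg",
				"filename": "carved/correct/512-20991.jpg",
				"validation": "correct",
				"fragments": [
					{
						"start": 512,
						"end": 20992
					}
				]
			}
		]
	}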
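The TODO in process_log_file already describes the intended behaviour. A minimal sketch of that direction follows — it is not part of this patch and leans on several assumptions: CarveLog/CarveLogEntry would need Deserialize derives and public (or accessor-exposed) fields, the log would need the image path field the TODO mentions (called image_path here purely for illustration), Fragment is taken to be a Range of usize as in the write_vectored call above, and serde_json error plumbing into the crate's Error type is glossed over with an expect:

	// Sketch only — `image_path`, the Deserialize derives, and field visibility
	// are assumptions, not things this patch provides
	pub fn process_log_file(&mut self, output_dir: impl AsRef<str>, path: &str) -> Result<(), Error> {
		// Deserialize the carve log produced by a previous run
		let log: CarveLog = serde_json::from_str(&fs::read_to_string(path)?)
			.expect("malformed carve log"); // would become a proper Error variant

		// Memory-map the disk image named in the log
		let file = File::open(&log.image_path)?;
		let mmap = unsafe { MmapOptions::new().map(&file)? };

		// Mirror process_image_file: error if the output directory already exists
		fs::create_dir(output_dir.as_ref())?;

		for entry in &log.files {
			// Re-carve each file by writing out its recorded fragment ranges;
			// reusing the logged filename verbatim is a simplification
			let mut out = File::create(format!("{}/{}", output_dir.as_ref(), entry.filename))?;
			out.write_vectored(
				&entry.fragments.iter().map(|frag| IoSlice::new(&mmap[frag.start..frag.end])).collect::<Vec<_>>()
			)?;
		}

		Ok(())
	}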
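On the CLI side: since carve_log is declared without an #[arg(...)] attribute, clap's derive should expose it as an optional positional argument. Hypothetical invocations (long flag names derived from the field names):

	# Search and carve an image, writing log.txt alongside the carved files
	searchlight --input disk.img --out-dir carved --config Searchlight.toml

	# Re-carve files from a previous run's log (no config needed); the log path is positional
	searchlight carved/log.txt --out-dir carved-again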