diff --git a/Cargo.toml b/Cargo.toml index 9a247f79..2cd3cb93 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -22,7 +22,7 @@ cubeb = { version = "0.10", optional = true } dasp_sample = "0.11" float_eq = "1.0" hound = "3.5" -hrtf = "0.8" +hrtf = "0.8.1" llq = "0.1.1" log = "0.4" num-complex = "0.4" diff --git a/benches/my_benchmark.rs b/benches/my_benchmark.rs index 07584c52..7d866fe7 100644 --- a/benches/my_benchmark.rs +++ b/benches/my_benchmark.rs @@ -2,8 +2,7 @@ use iai::black_box; use web_audio_api::context::BaseAudioContext; use web_audio_api::context::OfflineAudioContext; -use web_audio_api::node::AudioNode; -use web_audio_api::node::AudioScheduledSourceNode; +use web_audio_api::node::{AudioNode, AudioScheduledSourceNode, PanningModelType}; const SAMPLE_RATE: f32 = 48000.; const DURATION: usize = 10; @@ -197,6 +196,27 @@ pub fn bench_analyser_node() { assert_eq!(ctx.start_rendering_sync().length(), SAMPLES); } +pub fn bench_hrtf_panners() { + let ctx = OfflineAudioContext::new(2, black_box(SAMPLES), SAMPLE_RATE); + + let mut panner1 = ctx.create_panner(); + panner1.set_panning_model(PanningModelType::HRTF); + panner1.position_x().set_value(10.0); + panner1.connect(&ctx.destination()); + + let mut panner2 = ctx.create_panner(); + panner2.set_panning_model(PanningModelType::HRTF); + panner2.position_x().set_value(-10.0); + panner2.connect(&ctx.destination()); + + let mut osc = ctx.create_oscillator(); + osc.connect(&panner1); + osc.connect(&panner2); + osc.start(); + + assert_eq!(ctx.start_rendering_sync().length(), SAMPLES); +} + iai::main!( bench_ctor, bench_sine, @@ -209,4 +229,5 @@ iai::main!( bench_stereo_positional, bench_stereo_panning_automation, bench_analyser_node, + bench_hrtf_panners, ); diff --git a/src/context/concrete_base.rs b/src/context/concrete_base.rs index bc71657e..a5aed670 100644 --- a/src/context/concrete_base.rs +++ b/src/context/concrete_base.rs @@ -193,22 +193,28 @@ impl ConcreteBaseAudioContext { }; (listener_params, 
destination_channel_config) - }; // nodes will drop now, so base.inner has no copies anymore + }; // Nodes will drop now, so base.inner has no copies anymore let mut base = base; let inner_mut = Arc::get_mut(&mut base.inner).unwrap(); inner_mut.listener_params = Some(listener_params); inner_mut.destination_channel_config = destination_channel_config; - // validate if the hardcoded node IDs line up + // Validate if the hardcoded node IDs line up debug_assert_eq!( base.inner.node_id_inc.load(Ordering::Relaxed), LISTENER_PARAM_IDS.end, ); - // (?) only for online context + // For an online AudioContext, pre-create the HRTF-database for panner nodes + if !offline { + crate::node::load_hrtf_processor(sample_rate as u32); + } + + // Boot the event loop thread that handles the events spawned by the render thread + // (we don't do this for offline rendering because it makes little sense, the graph cannot + // be mutated once rendering has started anyway) if let Some(event_channel) = event_recv { - // init event loop event_loop.run(event_channel); } diff --git a/src/node/panner.rs b/src/node/panner.rs index da123987..7af0ea5a 100644 --- a/src/node/panner.rs +++ b/src/node/panner.rs @@ -1,5 +1,7 @@ use std::any::Any; +use std::collections::HashMap; use std::f32::consts::PI; +use std::sync::{Mutex, OnceLock}; use float_eq::float_eq; use hrtf::{HrirSphere, HrtfContext, HrtfProcessor, Vec3}; @@ -13,6 +15,31 @@ use super::{ AudioNode, ChannelConfig, ChannelConfigOptions, ChannelCountMode, ChannelInterpretation, }; +/// Load the HRTF processor for the given sample_rate +/// +/// The included data contains the impulse responses at 44100 Hertz, so it needs to be resampled +/// for other values (which can easily take 100s of milliseconds). Therefore cache the result (per +/// sample rate) in a global variable and clone it every time a new panner is created. 
+pub(crate) fn load_hrtf_processor(sample_rate: u32) -> (HrtfProcessor, usize) { + static INSTANCE: OnceLock<Mutex<HashMap<u32, (HrtfProcessor, usize)>>> = OnceLock::new(); + let cache = INSTANCE.get_or_init(|| Mutex::new(HashMap::new())); + let mut guard = cache.lock().unwrap(); + guard + .entry(sample_rate) + .or_insert_with(|| { + let resource = include_bytes!("../../resources/IRC_1003_C.bin"); + let hrir_sphere = HrirSphere::new(&resource[..], sample_rate).unwrap(); + let len = hrir_sphere.len(); + + let interpolation_steps = 1; // TODO? + let samples_per_step = RENDER_QUANTUM_SIZE / interpolation_steps; + let processor = HrtfProcessor::new(hrir_sphere, interpolation_steps, samples_per_step); + + (processor, len) + }) + .clone() +} + /// Spatialization algorithm used to position the audio in 3D space #[derive(Debug, Copy, Clone, PartialEq, Eq, Default)] pub enum PanningModelType { @@ -167,14 +194,7 @@ struct HrtfState { } impl HrtfState { - fn new(hrir_sphere: HrirSphere) -> Self { - let len = hrir_sphere.len(); - - let interpolation_steps = 1; - let samples_per_step = RENDER_QUANTUM_SIZE / interpolation_steps; - - let processor = HrtfProcessor::new(hrir_sphere, interpolation_steps, samples_per_step); - + fn new(processor: HrtfProcessor, len: usize) -> Self { Self { len, processor, @@ -549,10 +569,9 @@ impl PannerNode { let hrtf_option = match value { PanningModelType::EqualPower => None, PanningModelType::HRTF => { - let resource = include_bytes!("../../resources/IRC_1003_C.bin"); let sample_rate = self.context().sample_rate() as u32; - let hrir_sphere = HrirSphere::new(&resource[..], sample_rate).unwrap(); - Some(HrtfState::new(hrir_sphere)) + let (processor, len) = load_hrtf_processor(sample_rate); + Some(HrtfState::new(processor, len)) } };