Skip to content

Commit

Permalink
Merge pull request #385 from orottier/feature/audioworkletnode
Browse files Browse the repository at this point in the history
AudioWorkletNode
  • Loading branch information
orottier authored Nov 18, 2023
2 parents 117b9ae + c513ad4 commit 65ce47f
Show file tree
Hide file tree
Showing 18 changed files with 987 additions and 160 deletions.
25 changes: 25 additions & 0 deletions benches/my_benchmark.rs
Original file line number Diff line number Diff line change
Expand Up @@ -12,13 +12,16 @@ use paste::paste;
use web_audio_api::context::BaseAudioContext;
use web_audio_api::context::OfflineAudioContext;
use web_audio_api::node::{AudioNode, AudioScheduledSourceNode, PanningModelType};
use web_audio_api::worklet::{AudioWorkletNode, AudioWorkletNodeOptions};
use web_audio_api::AudioBuffer;

const SAMPLE_RATE: f32 = 48000.;
const DURATION: usize = 10;
const SAMPLES: usize = SAMPLE_RATE as usize * DURATION;
const SAMPLES_SHORT: usize = SAMPLE_RATE as usize; // only 1 second for heavy benchmarks

mod worklet;

/// Load an audio buffer and cache the result
///
/// We don't want to measure the IO and decoding in most of our benchmarks, so by using this static
Expand Down Expand Up @@ -61,6 +64,7 @@ pub fn bench_sine_gain() {
let ctx = OfflineAudioContext::new(2, black_box(SAMPLES), SAMPLE_RATE);
let mut osc = ctx.create_oscillator();
let gain = ctx.create_gain();
gain.gain().set_value(0.5); // avoid happy path

osc.connect(&gain);
gain.connect(&ctx.destination());
Expand Down Expand Up @@ -242,6 +246,26 @@ pub fn bench_hrtf_panners() {
assert_eq!(ctx.start_rendering_sync().length(), SAMPLES_SHORT);
}

/// Benchmark a sine oscillator routed through an `AudioWorkletNode` gain stage.
///
/// Mirrors `bench_sine_gain`, but implements the gain as a worklet so the cost of
/// the worklet machinery can be compared against the native `GainNode`.
pub fn bench_sine_gain_with_worklet() {
    let ctx = OfflineAudioContext::new(2, black_box(SAMPLES), SAMPLE_RATE);
    let mut osc = ctx.create_oscillator();

    let gain_node =
        AudioWorkletNode::new::<worklet::GainProcessor>(&ctx, AudioWorkletNodeOptions::default());
    // non-unity gain so no happy-path shortcut can skip the multiply
    gain_node
        .parameters()
        .get("gain")
        .unwrap()
        .set_value(0.5);

    osc.connect(&gain_node);
    gain_node.connect(&ctx.destination());

    osc.start();

    assert_eq!(ctx.start_rendering_sync().length(), SAMPLES);
}

macro_rules! iai_or_criterion {
( $( $func:ident ),+ $(,)* ) => {
#[cfg(feature = "iai")]
Expand Down Expand Up @@ -283,4 +307,5 @@ iai_or_criterion!(
bench_stereo_panning_automation,
bench_analyser_node,
bench_hrtf_panners,
bench_sine_gain_with_worklet,
);
53 changes: 53 additions & 0 deletions benches/worklet.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,53 @@
use web_audio_api::render::RenderScope;
use web_audio_api::worklet::{AudioParamValues, AudioWorkletProcessor};
use web_audio_api::{AudioParamDescriptor, AutomationRate};

/// Benchmark-only worklet processor that scales its input by the `gain` AudioParam.
pub struct GainProcessor;

impl AudioWorkletProcessor for GainProcessor {
    type ProcessorOptions = ();

    /// Stateless processor, so construction ignores its options.
    fn constructor(_opts: Self::ProcessorOptions) -> Self {
        Self {}
    }

    /// Declare a single a-rate `gain` parameter with an unbounded range and unity default.
    fn parameter_descriptors() -> Vec<AudioParamDescriptor>
    where
        Self: Sized,
    {
        vec![AudioParamDescriptor {
            name: String::from("gain"),
            min_value: f32::MIN,
            max_value: f32::MAX,
            default_value: 1.,
            automation_rate: AutomationRate::A,
        }]
    }

    /// Multiply every sample of the first input by `gain`, writing to the first output.
    fn process<'a, 'b>(
        &mut self,
        inputs: &'b [&'a [&'a [f32]]],
        outputs: &'b mut [&'a mut [&'a mut [f32]]],
        params: AudioParamValues<'b>,
        _scope: &'b RenderScope,
    ) -> bool {
        let gain = params.get("gain");
        let channel_pairs = inputs[0].iter().zip(outputs[0].iter_mut());

        if gain.len() == 1 {
            // single value for the whole render quantum: hoist the multiplier
            let g = gain[0];
            for (in_ch, out_ch) in channel_pairs {
                for (dst, src) in out_ch.iter_mut().zip(in_ch.iter()) {
                    *dst = src * g;
                }
            }
        } else {
            // one multiplier per frame; `cycle` keeps the zip going for every channel
            for (in_ch, out_ch) in channel_pairs {
                let frames = in_ch.iter().zip(out_ch.iter_mut());
                for ((src, dst), g) in frames.zip(gain.iter().cycle()) {
                    *dst = src * g;
                }
            }
        }

        false
    }
}
192 changes: 192 additions & 0 deletions examples/audio_processor_trait.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,192 @@
use std::any::Any;

use rand::Rng;

use web_audio_api::context::{
AudioContext, AudioContextLatencyCategory, AudioContextOptions, AudioContextRegistration,
AudioParamId, BaseAudioContext,
};
use web_audio_api::node::{AudioNode, ChannelConfig};
use web_audio_api::render::{AudioParamValues, AudioProcessor, AudioRenderQuantum, RenderScope};
use web_audio_api::{AudioParam, AudioParamDescriptor, AutomationRate};

// Showcase how to create your own audio node
//
// `cargo run --release --example audio_processor_trait`
//
// If you are on Linux and use ALSA as audio backend, you might want to run
// the example with the `WEB_AUDIO_LATENCY=playback` env variable which will
// increase the buffer size to 1024
//
// `WEB_AUDIO_LATENCY=playback cargo run --release --example audio_processor_trait`

/// Flavour of noise the processor generates; switchable at runtime via `post_message`.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
enum NoiseColor {
    White, // zero mean, constant variance, uncorrelated in time
    Red, // zero mean, constant variance, serially correlated in time
}

/// Audio source node emitting white noise (random samples)
///
/// This is the control-thread (user-facing) half of the custom node; the actual
/// sample generation happens in `WhiteNoiseProcessor` on the render thread.
struct WhiteNoiseNode {
    /// handle to the audio context, required for all audio nodes
    registration: AudioContextRegistration,
    /// channel configuration (for up/down-mixing of inputs), required for all audio nodes
    channel_config: ChannelConfig,
    /// audio param controlling the volume (for educational purpose, use a GainNode otherwise)
    amplitude: AudioParam,
}

// Implement the AudioNode trait so the node can be connected into the audio graph.
impl AudioNode for WhiteNoiseNode {
    // a noise generator is a pure source: no inputs, exactly one output
    fn number_of_inputs(&self) -> usize {
        0
    }

    fn number_of_outputs(&self) -> usize {
        1
    }

    fn registration(&self) -> &AudioContextRegistration {
        &self.registration
    }

    fn channel_config(&self) -> &ChannelConfig {
        &self.channel_config
    }
}

impl WhiteNoiseNode {
    /// Construct a new WhiteNoiseNode in the given context
    fn new<C: BaseAudioContext>(context: &C) -> Self {
        context.register(move |registration| {
            // describe the user-controllable "amplitude" parameter
            let descriptor = AudioParamDescriptor {
                name: String::from("amplitude"),
                min_value: 0.,
                max_value: 1.,
                default_value: 1.,
                automation_rate: AutomationRate::A,
            };
            let (amplitude, amplitude_proc) = context.create_audio_param(descriptor, &registration);

            // render-thread half: performs the actual sample generation
            let processor = WhiteNoiseProcessor {
                amplitude: amplitude_proc,
                color: NoiseColor::White,
            };

            // control-thread half: the user-facing handle
            let node = WhiteNoiseNode {
                registration,
                channel_config: ChannelConfig::default(),
                amplitude,
            };

            (node, Box::new(processor))
        })
    }

    /// The Amplitude AudioParam
    fn amplitude(&self) -> &AudioParam {
        &self.amplitude
    }

    /// Ask the render thread to switch to a different noise color
    fn set_noise_color(&self, color: NoiseColor) {
        self.registration.post_message(color);
    }
}

/// Render-thread counterpart of `WhiteNoiseNode`: generates the noise samples.
struct WhiteNoiseProcessor {
    /// id to look up the amplitude values in `AudioParamValues`
    amplitude: AudioParamId,
    /// current noise flavour; updated via `onmessage`
    color: NoiseColor,
}

impl AudioProcessor for WhiteNoiseProcessor {
    /// Render one quantum of noise into the output buffer.
    ///
    /// Runs on the render thread; `_inputs` is unused because this is a source node.
    fn process(
        &mut self,
        _inputs: &[AudioRenderQuantum],
        outputs: &mut [AudioRenderQuantum],
        params: AudioParamValues<'_>,
        _scope: &RenderScope,
    ) -> bool {
        // single output node, with a stereo config
        let output = &mut outputs[0];
        output.set_number_of_channels(2);

        // get the audio param values
        let amplitude_values = params.get(&self.amplitude);

        // edit the output buffer in place, channel by channel
        output.channels_mut().iter_mut().for_each(|buf| {
            let mut rng = rand::thread_rng();

            // audio param buffer length is either 1 (k-rate, or when all a-rate samples are equal) or
            // 128 (a-rate), so use `cycle` to be able to zip it with the output buffer
            let amplitude_values_cycled = amplitude_values.iter().cycle();

            // NOTE: reset per channel and per quantum, so red noise correlation does not
            // carry across quantum boundaries
            let mut prev_sample = 0.; // TODO, inherit from previous render quantum

            buf.iter_mut()
                .zip(amplitude_values_cycled)
                .for_each(|(output_sample, amplitude)| {
                    let mut value: f32 = rng.gen_range(-1.0..1.0);
                    if self.color == NoiseColor::Red {
                        // red noise samples correlate with their previous value
                        value = value * 0.2 + prev_sample * 0.8;
                        prev_sample = value;
                    }
                    *output_sample = *amplitude * value
                })
        });

        true // source node will always be active
    }

    /// Handle messages posted from the control thread (`AudioContextRegistration::post_message`).
    fn onmessage(&mut self, msg: &mut dyn Any) {
        // handle incoming signals requesting for change of color
        if let Some(color) = msg.downcast_ref::<NoiseColor>() {
            self.color = *color;
            return;
        }

        // ...add more message handlers here...

        // unknown payloads are logged and dropped rather than panicking
        log::warn!("WhiteNoiseProcessor: Dropping incoming message {msg:?}");
    }
}

fn main() {
    env_logger::init();

    // honor the WEB_AUDIO_LATENCY=playback escape hatch (larger buffers, see file header)
    let playback_latency = matches!(
        std::env::var("WEB_AUDIO_LATENCY").as_deref(),
        Ok("playback")
    );
    let latency_hint = if playback_latency {
        AudioContextLatencyCategory::Playback
    } else {
        AudioContextLatencyCategory::default()
    };

    let context = AudioContext::new(AudioContextOptions {
        latency_hint,
        ..AudioContextOptions::default()
    });

    // construct new node in this context
    let noise = WhiteNoiseNode::new(&context);

    // schedule the amplitude: quiet now, full volume after two seconds
    let amplitude = noise.amplitude();
    amplitude.set_value(0.3);
    amplitude.set_value_at_time(1., 2.);

    // connect to speakers and enjoy listening
    noise.connect(&context.destination());

    println!("Low volume");
    std::thread::sleep(std::time::Duration::from_secs(2));
    println!("High volume");
    std::thread::sleep(std::time::Duration::from_secs(2));

    println!("Switch to red noise");
    noise.set_noise_color(NoiseColor::Red);
    std::thread::sleep(std::time::Duration::from_secs(4));
}
Loading

0 comments on commit 65ce47f

Please sign in to comment.