gui: Add terminator packet and 200ms voice hold for VAD
Implements proper voice activity detection with:

- 200ms hold period after audio drops below the threshold, to prevent choppy cutoffs
- Terminator packet (end_bit=true) when speech ends, to signal stream completion
- TransmitState enum to track transmission state across frames

This ensures other Mumble clients receive proper end-of-speech signaling for clean audio termination and correct "talking" indicator behavior.

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
This commit is contained in:
+52
-17
@@ -1,7 +1,9 @@
|
||||
use crate::app::Command;
|
||||
use crate::effects::{AudioProcessor, AudioProcessorSender};
|
||||
use crate::effects::{AudioProcessor, AudioProcessorSender, TransmitState};
|
||||
use color_eyre::eyre::{bail, eyre, Error};
|
||||
use crossbeam::atomic::AtomicCell;
|
||||
use dioxus::prelude::*;
|
||||
use std::sync::Arc;
|
||||
use futures::{AsyncRead, AsyncWrite};
|
||||
use gloo_timers::future::TimeoutFuture;
|
||||
use js_sys::Float32Array;
|
||||
@@ -118,7 +120,7 @@ impl AudioSystem {
|
||||
self.processors.store(Some(processor))
|
||||
}
|
||||
|
||||
pub fn start_recording(&mut self, each: impl FnMut(Vec<u8>) + 'static) -> Result<(), Error> {
|
||||
pub fn start_recording(&mut self, each: impl FnMut(Vec<u8>, bool) + 'static) -> Result<(), Error> {
|
||||
let audio_context_worklet = self.webctx.clone();
|
||||
let processors = self.processors.clone();
|
||||
spawn(async move {
|
||||
@@ -222,22 +224,24 @@ impl PromiseExt for Promise {
|
||||
}
|
||||
}
|
||||
|
||||
fn process_audio(frame: &JsValue, processor: &mut AudioProcessor) {
|
||||
fn process_audio(frame: &JsValue, processor: &mut AudioProcessor) -> TransmitState {
|
||||
let Ok(samples) = Reflect::get(&frame, &"data".into()) else {
|
||||
return;
|
||||
return TransmitState::Silent;
|
||||
};
|
||||
let Ok(samples) = samples.dyn_into::<Float32Array>() else {
|
||||
return;
|
||||
return TransmitState::Silent;
|
||||
};
|
||||
let input = samples.to_vec();
|
||||
let mut output = Vec::with_capacity(input.len());
|
||||
processor.process(&input, 1, &mut output);
|
||||
let state = processor.process(&input, 1, &mut output);
|
||||
samples.copy_from(&output);
|
||||
|
||||
state
|
||||
}
|
||||
|
||||
async fn run_encoder_worklet(
|
||||
audio_context: &AudioContext,
|
||||
mut each: impl FnMut(Vec<u8>) + 'static,
|
||||
mut each: impl FnMut(Vec<u8>, bool) + 'static,
|
||||
processors: AudioProcessorSender,
|
||||
) -> Result<AudioWorkletNode, Error> {
|
||||
let constraints = MediaStreamConstraints::new();
|
||||
@@ -262,12 +266,19 @@ async fn run_encoder_worklet(
|
||||
let encoder_error: Closure<dyn FnMut(JsValue)> =
|
||||
Closure::new(|e| error!("error encoding audio {:?}", e));
|
||||
|
||||
// Shared state to signal terminator between onmessage and output closures
|
||||
// The output closure runs asynchronously after encoding completes
|
||||
let pending_terminator = Arc::new(AtomicCell::new(false));
|
||||
let pending_terminator_output = pending_terminator.clone();
|
||||
|
||||
// This knows what MediaStreamTrackGenerator to use as it closes around it
|
||||
let output: Closure<dyn FnMut(EncodedAudioChunk)> =
|
||||
Closure::new(move |audio_data: EncodedAudioChunk| {
|
||||
let mut array = vec![0u8; audio_data.byte_length() as usize];
|
||||
audio_data.copy_to_with_u8_slice(&mut array);
|
||||
each(array);
|
||||
// Check if this frame was marked as a terminator
|
||||
let is_terminator = pending_terminator_output.swap(false);
|
||||
each(array, is_terminator);
|
||||
});
|
||||
|
||||
let audio_encoder = AudioEncoder::new(&AudioEncoderInit::new(
|
||||
@@ -294,17 +305,41 @@ async fn run_encoder_worklet(
|
||||
}
|
||||
|
||||
let frame = event.data();
|
||||
process_audio(&frame, &mut current_processor);
|
||||
let state = process_audio(&frame, &mut current_processor);
|
||||
|
||||
match AudioData::new(frame.unchecked_ref()) {
|
||||
Ok(data) => {
|
||||
let _ = audio_encoder.encode(&data);
|
||||
match state {
|
||||
TransmitState::Silent => {
|
||||
// Don't encode or send anything
|
||||
return;
|
||||
}
|
||||
Err(err) => {
|
||||
error!(
|
||||
"error creating AudioData object {:?} during event {:?}",
|
||||
err, event,
|
||||
);
|
||||
TransmitState::Transmitting => {
|
||||
// Normal transmission
|
||||
match AudioData::new(frame.unchecked_ref()) {
|
||||
Ok(data) => {
|
||||
let _ = audio_encoder.encode(&data);
|
||||
}
|
||||
Err(err) => {
|
||||
error!(
|
||||
"error creating AudioData object {:?} during event {:?}",
|
||||
err, event,
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
TransmitState::Terminator => {
|
||||
// Mark this as a terminator before encoding
|
||||
pending_terminator.store(true);
|
||||
match AudioData::new(frame.unchecked_ref()) {
|
||||
Ok(data) => {
|
||||
let _ = audio_encoder.encode(&data);
|
||||
}
|
||||
Err(err) => {
|
||||
error!(
|
||||
"error creating AudioData object {:?} during event {:?}",
|
||||
err, event,
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
Reference in New Issue
Block a user