try to run denoising

This commit is contained in:
2025-10-25 18:15:26 -06:00
parent cfb8144561
commit 260decc9af
7 changed files with 261 additions and 42 deletions
+21
View File
@@ -48,6 +48,9 @@ pub enum Command {
channel: ChannelId,
user: UserId,
},
UpdateMicEffects {
denoise: bool,
},
Disconnect,
}
@@ -633,6 +636,7 @@ pub fn ControlView(config: Resource<ClientConfig>) -> Element {
},
};
let denoise = use_signal(|| false);
rsx!(
// Server control
div {
@@ -672,6 +676,23 @@ pub fn ControlView(config: Resource<ClientConfig>) -> Element {
}
}
span { class: "{spacer}" }
button {
class: match denoise() {
true => toggle_button_on,
false => toggle_button,
},
role: "switch",
aria_checked: denoise(),
onclick: move |_| {
let new_denoise = !denoise();
*denoise.write_unchecked() = new_denoise;
net.send(UpdateMicEffects { denoise: new_denoise })
},
match denoise() {
true => rsx!(span { class: "material-symbols-outlined", style: "{button_style}", "cadence"}),
false => rsx!(span { class: "material-symbols-outlined", style: "{button_style}", "graphic_eq"}),
}
}
button {
class: match mute || self_mute {
true => toggle_button_on,
+28
View File
@@ -0,0 +1,28 @@
use crossbeam::atomic::AtomicCell;
use std::sync::Arc;
/// Effect chain applied to captured microphone frames.
///
/// Currently the only effect is optional DeepFilterNet denoising; a
/// default-constructed processor is a plain pass-through.
#[derive(Default)]
pub struct AudioProcessor {
    // `Some` enables denoising via DeepFilterNet; `None` passes audio through.
    df: Option<::df::DFState>,
}

impl AudioProcessor {
    /// Create a processor with denoising enabled.
    pub fn new_denoising() -> Self {
        AudioProcessor {
            df: Some(::df::DFState::default()),
        }
    }

    /// Run one frame of samples through the effect chain and return the
    /// processed samples. The output length always equals the input length.
    pub fn process(&mut self, audio: &[f32]) -> Box<[f32]> {
        match &mut self.df {
            Some(df) => {
                let mut output: Box<[f32]> = vec![0f32; audio.len()].into();
                // NOTE(review): assumes `process_frame` fills the whole output
                // buffer for any input length — confirm the frame-size
                // requirements of `df::DFState`.
                df.process_frame(audio, &mut output);
                output
            }
            // Pass-through: copy the input directly rather than zero-filling
            // a buffer and then overwriting it.
            None => Box::from(audio),
        }
    }
}
pub type AudioProcessorSender = Arc<AtomicCell<Option<AudioProcessor>>>;
+7 -2
View File
@@ -1,4 +1,5 @@
use crate::app::Command;
use crate::effects::AudioProcessor;
use color_eyre::eyre::{eyre, Error};
use cpal::traits::{DeviceTrait, HostTrait};
use dioxus::hooks::{UnboundedReceiver, UnboundedSender};
@@ -13,7 +14,7 @@ use tokio::net::TcpStream;
use tokio_rustls::rustls;
use tokio_rustls::rustls::client::danger::{HandshakeSignatureValid, ServerCertVerifier};
use tokio_rustls::rustls::pki_types::{CertificateDer, ServerName, UnixTime};
use tokio_rustls::rustls::ClientConfig;
use tokio_rustls::rustls::ClientConfig as RlsClientConfig;
use tokio_rustls::rustls::DigitallySignedStruct;
use tokio_rustls::TlsConnector;
use tokio_util::compat::{TokioAsyncReadCompatExt as _, TokioAsyncWriteCompatExt as _};
@@ -50,6 +51,10 @@ impl AudioSystem {
})
}
/// Install `processor` as the effect chain for the capture stream.
///
/// NOTE(review): not yet wired up on this (cpal) backend — the processor is
/// currently dropped unused. The web backend stores it for the worklet
/// message handler to pick up; this backend should do the equivalent.
pub fn set_processor(&self, processor: AudioProcessor) {
// TODO
}
pub fn start_recording(&mut self, each: impl FnMut(Vec<u8>) + 'static) -> Result<(), Error> {
// TODO
Ok(())
@@ -192,7 +197,7 @@ pub async fn network_connect(
) -> Result<(), Error> {
info!("connecting");
let config = ClientConfig::builder()
let config = RlsClientConfig::builder()
.dangerous()
.with_custom_certificate_verifier(Arc::new(NoCertificateVerification))
.with_no_client_auth();
+46 -30
View File
@@ -1,12 +1,11 @@
use crate::app::Command;
use crate::effects::{AudioProcessor, AudioProcessorSender};
use color_eyre::eyre::{bail, eyre, Error};
use dioxus::prelude::*;
use futures::{AsyncRead, AsyncWrite};
use futures_channel::mpsc::UnboundedSender;
use gloo_timers::future::TimeoutFuture;
use mumble_protocol::control::{ClientControlCodec, ControlPacket};
use mumble_protocol::voice::{VoicePacket, VoicePacketPayload};
use mumble_protocol::Serverbound;
use js_sys::Float32Array;
use mumble_protocol::control::ClientControlCodec;
use mumble_web2_common::ClientConfig;
use reqwest::Url;
use std::time::Duration;
@@ -72,20 +71,33 @@ impl<T> ResultExt<T> for Result<T, JsError> {
self.map_err(|e| JsValue::from(e)).ey()
}
}
pub struct AudioSystem(AudioContext);
/// Browser-backed audio subsystem: owns the Web Audio context and the
/// handoff cell used to swap the microphone effect chain at runtime.
pub struct AudioSystem {
// Web Audio context used for both playback and the encoder worklet.
webctx: AudioContext,
// Shared cell the UI writes new `AudioProcessor`s into; the worklet
// message handler takes them out on the next captured frame.
processors: AudioProcessorSender,
}
impl AudioSystem {
/// Build the audio system: set up the Web Audio context for playback and
/// an (initially empty) handoff cell for microphone effect processors.
pub fn new() -> Result<Self, Error> {
    // No processor is installed yet; `set_processor` publishes one later.
    let processors = AudioProcessorSender::default();
    Ok(AudioSystem {
        webctx: configure_audio_context(),
        processors,
    })
}
/// Atomically publish `processor`; the encoder worklet's message handler
/// swaps it in when the next captured frame arrives.
pub fn set_processor(&self, processor: AudioProcessor) {
    self.processors.store(Some(processor));
}
pub fn start_recording(&mut self, each: impl FnMut(Vec<u8>) + 'static) -> Result<(), Error> {
let audio_context_worklet = self.0.clone();
let audio_context_worklet = self.webctx.clone();
let processors = self.processors.clone();
spawn(async move {
match run_encoder_worklet(&audio_context_worklet, each).await {
match run_encoder_worklet(&audio_context_worklet, each, processors).await {
Ok(node) => info!("created encoder worklet: {:?}", &node),
Err(err) => error!("could not create encoder worklet: {err}"),
}
@@ -94,8 +106,6 @@ impl AudioSystem {
}
pub fn create_player(&mut self) -> Result<AudioPlayer, Error> {
let audio_context = &self.0;
let audio_stream_generator =
MediaStreamTrackGenerator::new(&MediaStreamTrackGeneratorInit::new("audio")).ey()?;
@@ -105,12 +115,10 @@ impl AudioSystem {
let media_stream = MediaStream::new_with_tracks(&js_tracks).ey()?;
// Create MediaStreamAudioSourceNode
let audio_source = audio_context
.create_media_stream_source(&media_stream)
.ey()?;
let audio_source = self.webctx.create_media_stream_source(&media_stream).ey()?;
// Connect output of audio_source to audio_context (browser audio)
audio_source
.connect_with_audio_node(&audio_context.destination())
.connect_with_audio_node(&self.webctx.destination())
.ey()?;
// Create callback functions for AudioDecoder
@@ -194,9 +202,22 @@ impl PromiseExt for Promise {
}
}
/// Run one captured audio frame through `processor`, mutating it in place.
///
/// The frame is expected to expose its samples as a `data` property holding
/// a `Float32Array`; frames without one are left untouched.
fn process_audio(frame: &JsValue, processor: &mut AudioProcessor) {
    // `frame` is already a reference — pass it directly instead of `&frame`
    // (which was a needless `&&JsValue` borrow).
    let Ok(samples) = Reflect::get(frame, &"data".into()) else {
        return;
    };
    let Ok(samples) = samples.dyn_into::<Float32Array>() else {
        return;
    };
    // Copy out of JS memory, process, then write the result back in place.
    // `process` returns a buffer of the same length, as `copy_from` requires.
    let input = samples.to_vec();
    let output = processor.process(&input);
    samples.copy_from(&output);
}
async fn run_encoder_worklet(
audio_context: &AudioContext,
mut each: impl FnMut(Vec<u8>) + 'static,
processors: AudioProcessorSender,
) -> Result<AudioWorkletNode, Error> {
let constraints = MediaStreamConstraints::new();
constraints.set_audio(&JsValue::TRUE);
@@ -264,23 +285,18 @@ async fn run_encoder_worklet(
audio_encoder.configure(&encoder_config);
info!("created audio encoder");
let download_buffer = std::cell::RefCell::new(Vec::new());
let mut current_processor = AudioProcessor::default();
let onmessage: Closure<dyn FnMut(MessageEvent)> = Closure::new(move |event: MessageEvent| {
match AudioData::new(event.data().unchecked_ref()) {
Ok(data) => {
let x = web_sys::AudioDataCopyToOptions::new(0);
x.set_format(web_sys::AudioSampleFormat::F32);
let mut sub_buffer = vec![0; data.allocation_size(&x).unwrap() as usize];
data.copy_to_with_u8_slice(&mut sub_buffer, &x);
download_buffer.borrow_mut().append(&mut sub_buffer);
if download_buffer.borrow().len() > 48000 * 10 * 4 {
//pub fn download_data(data: Vec<u8>, filename: &str) -> Result<(), JsValue> {
//download_data(download_buffer.borrow().to_vec(), "download_buffer.pcm32");
download_buffer.borrow_mut().clear();
}
if let Some(new_processor) = processors.take() {
current_processor = new_processor;
}
audio_encoder.encode(&data);
let frame = event.data();
process_audio(&frame, &mut current_processor);
match AudioData::new(frame.unchecked_ref()) {
Ok(data) => {
let _ = audio_encoder.encode(&data);
}
Err(err) => {
error!(
+13 -1
View File
@@ -29,7 +29,11 @@ use tracing::debug;
use tracing::error;
use tracing::info;
use crate::effects::AudioProcessor;
use crate::imp::AudioSystem;
pub mod app;
mod effects;
pub mod imp;
mod msghtml;
@@ -161,7 +165,7 @@ pub async fn network_loop<R: imp::ImpRead, W: imp::ImpWrite>(
match command {
Some(Command::Disconnect) => break,
Some(command) => {
let res = accept_command(command, &mut send_chan);
let res = accept_command(command, &mut send_chan, &mut audio);
if let Err(err) = res {
info!("error accepting command {:?}", err)
}
@@ -179,6 +183,7 @@ pub async fn network_loop<R: imp::ImpRead, W: imp::ImpWrite>(
fn accept_command(
command: Command,
send_chan: &mut UnboundedSender<ControlPacket<mumble_protocol::Serverbound>>,
audio: &mut AudioSystem,
) -> Result<(), Error> {
use Command::*;
let Some(session) = STATE.server.read().session else {
@@ -280,6 +285,13 @@ fn accept_command(
let _ = send_chan.unbounded_send(u.into());
}
Connect { .. } | Disconnect => (),
UpdateMicEffects { denoise } => {
if denoise {
audio.set_processor(AudioProcessor::new_denoising());
} else {
audio.set_processor(AudioProcessor::default());
}
}
}
Ok(())