diff --git a/src/lib.rs b/src/lib.rs index ac0a887..f88c082 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1,5 +1,6 @@ pub mod app; +use std::collections::HashMap; use std::time::Duration; use app::STATE; @@ -8,6 +9,7 @@ use futures::SinkExt; use futures::StreamExt; use mumble_protocol::control::ControlPacket; use mumble_protocol::control::{msgs, ClientControlCodec}; +use mumble_protocol::voice::VoicePacketDst; use wasm_bindgen::prelude::*; use wasm_bindgen_futures::JsFuture; use web_sys::console; @@ -31,25 +33,71 @@ use wasm_bindgen_futures::spawn_local as spawn; // Borrowed from // https://github.com/security-union/videocall-rs/blob/main/videocall-client/src/decode/config.rs#L6 -fn configure_audio_context(audio_stream_generator: &MediaStreamTrackGenerator) -> AudioContext { - let js_tracks = web_sys::js_sys::Array::new(); - js_tracks.push(audio_stream_generator); - let media_stream = MediaStream::new_with_tracks(&js_tracks).unwrap(); +fn configure_audio_context() -> AudioContext { let mut audio_context_options = AudioContextOptions::new(); audio_context_options.sample_rate(48000 as f32); let audio_context = AudioContext::new_with_context_options(&audio_context_options).unwrap(); - let gain_node = audio_context.create_gain().unwrap(); - gain_node.set_channel_count(1); - let source = audio_context - .create_media_stream_source(&media_stream) - .unwrap(); - let _ = source.connect_with_audio_node(&gain_node).unwrap(); - let _ = gain_node - .connect_with_audio_node(&audio_context.destination()) - .unwrap(); audio_context } +fn create_decoder(audio_context: &AudioContext) -> AudioDecoder { + let audio_stream_generator = + MediaStreamTrackGenerator::new(&MediaStreamTrackGeneratorInit::new("audio")).unwrap(); + + // Create MediaStream from MediaStreamTrackGenerator + let js_tracks = web_sys::js_sys::Array::new(); + js_tracks.push(&audio_stream_generator); + let media_stream = MediaStream::new_with_tracks(&js_tracks).unwrap(); + + // Create MediaStreamAudioSourceNode + let 
audio_source = audio_context.create_media_stream_source(&media_stream).unwrap(); + // Connect output of audio_source to audio_context (browser audio) + audio_source.connect_with_audio_node(&audio_context.destination()).unwrap(); + + + // Create callback functions for AudioDecoder + let error = Closure::wrap(Box::new(move |e: JsValue| { + console::log_1(&e); + }) as Box<dyn FnMut(JsValue)>); + + + // This knows what MediaStreamTrackGenerator to use as it closes around it + let output = Closure::wrap(Box::new(move |audio_data: AudioData| { + let writable = audio_stream_generator.writable(); + if writable.locked() { + return; + } + if let Err(e) = writable.get_writer().map(|writer| { + spawn(async move { + if let Err(e) = JsFuture::from(writer.ready()).await { + console::log_1(&format!("write chunk error {:?}", e).into()); + } + if let Err(e) = JsFuture::from(writer.write_with_chunk(&audio_data)).await { + console::log_1(&format!("write chunk error {:?}", e).into()); + }; + writer.release_lock(); + }); + }) { + console::log_1(&e); + } + }) as Box<dyn FnMut(AudioData)>); + + let audio_decoder = AudioDecoder::new(&AudioDecoderInit::new( + error.as_ref().unchecked_ref(), + output.as_ref().unchecked_ref(), + )) + .unwrap(); + + audio_decoder.configure(&AudioDecoderConfig::new("opus", 1, 48000)); + console::log_1(&"Created Audio Decoder".into()); + + // This is required to prevent these from being deallocated + error.forget(); + output.forget(); + + audio_decoder +} + pub async fn network_entrypoint() { // This sleep is to allow the user to interact with the window so the MediaStream // can be created. This works around Chrome's autoplay policy rules. 
This will @@ -73,7 +121,6 @@ pub async fn network_entrypoint() { ) .unwrap(); - //web_sys::js_sys::Reflect::set(&object, &JsValue::from_str("value"), &hash).unwrap(); web_sys::js_sys::Reflect::set(&object, &"value".into(), &hash).unwrap(); let array = web_sys::js_sys::Array::new(); @@ -180,49 +227,12 @@ pub async fn network_entrypoint() { }); } - // Create callback functions for AudioDecoder - let error = Closure::wrap(Box::new(move |e: JsValue| { - console::log_1(&e); - }) as Box<dyn FnMut(JsValue)>); // Create MediaStreams to playback decoded audio - let audio_stream_generator = - MediaStreamTrackGenerator::new(&MediaStreamTrackGeneratorInit::new("audio")).unwrap(); // The audio context is used to reproduce audio. - let _audio_context = configure_audio_context(&audio_stream_generator); - - let output = Closure::wrap(Box::new(move |audio_data: AudioData| { - console::log_1(&"Got audio".into()); - console::log_1(&audio_data.clone().into()); - let writable = audio_stream_generator.writable(); - if writable.locked() { - return; - } - if let Err(e) = writable.get_writer().map(|writer| { - wasm_bindgen_futures::spawn_local(async move { - if let Err(e) = JsFuture::from(writer.ready()).await { - console::log_1(&format!("write chunk error {:?}", e).into()); - } - if let Err(e) = JsFuture::from(writer.write_with_chunk(&audio_data)).await { - console::log_1(&format!("write chunk error {:?}", e).into()); - }; - writer.release_lock(); - }); - }) { - console::log_1(&e); - } - }) as Box<dyn FnMut(AudioData)>); - - let audio_decoder = AudioDecoder::new(&AudioDecoderInit::new( - error.as_ref().unchecked_ref(), - output.as_ref().unchecked_ref(), - )) - .unwrap(); - - console::log_1(&"Created Audio Decoder".into()); - console::log_1(&audio_decoder); - - audio_decoder.configure(&AudioDecoderConfig::new("opus", 1, 48000)); + let audio_context = configure_audio_context(); + // Create map of session_id -> AudioDecoder + let mut decoder_map = HashMap::new(); loop { match reader.next().await { @@ -238,7 +248,13 @@ pub async fn 
network_entrypoint() { payload, position_info, } => { - console::log_1(&"Voice packet found".into()); + // Get or create audio decoder for this user + let audio_decoder = decoder_map.entry(session_id).or_insert_with(|| create_decoder(&audio_context)); + // This will over time (as users join and leave) leak + // AudioDecoders, MediaStreamTrackGenerators, MediaStreams, and MediaStreamAudioSourceNodes. + // A better way to handle this would be to delete and create all the audio + // infra on channel join and update it as new users join the channel, dropping + // any audio packets that come in the meantime. if let mumble_protocol::voice::VoicePacketPayload::Opus( audio_payload, end_bit, @@ -253,7 +269,7 @@ pub async fn network_entrypoint() { )) .unwrap(), ); - console::log_1(&"Oueued audio chunk for decoding".into()); + //console::log_1(&"Oueued audio chunk for decoding".into()); } } _ => { @@ -261,8 +277,6 @@ pub async fn network_entrypoint() { // I think? } } - console::log_1(&"Got UDP tunnel".into()); - console::log_1(&format!("{:#?}", u).into()); } _ => { console::log_1(&format!("{:#?}", msg).into());