mic sorta working but awful

This commit is contained in:
2024-07-02 19:32:43 -06:00
parent 94800cc6fb
commit 30245b2fb8
4 changed files with 310 additions and 134 deletions
+151 -23
View File
@@ -1,35 +1,48 @@
pub mod app;
use std::collections::HashMap;
use std::time::Duration;
use anyhow::Error;
use app::STATE;
use async_std::channel::Sender;
use futures::SinkExt;
use futures::StreamExt;
use manganis::{file, mg};
use mumble_protocol::control::ControlPacket;
use mumble_protocol::control::{msgs, ClientControlCodec};
use mumble_protocol::voice::VoicePacket;
use mumble_protocol::voice::VoicePacketDst;
use mumble_protocol::voice::VoicePacketPayload;
use std::collections::HashMap;
use std::time::Duration;
use wasm_bindgen::prelude::*;
use wasm_bindgen_futures::JsFuture;
use wasm_bindgen_futures::spawn_local as spawn;
use wasm_bindgen_futures::{future_to_promise, JsFuture};
use web_sys::console;
use web_sys::js_sys::Promise;
use web_sys::js_sys::Reflect;
use web_sys::js_sys::Uint8Array;
use web_sys::window;
use web_sys::AudioContext;
use web_sys::AudioContextOptions;
use web_sys::AudioData;
use web_sys::AudioDataInit;
use web_sys::AudioDecoder;
use web_sys::AudioDecoderConfig;
use web_sys::AudioDecoderInit;
use web_sys::AudioEncoder;
use web_sys::AudioEncoderConfig;
use web_sys::AudioEncoderInit;
use web_sys::AudioWorkletNode;
use web_sys::EncodedAudioChunk;
use web_sys::EncodedAudioChunkInit;
use web_sys::EncodedAudioChunkType;
use web_sys::MediaStream;
use web_sys::MediaStreamConstraints;
use web_sys::MediaStreamTrackGenerator;
use web_sys::MediaStreamTrackGeneratorInit;
use web_sys::MessageEvent;
use web_sys::WebTransport;
use web_sys::WebTransportOptions;
use wasm_bindgen_futures::spawn_local as spawn;
use web_sys::WorkletOptions;
// Borrowed from
// https://github.com/security-union/videocall-rs/blob/main/videocall-client/src/decode/config.rs#L6
@@ -40,6 +53,108 @@ fn configure_audio_context() -> AudioContext {
audio_context
}
trait PromiseExt {
fn into_future(self) -> JsFuture;
}
impl PromiseExt for Promise {
fn into_future(self) -> JsFuture {
self.into()
}
}
/// Capture microphone audio, encode it with the WebCodecs `AudioEncoder`
/// (Opus, mono, 48 kHz), and forward each encoded chunk to the server as a
/// Mumble `UDPTunnel` voice packet on the `packets` channel.
///
/// Data flow: getUserMedia stream -> MediaStreamAudioSourceNode ->
/// AudioWorklet ("rust_mic_worklet") -> worklet `port.onmessage` ->
/// `AudioEncoder::encode` -> encoder `output` callback -> `packets`.
///
/// Returns the created `AudioWorkletNode`; any failure from the underlying
/// web-sys calls is propagated as a `JsValue` error.
async fn create_encoder_worklet(
audio_context: &AudioContext,
packets: Sender<ControlPacket<mumble_protocol::Serverbound>>,
) -> Result<AudioWorkletNode, JsValue> {
// Ask the browser for an audio-only capture stream (microphone).
let stream = window()
.unwrap()
.navigator()
.media_devices()?
.get_user_media_with_constraints(MediaStreamConstraints::new().audio(&JsValue::TRUE))?
.into_future()
.await?
// getUserMedia resolves to a generic JsValue; downcast to MediaStream.
.dyn_into()
.map_err(|e| JsError::new(&format!("not a stream: {e:?}")))?;
// Pass the compiled wasm module to the worklet via `processorOptions`
// (set through Reflect because web-sys has no typed accessor for it)
// — presumably so the worklet script can instantiate the same module;
// TODO(review): confirm the worklet actually reads this.
let options = WorkletOptions::new();
Reflect::set(
&options,
&"processorOptions".into(),
&wasm_bindgen::module(),
)?;
let module = "rust_mic_worklet.js";
console::log_1(&format!("Loading mic worklet from {module:?}").into());
// Load the worklet processor script into the AudioContext's worklet scope.
audio_context
.audio_worklet()?
.add_module_with_options(module, &options)?
.into_future()
.await?;
// Wire the mic stream into the graph and instantiate the processor
// registered by rust_mic_worklet.js under the name "rust_mic_worklet".
let source = audio_context.create_media_stream_source(&stream)?;
let worklet_node = AudioWorkletNode::new(audio_context, "rust_mic_worklet")?;
// Encoder error callback: just log the error to the console.
let error: Closure<dyn FnMut(JsValue)> = Closure::new(|e| console::error_1(&e));
// This knows what MediaStreamTrackGenerator to use as it closes around it
// Per-packet sequence number required by the Mumble voice protocol;
// wraps on overflow.
let mut sequence_num = 0;
// Encoder output callback: copy the encoded Opus bytes out of the chunk
// and ship them as a serverbound UDPTunnel voice packet.
let output: Closure<dyn FnMut(EncodedAudioChunk)> =
Closure::new(move |audio_data: EncodedAudioChunk| {
let mut array = vec![0u8; audio_data.byte_length() as usize];
audio_data.copy_to_with_u8_array(&mut array);
// try_send: if the channel is full the packet is silently dropped
// rather than blocking the audio callback.
let _ = packets.try_send(ControlPacket::UDPTunnel(Box::new(VoicePacket::Audio {
_dst: std::marker::PhantomData,
target: 0,
// Serverbound audio carries no session id (unit type in the
// mumble-protocol crate); the server fills it in for clients.
session_id: (),
seq_num: sequence_num,
// `false` = not the end of a transmission.
payload: VoicePacketPayload::Opus(array.into(), false),
position_info: None,
})));
sequence_num = sequence_num.wrapping_add(1);
});
// NOTE(review): web-sys AudioEncoderInit::new takes (error, output) in
// this order — verify against the web-sys version in Cargo.lock.
let audio_encoder = AudioEncoder::new(&AudioEncoderInit::new(
error.as_ref().unchecked_ref(),
output.as_ref().unchecked_ref(),
))
.unwrap();
// This is required to prevent these from being deallocated
error.forget();
output.forget();
// Configure for Opus, mono, 48 kHz — matching what Mumble expects.
audio_encoder.configure(
&AudioEncoderConfig::new("opus")
.number_of_channels(1)
.sample_rate(48000),
);
console::log_1(&"Created Audio Encoder".into());
// Messages from the worklet carry raw audio; reinterpret each payload as
// an AudioData init and feed it to the encoder. On failure, log and keep
// going — one bad message should not kill the capture pipeline.
let onmessage: Closure<dyn FnMut(MessageEvent)> = Closure::new(move |event: MessageEvent| {
match AudioData::new(event.data().unchecked_ref()) {
Ok(data) => {
audio_encoder.encode(&data);
}
Err(err) => {
console::error_1(&err);
console::debug_1(&event);
}
}
});
// Attach the handler to worklet_node.port.onmessage via Reflect because
// web-sys does not expose a typed `port()` accessor on AudioWorkletNode here.
Reflect::set(
&Reflect::get(&worklet_node, &"port".into())?,
&"onmessage".into(),
onmessage.as_ref(),
)?;
// Leak the closure so the JS side can keep calling it.
onmessage.forget();
source.connect_with_audio_node(&worklet_node)?;
// Also connect the worklet to the speakers — presumably to keep the node
// processing (and/or for local monitoring); TODO(review): confirm this
// echo of the mic to the destination is intended.
worklet_node.connect_with_audio_node(&audio_context.destination())?;
Ok(worklet_node)
}
fn create_decoder(audio_context: &AudioContext) -> AudioDecoder {
let audio_stream_generator =
MediaStreamTrackGenerator::new(&MediaStreamTrackGeneratorInit::new("audio")).unwrap();
@@ -50,17 +165,19 @@ fn create_decoder(audio_context: &AudioContext) -> AudioDecoder {
let media_stream = MediaStream::new_with_tracks(&js_tracks).unwrap();
// Create MediaStreamAudioSourceNode
let audio_source = audio_context.create_media_stream_source(&media_stream).unwrap();
let audio_source = audio_context
.create_media_stream_source(&media_stream)
.unwrap();
// Connect output of audio_source to audio_context (browser audio)
audio_source.connect_with_audio_node(&audio_context.destination()).unwrap();
audio_source
.connect_with_audio_node(&audio_context.destination())
.unwrap();
// Create callback functions for AudioDecoder
let error = Closure::wrap(Box::new(move |e: JsValue| {
console::log_1(&e);
}) as Box<dyn FnMut(JsValue)>);
// This knows what MediaStreamTrackGenerator to use as it closes around it
let output = Closure::wrap(Box::new(move |audio_data: AudioData| {
let writable = audio_stream_generator.writable();
@@ -106,15 +223,19 @@ pub async fn network_entrypoint() {
console::log_1(&"Rust via WASM!".into());
let server_hash = vec![
14, 162, 111, 176, 34, 113, 218, 69, 177, 18, 13, 180, 232, 204, 49, 65, 161, 195, 36, 238,
23, 95, 174, 190, 24, 216, 105, 89, 236, 147, 206, 139,
];
let Ok(server_hash): Result<Vec<u8>, _> = env!("WEBTRANSPORT_SERVER_HASH")
.trim_matches(&['[', ']'])
.split(',')
.map(|x| x.trim().parse())
.collect()
else {
panic!("could not parse server hash")
};
let hash = web_sys::js_sys::Uint8Array::from(server_hash.as_slice());
let object = web_sys::js_sys::Object::new();
web_sys::js_sys::Reflect::set(
Reflect::set(
&object,
&JsValue::from_str("algorithm"),
&JsValue::from_str("sha-256"),
@@ -227,10 +348,19 @@ pub async fn network_entrypoint() {
});
}
// Create MediaStreams to playback decoded audio
// The audio context is used to reproduce audio.
let audio_context = configure_audio_context();
let audio_context_worklet = audio_context.clone();
let packet_sender_worklet = send_chan.clone();
spawn(async move {
match create_encoder_worklet(&audio_context_worklet, packet_sender_worklet).await {
Ok(node) => console::log_2(&"Created audio worklet:".into(), &node),
Err(err) => console::error_1(&err),
}
});
// Create map of session_id -> AudioDecoder
let mut decoder_map = HashMap::new();
@@ -249,17 +379,15 @@ pub async fn network_entrypoint() {
position_info,
} => {
// Get or create audio decoder for this user
let audio_decoder = decoder_map.entry(session_id).or_insert_with(|| create_decoder(&audio_context));
let audio_decoder = decoder_map
.entry(session_id)
.or_insert_with(|| create_decoder(&audio_context));
// This will over time (as users join and leave) leak
// AudioDecoders, MediaStreamTrackGenerators, MediaStreams, and MediaStreamAudioSourceNodes.
// A better way to handle this would be to delete and create all the audio
// infra on channel join and update it as new users join the channel, dropping
// any audio packets that come in the meantime.
if let mumble_protocol::voice::VoicePacketPayload::Opus(
audio_payload,
end_bit,
) = payload
{
if let VoicePacketPayload::Opus(audio_payload, end_bit) = payload {
let js_audio_payload = Uint8Array::from(audio_payload.as_ref());
audio_decoder.decode(
&EncodedAudioChunk::new(&EncodedAudioChunkInit::new(