My current aim is to record a 3-5 s snippet of raw audio using the device's microphone and convert it into a base64-encoded string of byte[]. The audio must be mono and sampled at 44100 Hz to be compatible with the API I am using.
My current approach using MediaRecorder seems to force me to attach some kind of codec and container as soon as I assemble the Blob from the gathered stream. An alternative, since I don't require a file output, would be to use an AudioContext with a MediaStreamAudioSourceNode attached to it, but I have difficulty processing the incoming data with this approach; a rough sketch of what I have in mind there follows.
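Untested sketch of the AudioContext route, assuming a ScriptProcessorNode (deprecated in favour of AudioWorklet, but self-contained) and a made-up name recordRawPcm:

// Collect raw Float32 PCM frames from the microphone via a ScriptProcessorNode.
const recordRawPcm = async (durationMs: number): Promise<Float32Array> => {
  const stream = await navigator.mediaDevices.getUserMedia({
    audio: { channelCount: 1, sampleRate: 44100 },
  });
  // Request 44100 Hz; browsers may substitute their own rate, so check
  // audioContext.sampleRate before trusting it.
  const audioContext = new AudioContext({ sampleRate: 44100 });
  const source = audioContext.createMediaStreamSource(stream);
  const processor = audioContext.createScriptProcessor(4096, 1, 1);
  const frames: Float32Array[] = [];

  processor.onaudioprocess = (e) => {
    // Copy the buffer: the engine reuses it between callbacks.
    frames.push(new Float32Array(e.inputBuffer.getChannelData(0)));
  };
  source.connect(processor);
  processor.connect(audioContext.destination);

  await new Promise((resolve) => setTimeout(resolve, durationMs));

  processor.disconnect();
  source.disconnect();
  stream.getTracks().forEach((track) => track.stop());
  await audioContext.close();

  // Stitch the collected frames into one contiguous Float32Array.
  const total = frames.reduce((sum, frame) => sum + frame.length, 0);
  const pcm = new Float32Array(total);
  let offset = 0;
  for (const frame of frames) {
    pcm.set(frame, offset);
    offset += frame.length;
  }
  return pcm;
};

This would yield headerless Float32 samples at whatever rate audioContext.sampleRate actually reports, which still need converting to the byte layout the API expects (see the helper sketch after the first method below).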
Below is the current approach based on MediaRecorder:
public readonly arrayBufferToBase64 = (arrayBuffer: ArrayBuffer) => {
  const byteArray = new Uint8Array(arrayBuffer);
  // Build a binary string byte by byte, then base64-encode it.
  let base64String = "";
  for (let i = 0; i < byteArray.length; i++) {
    base64String += String.fromCharCode(byteArray[i]);
  }
  return btoa(base64String);
};
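
// Hypothetical companion helper (name made up): if I go the raw-PCM route
// sketched above, the Float32 samples still need converting to the byte
// layout the API expects. This assumes headerless 16-bit little-endian PCM;
// whether the API instead wants a WAV header is something I'd have to confirm.
public readonly floatTo16BitPcmBase64 = (samples: Float32Array): string => {
  const buffer = new ArrayBuffer(samples.length * 2);
  const view = new DataView(buffer);
  for (let i = 0; i < samples.length; i++) {
    // Clamp to [-1, 1], then scale into the signed 16-bit range.
    const s = Math.max(-1, Math.min(1, samples[i]));
    view.setInt16(i * 2, s < 0 ? s * 0x8000 : s * 0x7fff, true); // little-endian
  }
  return this.arrayBufferToBase64(buffer);
};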
public readonly audioSetup = async () => {
  if (navigator.mediaDevices && navigator.mediaDevices.getUserMedia) {
    navigator.mediaDevices
      .getUserMedia({
        audio: {
          // Note: these are constraints, not guarantees; the browser may
          // record at a different sample rate than requested.
          channelCount: 1,
          sampleRate: 44100,
        },
      })
      .then((stream) => {
        const recorder = new MediaRecorder(stream);
        const chunks: BlobPart[] = [];
        recorder.addEventListener("dataavailable", (e) => {
          chunks.push(e.data);
        });
        recorder.addEventListener("stop", () => {
          // The type label does not transcode anything: the bytes stay in
          // whatever container/codec the browser chose (recorder.mimeType),
          // typically audio/webm or audio/ogg, not WAV.
          const blob = new Blob(chunks, { type: "audio/wav" });
          // const url = URL.createObjectURL(blob);
          // const audio = new Audio(url);
          // audio.play();
          const reader = new FileReader();
          reader.onload = () => {
            console.log(
              this.arrayBufferToBase64(reader.result as ArrayBuffer)
            );
          };
          reader.readAsArrayBuffer(blob);
        });
        // Start recording
        recorder.start();
        // Stop recording after 5 seconds
        setTimeout(() => {
          recorder.stop();
        }, 5000);
      })
      .catch((err) => {
        console.error("Error accessing microphone:", err);
      });
  } else {
    console.error("getUserMedia not supported in this browser");
  }
};
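
As a side note on the MediaRecorder route: the container/codec can at least be made explicit instead of browser-chosen by probing MediaRecorder.isTypeSupported before constructing the recorder. A sketch that would replace the new MediaRecorder(stream) line above:

// Probe for an explicit container/codec; fall back to the browser default.
const preferred = "audio/webm;codecs=opus";
const recorder = MediaRecorder.isTypeSupported(preferred)
  ? new MediaRecorder(stream, { mimeType: preferred })
  : new MediaRecorder(stream);
console.log("Actual recording format:", recorder.mimeType);

Either way the output is still containerized, which is exactly what I am trying to avoid.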