Duplicate Recording Requests in Browser Extension for Mic/Screen-Sourced Audio Transcription

I have a website whose core function lets the user transcribe audio from their mic and PC audio at the same time, which feeds other features. Since the browser won’t let computer audio be recorded from a regular web page, I have to do this through an extension.

The extension captures the audio and sends the chunks to the API; this part works perfectly.

What shouldn’t happen, though, is that it appears to keep duplicating recording requests on top of itself. This leads to a lot of transcription errors and corrupt audio files.

So, in a nutshell: if someone can help me find a way to make the recording loop without erroneous/duplicate blobs being sent, and without duplicating processes, so it stays light on the user’s memory, I would be very grateful!
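For reference, this is roughly the loop shape I’m after: one recorder at a time, restarted per chunk so every blob is a self-contained file, with a guard so repeated start commands can’t stack recorders. A minimal sketch, not my working code; enqueueAudio is my existing queue entry point:

// Sketch: single-flight chunk loop (assumes the mixed stream is already captured)
let chunkRecorder = null;
let keepRecording = false;

function recordNextChunk(stream) {
    const recorder = new MediaRecorder(stream, { mimeType: 'audio/webm; codecs=opus' });
    chunkRecorder = recorder;
    recorder.ondataavailable = event => {
        if (event.data.size > 0) enqueueAudio(event.data); // one handler, attached once per recorder
    };
    recorder.onstop = () => {
        if (keepRecording) recordNextChunk(stream); // restart only after a clean stop
    };
    recorder.start();
    setTimeout(() => {
        if (recorder.state === 'recording') recorder.stop(); // close out this chunk
    }, 20000);
}

function startChunkLoop(stream) {
    if (keepRecording) return; // single-flight guard: ignore repeated start commands
    keepRecording = true;
    recordNextChunk(stream);
}

function stopChunkLoop() {
    keepRecording = false;
    if (chunkRecorder && chunkRecorder.state === 'recording') chunkRecorder.stop();
}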

Here’s the contentScript.js code (where all of this happens):

// contentScript.js
console.log("Content script has loaded.");

let mediaRecorder;
let isRecording = false;
let audioContext = new AudioContext();
let mixedStream = null;
let recording = false;
let recordingCount = 0; // Variable to count how many times startRecording has been invoked
let lastAudioHash = null;
let audioQueue = [];
let processingAudio = false;
let mediaRecording = 0;
const MIN_TIME_GAP = 5000; // Minimum time difference (in milliseconds) to consider non-duplicate
let lastAudioSentTime = null; // Stores the timestamp of the last sent audio

chrome.runtime.onMessage.addListener((request, sender, sendResponse) => {
    console.log("Received command:", request.command);
    switch (request.command) {
        case "startRecording":
            if (!isRecording) {
                startRecording(sendResponse);
            } else {
                console.log("Recording already in progress.");
                sendResponse({ status: 'success', message: 'Recording already started.' });
            }
            break;
        case "stopRecording":
            stopRecording();
            sendResponse({ status: 'success', message: 'Recording stopped.' });
            console.log("Recording stopped.");
            break;
    }
    return true; // This keeps the sendResponse callback valid for asynchronous use
});

async function startRecording(sendResponse) {
    console.log(`Starting recording`);
    if (recordingCount > 1) {
        console.log("Exiting function because recordingCount is greater than 1.");
        return; // Exit the function
    }
    
    try {
        if (!mixedStream || !isStreamActive(mixedStream)) {
            mixedStream = await initializeStreams();
        }
        if (audioContext.state === 'suspended') {
            await audioContext.resume();
        }
        setupRecorder(mixedStream);
        //isRecording = true; // note: left commented out, so the onMessage guard never sees an active recording
        recording = true;
        
        console.log("Recording started successfully.");
    } catch (error) {
        console.error("Failed to start media capture:", error);
    }
}

async function initializeStreams() {
    let micStream = await navigator.mediaDevices.getUserMedia({ audio: true });
    let displayStream = await navigator.mediaDevices.getDisplayMedia({ video: true, audio: true });
    return mixStreams(micStream, displayStream);
}

function mixStreams(micStream, displayStream) {
    const micSource = audioContext.createMediaStreamSource(micStream);
    const displaySource = audioContext.createMediaStreamSource(displayStream);
    const destination = audioContext.createMediaStreamDestination();
    micSource.connect(destination);
    displaySource.connect(destination);
    return destination.stream;
}

function setupRecorder(stream) {

    mediaRecorder = new MediaRecorder(stream, { mimeType: 'audio/webm; codecs=opus' });
    mediaRecorder.ondataavailable = handleDataAvailable;
    if (mediaRecording === 0){
        mediaRecording = 1;
        mediaRecorder.start(20000); // Emit a chunk every 20 seconds (20000 ms)
    }

    mediaRecorder.addEventListener("dataavailable", event => {
        if (event.data.size > 0 && recording) {
            if (recording) { // If still recording, restart immediately
                mediaRecording = 0;
                mediaRecorder.stop(); // stop the current recorder before setting up the next one
                setupRecorder(stream);
            }
            processQueue(event.data); // kick the queue (note: processQueue ignores this argument)
        }
    });

    mediaRecorder.onstop = () => {
        console.log("Recorder stopped.");
        if (isRecording) {
            console.log("Restarting recording...");
        }
    };
}

function handleDataAvailable(event) {
    if (event.data.size > 0) {
        enqueueAudio(event.data); // Changed to enqueue audio for processing
    }
}

function enqueueAudio(blob) {
    audioQueue.push(blob);
    if (!processingAudio) {
        processQueue();
    }
}

function processQueue() {
    if (audioQueue.length === 0) {
        processingAudio = false;
        return;
    }
    
    processingAudio = true;
    const blob = audioQueue.shift();
    sendAudioToServer(blob);
}

function sendAudioToServer(blob) {
    // Basic check for valid blob size (avoid empty blobs)
    if (!blob || blob.size === 0) {
      console.log("Empty audio blob detected. Skipping sending.");
      processingAudio = false;
      processQueue(); // Process next item in the queue
      return;
    }
  
    // Check for content type (MIME type) if available
    if (blob.type && !blob.type.startsWith('audio/')) {
      console.log("Invalid audio file type:", blob.type, ". Skipping sending.");
      processingAudio = false;
      processQueue(); // Process next item in the queue
      return;
    }
  
    // Basic silence detection (note: this creates a fresh AudioContext per chunk)
    const SILENCE_THRESHOLD = 0.001; // Adjust based on your audio levels and noise floor
    const audioContext = new AudioContext();
    const reader = new FileReader();
    reader.onload = () => {
      const arrayBuffer = reader.result;
      audioContext.decodeAudioData(arrayBuffer, (decodedAudio) => {
        const channelData = decodedAudio.getChannelData(0); // Assuming single channel audio
  
        let isSilent = true;
        for (let i = 0; i < channelData.length; i++) {
          if (Math.abs(channelData[i]) > SILENCE_THRESHOLD) {
            isSilent = false;
            break;
          }
        }
  
        if (isSilent) {
          console.log("Audio clip appears silent. Skipping sending.");
          processingAudio = false;
          processQueue(); // Process next item in the queue
          return;
        } else {
          const currentAudioHash = simpleHash(blob); // Hash the blob directly (note: simpleHash expects a string)
          const currentTime = Date.now(); // Get current timestamp

          // Check for duplicate based on hash and time difference
          if (currentAudioHash === lastAudioHash && currentTime - lastAudioSentTime < MIN_TIME_GAP) {
            console.log("Duplicate audio detected (within time threshold). Skipping...");
            processingAudio = false;
            processQueue(); // Process next item in the queue
            return;
          }

          lastAudioHash = currentAudioHash;
          lastAudioSentTime = currentTime;

          const reader = new FileReader();
          reader.onloadend = () => {
            const base64AudioMessage = reader.result.split(',')[1]; // base64 payload (currently unused; the full data URL is sent below)

            console.log("Sending new audio data to background for transcription...");

            chrome.runtime.sendMessage({
              command: "transcribeAudio",
              audioData: reader.result
            }, response => {
              if (chrome.runtime.lastError) {
                console.error("Error sending audio data:", chrome.runtime.lastError.message);
              } else {
                console.log("Transcription response received:", response);
                sendTranscription(response);
              }
            });
          };

          // Read data only if not a duplicate
          reader.readAsDataURL(blob);
        }
      }, (error) => {
        console.error("Error decoding audio data (expected for non-audio chunks; a DOMException here can be ignored):", error);
        processingAudio = false;
        processQueue(); // Process next item in the queue
        return;
      });
    };

    reader.readAsArrayBuffer(blob);
}
  
// Function to compute a simple hash from a string (note: it expects a string, not a Blob)
function simpleHash(str) {
    let hash = 0;
    for (let i = 0; i < str.length; i++) {
        const char = str.charCodeAt(i);
        hash = ((hash << 5) - hash) + char;
        hash = hash & hash; // Convert to 32bit integer
    }
    return hash;
}

function sendTranscription(transcription) {
    console.log("Sending transcription to background...");
    chrome.runtime.sendMessage({
        command: "sendTranscription",
        transcription: transcription
    }, response => {
        if (chrome.runtime.lastError) {
            console.error("Error sending transcription data:", chrome.runtime.lastError.message);
        } else {
            console.log("Transcription response received:", response);
        }
    });
}

async function stopRecording() {
    if (mediaRecorder && mediaRecorder.state === 'recording') {
        mediaRecorder.stop();
    }
    isRecording = false;
    recording = false;
    audioContext.close(); // Clean up the audio context
    audioContext = new AudioContext(); // Reinitialize the audio context
    mixedStream = null; // Reset the mixed stream
}

function isStreamActive(stream) {
    return stream && stream.active;
}

After implementing some filters for this issue (the silence and duplicate checks above), the transcription happens as needed. However, because the duplication is ongoing, it keeps doubling the processes, even though they exit when not used.
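Related to those filters: I hash each blob to detect duplicates, but simpleHash expects a string, so I suspect the comparison isn’t doing what I want. If hashing the chunk is the right idea, I imagine it would look something like this (a sketch using crypto.subtle, which I haven’t actually wired in):

// Sketch: content hash of a chunk for duplicate detection (not in my current code)
async function hashBlob(blob) {
    const digest = await crypto.subtle.digest('SHA-256', await blob.arrayBuffer());
    return Array.from(new Uint8Array(digest))
        .map(byte => byte.toString(16).padStart(2, '0'))
        .join(''); // hex string, comparable against lastAudioHash
}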

This uses a LOT of memory on the user’s computer, to the point that it crashes within a few minutes.
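One likely contributor I can see in my own code: sendAudioToServer creates a new AudioContext for every chunk and never closes it, so contexts pile up. A minimal sketch of the silence check with explicit cleanup (assuming the promise-based decodeAudioData):

// Sketch: silence check that releases its AudioContext when done
async function isBlobSilent(blob, threshold) {
    const ctx = new AudioContext();
    try {
        const decoded = await ctx.decodeAudioData(await blob.arrayBuffer());
        const samples = decoded.getChannelData(0); // assuming single-channel audio
        for (let i = 0; i < samples.length; i++) {
            if (Math.abs(samples[i]) > threshold) return false; // found signal
        }
        return true; // never crossed the threshold: treat as silence
    } finally {
        await ctx.close(); // release the context so decodes don't accumulate in memory
    }
}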

I can’t make the chunks bigger to reduce the process duplication, because the transcription needs to stay pretty stable. The website adds timestamps according to when each transcription arrives, so a streaming voice-to-text service won’t work; I have to do the chunking (see the sketch below for what I mean).
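To illustrate the timestamp constraint: each chunk has to arrive as its own request so the site can stamp it on arrival. Something like this is what I mean (chunkStartMs is a hypothetical field, not something my backend currently reads):

// Sketch: one message per chunk, carrying a hypothetical start-time field
function sendChunkWithTimestamp(blob, chunkStartMs) {
    const reader = new FileReader();
    reader.onloadend = () => {
        chrome.runtime.sendMessage({
            command: "transcribeAudio",
            audioData: reader.result,
            chunkStart: chunkStartMs // hypothetical: would let the site stamp the exact chunk start
        });
    };
    reader.readAsDataURL(blob);
}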

I’ve tried for days to fix this, but it either behaves as described above, or stops transcribing past the first audio chunk whenever I add any safeguards beyond the ones already in the code.

I’m sorry for the amount of code, but I want you to have all of the context to see what could be wrong.

I can’t have it request permission every time the loop records another chunk: the user would have to re-select the screen-sharing source and approve it for every chunk, which kills seamless transcription and is terrible UX.
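In other words, the capture prompt should happen once, and every subsequent chunk should reuse the same stream. A minimal sketch of what I mean, built on the mixStreams helper above:

// Sketch: request capture once, then reuse the cached stream for every chunk
let cachedMixedStream = null;

async function getMixedStreamOnce() {
    if (cachedMixedStream && cachedMixedStream.active) {
        return cachedMixedStream; // reuse: no new permission prompt
    }
    const micStream = await navigator.mediaDevices.getUserMedia({ audio: true });
    const displayStream = await navigator.mediaDevices.getDisplayMedia({ video: true, audio: true });
    cachedMixedStream = mixStreams(micStream, displayStream); // mixStreams as defined above
    return cachedMixedStream;
}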