BadRequestException: Your request timed out because no new audio was received for 15 seconds

I am getting this error from AWS Transcribe streaming transcription. From the logs I can see that the socket connection is valid and that audio data is reaching the server, yet Transcribe still closes the stream with this error at the server level.
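
I believe the error means Transcribe saw no AudioEvent on the stream for 15 seconds. To double-check whether chunks actually stop arriving at the server, I think a gap-logging handler on the Node.js side along these lines (a sketch, not part of my current code; the 1-second threshold is arbitrary) would make any stall visible:

io.on("connection", (socket) => {
  let lastChunkAt = Date.now();
  socket.on("audio_data", () => {
    // Log any suspicious gap between consecutive chunks; a gap approaching
    // 15000 ms would explain the BadRequestException.
    const gapMs = Date.now() - lastChunkAt;
    lastChunkAt = Date.now();
    if(gapMs > 1000){
      console.warn(`audio gap of ${gapMs} ms since the previous chunk`);
    }
  });
});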

My Angular service code:

import { Injectable } from '@angular/core';
import { Subject } from 'rxjs';
import { io, Socket } from 'socket.io-client';

type result={
  transcript:string,
  asset_box_index:number
}

@Injectable({
  providedIn: 'root'
})
export class AwsService {
  private connection?:Socket;
  private currentRecognition?:string;
  private recognitionHistory:string='';
  private transcribeResult:string='';
  private transcribeResultSubject = new Subject<result>();
  transcribeResult$ = this.transcribeResultSubject.asObservable();

  connect():void{
    if(this.connection) this.disconnect()
    this.connection = io("https://****.com",{
      path:"/awstranscribeservice/socket.io",
    });

    this.connection.on("connect",()=>{
      console.log("connected",this.connection?.id);
    })

    this.connection.on("receive_audio_text",(data)=>{
      this.speechRecognized(data);
      console.log("recieved audio text",data);
    })

    this.connection.on("disconnect",()=>{
      console.log("disconnected",this.connection?.id);
    })
  }

  disconnect():void{
    if(this.connection){
      this.connection.emit("stopAWSRecognition");
      this.connection.disconnect();
      this.connection = undefined;
    }
  }


  private audioContextMS!: AudioContext;
  private audioInputMS!: MediaStreamAudioSourceNode;
  private processorMS!: ScriptProcessorNode;

  startMSRecording(){
    navigator.mediaDevices.getUserMedia({ audio: true }).then((stream) => {
      // Note: AudioContext runs at the hardware sample rate (often 48000 Hz),
      // which must match the MediaSampleRateHertz declared to AWS Transcribe.
      this.audioContextMS = new AudioContext();
      this.audioInputMS = this.audioContextMS.createMediaStreamSource(stream);
      this.processorMS = this.audioContextMS.createScriptProcessor(1024, 1, 1);

      this.audioInputMS.connect(this.processorMS);
      this.processorMS.connect(this.audioContextMS.destination);

      this.processorMS.onaudioprocess = (event) => {
        // Convert Float32 samples in [-1, 1] to 16-bit signed PCM.
        const float32Array = event.inputBuffer.getChannelData(0);
        const int16Array = new Int16Array(float32Array.length);
        for(let i=0;i<float32Array.length;i++){
          int16Array[i] = Math.max(-32768, Math.min(32767, float32Array[i] * 32768));
        }
        this.connection?.emit('audio_data', int16Array.buffer);
      };
      this.connection?.emit('startTranscription');
    })
    .catch((error) => {
      console.error('Error accessing microphone:', error);
    });
  }

  stopMSRecording(){
    if(this.audioContextMS && this.audioInputMS && this.processorMS){
      // Disconnect the audio nodes before closing the context.
      this.processorMS.disconnect();
      this.audioInputMS.disconnect();
      this.audioContextMS.close();
      this.connection?.emit('stopTranscription');
    }
  }

  private speechRecognized(data:any):void{
    if(data.isFinal){
      this.currentRecognition='...';
      this.recognitionHistory+=data.text;
    }else{
      this.currentRecognition = data.text;
    }
    this.transcribeResult = this.recognitionHistory+' '+this.currentRecognition;
    this.transcribeResultSubject.next({transcript:this.transcribeResult,asset_box_index:0});
  }

}
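
For context, this is roughly how the service is consumed from a component (a simplified sketch; the component name, selector, and template are placeholders, not my real component):

import { Component, OnDestroy } from '@angular/core';
import { Subscription } from 'rxjs';
import { AwsService } from './aws.service';

@Component({
  selector: 'app-transcribe',
  template: '{{ transcript }}'
})
export class TranscribeComponent implements OnDestroy {
  transcript = '';
  private sub: Subscription;

  constructor(private aws: AwsService) {
    this.aws.connect();
    this.aws.startMSRecording();
    // Render the running transcript as results stream in.
    this.sub = this.aws.transcribeResult$.subscribe((r) => {
      this.transcript = r.transcript;
    });
  }

  ngOnDestroy(): void {
    this.sub.unsubscribe();
    this.aws.stopMSRecording();
    this.aws.disconnect();
  }
}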

Node.js server code:

const express = require("express");
const router = express.Router();
const logger = require("morgan");
const bodyParser = require("body-parser");
const cors = require("cors");
const http = require("http");
const { Server } = require("socket.io");
const { TranscribeStreamingClient, StartStreamTranscriptionCommand } = require('@aws-sdk/client-transcribe-streaming');
const { fromInstanceMetadata } = require('@aws-sdk/credential-providers');
const defaultconfig = require('./config');

const app = express();
const corsOptions = {
  origin: "*",
  methods: ["GET", "POST"],
  allowedHeaders: "*",
};
app.use(cors(corsOptions));
app.use(logger("dev"));
app.use(bodyParser.json());

const server = http.createServer(app);
const io = new Server(server, {
  path: "/awstranscribeservice/socket.io",
  cors: corsOptions
});

const transcribeClient = new TranscribeStreamingClient({
  region: 'us-east-1',
  credentials: fromInstanceMetadata(),
});


app.use('/awstranscribeservice', router);


io.on("connection", (socket) => {
  console.log("** a user connected - " + socket.id + " **n");

  let isTrasncribing = false;
  let audioStream;

  socket.on("disconnect", () => {
    console.log("** user disconnected ** n");
  });


  // Adapted from the AWS support pages: an async generator that yields
  // audio chunks to the Transcribe streaming client.
  socket.on("startTranscription", async () => {
    console.log("Starting transcription for aws");
    isTranscribing = true;
    audioStream = async function* () {
      while(isTranscribing){
        // Register a one-shot listener for the next chunk; note that a chunk
        // arriving before this listener is attached is not captured.
        const chunk = await new Promise((resolve) => {
          console.log("Waiting for audio data from the client");
          socket.once('audio_data', resolve);
        });
        if(chunk === null){
          break;
        }
        yield {AudioEvent: {AudioChunk: chunk}};
      }
    };
    const command = new StartStreamTranscriptionCommand({
      LanguageCode: 'en-US',
      // Must match the client's actual capture rate (AudioContext.sampleRate,
      // which is often 48000 rather than 44100).
      MediaSampleRateHertz: 44100,
      MediaEncoding: 'pcm',
      AudioStream: audioStream(),
    });

    try{
      const response = await transcribeClient.send(command);
      for await (const event of response.TranscriptResultStream){
        if(!isTranscribing) break;
        if(event.TranscriptEvent){
          const results = event.TranscriptEvent.Transcript.Results;
          if(results.length > 0 && results[0].Alternatives.length > 0){
            const transcript = results[0].Alternatives[0].Transcript;
            const isFinal = results[0].IsPartial === false;
            socket.emit('receive_audio_text',{text: transcript, isFinal: isFinal});
          }
        }
      }
    }catch(err){
      console.error("Error during transcription: " + err);
    }
  });

  socket.on("stopTranscription", () => {
    console.log("Stopping transcription for aws with the new implementation");
    isTrasncribing = false;
  });
  socket.on("audio_data", (data) => {
    console.log("Received audio data from the client");
    if(isTrasncribing){
      socket.emit('audio_data',data);
    }
  });


});


const PORT = defaultconfig.port;
// http.Server.listen does not take Express middleware; CORS is already
// configured on the app and the socket.io server above.
server.listen(PORT, () => {
  console.log(`WebSocket server listening on http://localhost:${PORT}.`);
});
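
One thing I suspect: socket.once only listens while the generator is awaiting, so any chunk that arrives between iterations (for example while the previous chunk is being yielded to AWS) is silently dropped. A queue-based variant along these lines (a sketch I have not verified; audioQueue and notifyChunk are my own names) would buffer every chunk instead:

io.on("connection", (socket) => {
  const audioQueue = [];
  let notifyChunk = null;
  let isTranscribing = false;

  // Buffer every chunk, including ones that arrive while the generator is
  // busy yielding the previous chunk to AWS.
  socket.on("audio_data", (data) => {
    audioQueue.push(data);
    if(notifyChunk){
      notifyChunk();
      notifyChunk = null;
    }
  });

  // Drain the queue, sleeping only when it is empty.
  async function* audioStream() {
    while(isTranscribing){
      if(audioQueue.length === 0){
        await new Promise((resolve) => { notifyChunk = resolve; });
        continue;
      }
      yield {AudioEvent: {AudioChunk: audioQueue.shift()}};
    }
  }

  socket.on("startTranscription", () => {
    isTranscribing = true;
    // audioStream() would then be passed to StartStreamTranscriptionCommand
    // exactly as in the handler above.
  });

  socket.on("stopTranscription", () => {
    isTranscribing = false;
    if(notifyChunk) notifyChunk(); // unblock a waiting generator so it can exit
  });
});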