Post-processing Segmentation Mask from ONNX Model in Web (ONNX Runtime JS)

I’m currently working on running my custom ONNX segmentation model directly in the browser using ONNX Runtime for Web with HTML and JavaScript. The model is loading and running inference successfully. However, I’m stuck on the post-processing logic, specifically when it comes to filtering and visualizing the segmentation mask.

I would appreciate any guidance or example implementations for:

Converting the model output into a usable mask

Resizing or transforming the output to match the original image

Filtering/thresholding the segmentation mask correctly in JavaScript

My use case is similar to how YOLOv8-seg outputs masks, but adapted to ONNX Runtime Web. If anyone has experience with post-processing segmentation outputs in the browser using ONNX models, your input would be very helpful.

The Output segmenation :

enter image description here

Actual bit map :
enter image description here

HTML CODE :

<!DOCTYPE html>
<html>
  <head>
    <title>YOLOv8 Segmentation Viewer</title>
    <script src="https://cdn.jsdelivr.net/npm/onnxruntime-web/dist/ort.min.js"></script>
    <style>
      canvas {
        border: 1px solid black;
        display: block;
        margin-top: 10px;
      }
    </style>
  </head>
  <body>
    <h3>YOLOv8 Segmentation Mask Demo</h3>
    <input type="file" id="upload" accept="image/*" />
    <canvas id="canvas"></canvas>

    <script>
      const inputSize = 640;

      document.getElementById("upload").addEventListener("change", async (e) => {
        const file = e.target.files[0];
        const img = new Image();
        img.src = URL.createObjectURL(file);

        img.onload = async () => {
          const canvas = document.getElementById("canvas");
          const ctx = canvas.getContext("2d");
          canvas.width = img.width;
          canvas.height = img.height;
          ctx.drawImage(img, 0, 0);

          const offscreen = new OffscreenCanvas(inputSize, inputSize);
          const offCtx = offscreen.getContext("2d");
          offCtx.drawImage(img, 0, 0, inputSize, inputSize);
          const imageData = offCtx.getImageData(0, 0, inputSize, inputSize);
          const inputTensor = preprocess(imageData);

          const session = await ort.InferenceSession.create("best_on.onnx");
          const feeds = { images: inputTensor };
          const results = await session.run(feeds);

          const output0 = results[Object.keys(results)[0]].data;
          const output1 = results[Object.keys(results)[1]].data;

          const [numProposals, numChannels] = [8400, 37];
          const prototypeH = 160;
          const prototypeW = 160;

          for (let i = 0; i < numProposals; i++) {
            const conf = output0[i * numChannels + 4];
            if (conf < 0.5) continue;

            const maskCoeffs = output0.slice(
              i * numChannels + 5,
              i * numChannels + 5 + 32
            );
            const mask = new Float32Array(prototypeH * prototypeW).fill(0);

            for (let m = 0; m < 32; m++) {
              for (let j = 0; j < prototypeH * prototypeW; j++) {
                mask[j] += maskCoeffs[m] * output1[m * prototypeH * prototypeW + j];
              }
            }

            for (let j = 0; j < mask.length; j++) {
              mask[j] = 1 / (1 + Math.exp(-mask[j]));
            }

            drawMaskOnCanvas(
              mask,
              prototypeH,
              prototypeW,
              canvas,
              ctx,
              img.width,
              img.height
            );
            break;
          }
        };
      });

      function preprocess(imageData) {
        const [r, g, b] = [[], [], []];
        for (let i = 0; i < imageData.data.length; i += 4) {
          r.push(imageData.data[i] / 255);
          g.push(imageData.data[i + 1] / 255);
          b.push(imageData.data[i + 2] / 255);
        }
        return new ort.Tensor(
          "float32",
          new Float32Array([...r, ...g, ...b]),
          [1, 3, 640, 640]
        );
      }

      function drawMaskOnCanvas(mask, maskH, maskW, canvas, ctx, imgW, imgH) {
        const imageData = ctx.getImageData(0, 0, imgW, imgH);
        const maskResized = resizeMask(mask, maskW, maskH, imgW, imgH);
        for (let y = 0; y < imgH; y++) {
          for (let x = 0; x < imgW; x++) {
            const m = maskResized[y * imgW + x];
            if (m > 1) {
              const idx = (y * imgW + x) * 4;
              imageData.data[idx] = 255;
              imageData.data[idx + 1] = 0;
              imageData.data[idx + 2] = 0;
              imageData.data[idx + 3] = 100;
            }
          }
        }
        ctx.putImageData(imageData, 0, 0);
      }

      function resizeMask(src, srcW, srcH, dstW, dstH) {
        const dst = new Float32Array(dstW * dstH);
        for (let y = 0; y < dstH; y++) {
          const sy = ((y + 0.5) * srcH) / dstH - 0.5;
          const y0 = Math.max(Math.floor(sy), 0);
          const y1 = Math.min(y0 + 1, srcH - 1);
          const yLerp = sy - y0;
          for (let x = 0; x < dstW; x++) {
            const sx = ((x + 0.5) * srcW) / dstW - 0.5;
            const x0 = Math.max(Math.floor(sx), 0);
            const x1 = Math.min(x0 + 1, srcW - 1);
            const xLerp = sx - x0;

            const top =
              (1 - xLerp) * src[y0 * srcW + x0] + xLerp * src[y0 * srcW + x1];
            const bottom =
              (1 - xLerp) * src[y1 * srcW + x0] + xLerp * src[y1 * srcW + x1];
            dst[y * dstW + x] = (1 - yLerp) * top + yLerp * bottom;
          }
        }
        return dst;
      }
    </script>
  </body>
</html>

any one suggest me the i am doing wrong any where ?

My goal is to detect and process the segmentation mask on an input image using a custom ONNX model running in the browser with ONNX Runtime Web.

Like the image below:

enter image description here

How can I parse the action for my ReAct Agent

I want to

  1. Split the string on the newline character (“n”)
  2. Search through the array of strings for one that has “Action:”
    •  regex to use: 
      
    •  const actionRegex = /^Action: (w+): (.*)$/
      
  3. Parse the action (function and parameter) from the string
async function agent(query) {
    
    const response = await openai.chat.completions.create({
        model: "gpt-3.5-turbo",
        messages: [
            { role: "system", content: systemPrompt },
            { role: "user", content: query }
        ]
    })

    console.log(response.choices[0].message.content)
}

agent("What book should I read next? I like self-help books.")

Is Number(“NaN”) = NaN because js recognizes “NaN” as a value of type number or because its a string and not a number?

For example Number(“Infinity”) = Infinity because it recognizes it as of type number, so I was wondering if Number(“NaN”) = NaN for the same reason that Number(“potato”) = NaN or because it recognizes it as a number like it does “Infinity”?

I’ve tried googling and asking gpt, gpt said that its because it’s a string and not a number and it dosent recognize it as such like it does with “Infinity” but I don’t really trust it

How to expand on image on hover to full size?

So I’m creating a website which is going to be a portfolio showcasing a person art and what not. So right now and I have it to where when you hover on the image it expands and fades out the background using an expanded state and a regular state where the images are in a card where they are aligned into a grid. I was wondering how I could get the images to expand not on click but rather on hover as right now in the JavaScript I’m using a function.

card.addEventListener('click', () => { if (card.classList.contains('expanded')) { const src = card.querySelector('img').src; preview.style.background = url(‘${src}’) center/contain no-repeat; preview.classList.add('show'); } });

I want to have it so when I hover over On Hover it’s supposed to be on click like fully expanded. On Click

What is wrong in this ProgressBar Component

What is wrong in this code , why is it failing the test cases giving error like these :
› on clicking button +10%, increments progress correctly
Unable to find an element with the text: 10%. This could be because the text is broken up by multiple elements. In this case, you can provide a function for your text matcher to make your matcher more flexible.

import React,{useState,useEffect} from "react";

function ProgressBar() {
   
   const [barWidth,setBarWidth]= useState(10);
   
   
   const decreaseWidth = () =>{
        setBarWidth(prev=>{
          const newWidth = Math.max(0,prev-10);
          return newWidth;
        });
    }

    const increaseWidth = () =>{
        setBarWidth(prev=>{
          const newWidth = Math.min(100,prev+10);
          return newWidth;
        }); 
        
    }

    const getBgColor = (value) => { 
        if (value < 40) return "red";
        else if (value >= 40 && value< 80) return "orange";
        return "green";
    }
  

   const barStyle={
       height:"100%",
       width:`${barWidth}%`,
       backgroundColor:getBgColor(barWidth),
       display:"flex",
   }
   
  
    return (
        <div style={{display:"flex",flexDirection:"column"}}>
           
            <h1>Progress bar</h1>
            <div  style={{
       backgroundColor:"#fff2cc",
       width:"100%",
       height:"30px", 
       borderRadius:"10px",
       position : "relative",
       overflow:"hidden"
   }} >
               <div id="testBgColor" style={barStyle} >
                    <p style={{ margin: "auto", position: "absolute", left: "50%", top: "50%", transform: "translate(-50%,-50%)", fontWeight: "bold" }}>{barWidth+"%"}</p> 
               </div>                
            </div>   
            <div >
               <button style={{margin:"2%"}} onClick={decreaseWidth}>-10% 
               </button>
               <button style={{margin:"1%"}} onClick={increaseWidth}>+10% 
               </button>
            </div>
        </div>
    );
}

export default ProgressBar;

Swiper.js not autoplaying and slides disappearing during transition in React (Vite + Swiper 11)

I’m using Swiper.js v11 with React and Vite, and I’m encountering two issues:

  1. Items overflow from swiper-wrapper at large screens.
  2. During swiping (manual or autoplay), the in-active slide briefly disappears or causes layout flickers.

Problem observations:

  1. During swipe transitions, the in-active slide disappears momentarily.
  2. I noticed inline styles like style=”width: 547.405px; margin-right: 25.392px;” being added, even though I haven’t defined them in CSS.

Any insights or corrections to my setup?
Thanks in advance!

slide image before
slide image after

ReviewSwiper

import { useEffect, useRef } from 'react';
import Swiper from 'swiper';
import 'swiper/css';

function calculateSpaceBetween() {
    const width = window.innerWidth;
    if (width >= 1440) return Math.max(20, Math.min((width * 1.38) / 100, 30));
    else if (width >= 768) return Math.max(10, Math.min((width * 1.3) / 100, 20));
    else return 10;
}

export default function ReviewSwiper({ children }: { children: React.ReactNode }) {
    const swiperRef = useRef<HTMLDivElement | null>(null);

    useEffect(() => {
        if (!swiperRef.current) return;

        const swiperInstance = new Swiper(swiperRef.current, {
            loop: true,
            slidesPerView: 'auto',
            spaceBetween: calculateSpaceBetween(),
            pagination: {
                el: '.review-swiper-pagination',
                clickable: true,
            },
            navigation: {
                nextEl: '.review-swiper-button-next',
                prevEl: '.review-swiper-button-prev',
            },
            scrollbar: {
                el: '.review-swiper-scrollbar',
            },
        });

        const resizeHandler = () => {
            swiperInstance.params.spaceBetween = calculateSpaceBetween();
            swiperInstance.update();
        };

        window.addEventListener('resize', resizeHandler);
        return () => {
            window.removeEventListener('resize', resizeHandler);
            swiperInstance.destroy(true, true);
        };
    }, []);

    return (
        <div className="swiper review-swiper" ref={swiperRef}>
            {children}
        </div>
    );
}

Usage

<ReviewSwiper>
   <div className="swiper-wrapper rss-review__review-items-container">
      {items.map((item, index) => (
         <div className="swiper-slide rss-review__review-items-container__item" key={index}>
            <div className="rss-review__review-items-container__item__header-container">
               <div className="rss-review__review-items-container__item__header-container__info-container">
                  <h6 className="rss-review__review-items-container__item__header-container__info-container__title font-16-20">{item.name}</h6>
                  <p className="rss-review__review-items-container__item__header-container__info-container__slug font-14-18">{item.company}</p>
               </div>
               <div className="rss-review__review-items-container__item__header-container__profile-container">
                  <div className="rss-review__review-items-container__item__header-container__profile-container__item-container">
                     <a><img src='someimage' className="rss-review__review-items-container__item__header-container__profile-container__item-container__item" /></a>
                  </div>
                  <!-- another item -->
                  <!-- another item -->
               </div>
            </div>
            <p className="rss-review__review-items-container__item__content font-16-18">{item.review}</p>
         </div>
      ))}
   </div>
</ReviewSwiper>

css

.swiper {
    &-slide { width: auto; height: auto !important; }
    &-button-prev, &-button-next {
        color: transparent;
        top: unset;
        left: unset;
        right: unset;
        bottom: unset;
        transform: unset;
        margin: unset;
        z-index: unset;
    }
    &-button-prev { margin-right: clamp(12px, 0.83vw, 16px); }
    &-items-wrapper { @include screens.max-md { display: flex; justify-content: center; }}
}
&__review-items-container {
    display: flex;
    justify-content: space-between;
    align-items: stretch;
    margin-top: clamp(24px, 6vw, 30px);
    overflow: hidden;
    @include screens.min-smd {
        gap: clamp(10px, 1.3vw, 20px);
    }
    @include screens.min-md {
        gap: clamp(20px, 1.38vw, 30px);
    }

    &__item {
        display: flex !important;
        flex-direction: column;
        flex: 0 0 100%;
        gap: clamp(24px, 1.5vw, 30px);
        background-color: var(--color-peach-96);
        border-radius: 10px;
        padding: clamp(40px, 2.5vw, 50px);
        gap: clamp(24px, 1.5vw, 30px);

        @include screens.min-smd {
            text-align: left;
            flex: 0 0 calc((100% - 1.3vw) / 2);
        }
        @include screens.min-md {
            flex: 0 0 calc((100% - 2.76vw) / 3);
        }

        &__header-container {
            display: flex;
            align-items: center;
            justify-content: space-between;

            &__info-container {
                text-align: left;
                &__title {
                    font-weight: 500;
                    color: var(--color-grey-20);
                }

                &__slug {
                    font-weight: 400;
                    color: var(--color-grey-40);
                }
            }

            &__profile-container {
                display: flex;
                align-items: center;
                gap: clamp(8px, 0.5vw, 10px);

                &__item-container {
                    display: grid;
                    place-items: center;
                    border: 1px solid var(--color-peach-90);
                    border-radius: 8px;
                    width: clamp(44px, 3vw, 52px);
                    height: clamp(44px, 3vw, 52px);

                    &__item {
                        color: var(--color-purple-50);
                        width: clamp(24px, 1.5vw, 28px);
                        height: clamp(24px, 1.5vw, 28px);
                    }
                }
            }
        }

        &__content {
            text-align: left;
            flex: 1;
            font-weight: 400;
            color: var(--color-grey-30);
        }
    }
}

How to create Free AI Headshot Generator? [closed]

I wanna to create https://gpt-image.dev/

I’m working on building a free, self-hosted AI headshot generator—a web service that takes a user’s photo (or even just a text prompt) and produces a polished, studio-style headshot. I’m looking for guidance on which open-source models, libraries, and deployment approaches make this feasible without relying on paid APIs. Any pointers to example code or projects to fork would be hugely appreciated.

Background

  • Goal: Allow users to upload a photo (or optionally enter a prompt) and receive a high-quality headshot in real time.
  • Constraints:
    • Must use only free/open-source components (no paid inference APIs).
    • Should run on a single GPU server (e.g., an AWS/GCP/Hetzner instance).
    • Prefer a simple Python-based web backend (Flask, FastAPI, etc.).

What I’ve Tried

  1. Stable Diffusion + DreamBooth:
    • I fine-tuned a Stable Diffusion v1.5 checkpoint on a small dataset of portraits following the DreamBooth tutorial.
    • The results are okay, but they often hallucinate backgrounds or distort facial features.
  2. ControlNet with pose/face modules:
    • I experimented with ControlNet’s face landmarks conditioning to preserve facial structure.
    • It improves consistency but still doesn’t guarantee studio lighting or background removal.
  3. Hugging Face Inference API:
    • Tested the “stable-diffusion-inpainting” and “face-restoration” endpoints—great quality, but they are rate-limited and not free for production.
  4. OpenCV + dlib for preprocessing:
    • I can detect and align faces reliably, but integrating this with the model pipeline in real time is proving tricky.

Environment

  • Programming Language: Python 3.10
  • Frameworks: PyTorch, Hugging Face Diffusers, OpenCV
  • Deployment Target: Ubuntu 22.04, single NVIDIA T4 GPU
  • Web Server: FastAPI + Uvicorn

What I’m Looking For

  • Model recommendations: Are there specialized open models (e.g. pre-trained portrait-style checkpoints or LoRA adapters) that excel at headshot generation?
  • Pipeline examples: Sample code or GitHub repos demonstrating a full upload → preprocess → inference → postprocess → return workflow.
  • Real-time considerations: Tips for batching, GPU memory management, or quantization to keep latency under ~5 seconds per image.
  • Background/lighting control: Advice on achieving consistent studio backgrounds (white/gray) and professional lighting effects.

Any pointers to tutorials, code snippets, libraries, or open projects would be incredibly helpful. Thank you in advance!

Does V8 optimize inner functions based on closure values?

For example:

function makeFunc(a,b,c,d,e) {
    return () => {
        if (a) { /* do something expensive not referencing b,c,d,e */ }
        if (b) { /* do something expensive not referencing a,c,d,e */ }
        if (c) { /* do something expensive not referencing a,b,d,e */ }
        if (d) { /* do something expensive not referencing a,b,c,e */ }
        if (e) { /* do something expensive not referencing a,b,c,d */ }
    }
}

const func = makeFunc(true, false, false, false, false)
for ( let i=0; i < 100_000; i++) func()

I’m hoping that, in the example, V8 would optimize away the if (x) tests on the closure, emitting just the

/* do something expensive not referencing b,c,d,e */

after if (a) to be performed 100,000 times.

URL metadata preview not working with react router

I have a react.js website which uses the react router plugin to direct different component files for different routes, as seen below in my App.js file:

import React from 'react';
import {
  BrowserRouter as Router,
  Routes,
  Route
} from "react-router-dom";

import Sidebar from "./Components/Sidebar/Sidebar.js"
import Home from "./Components/Home/Home.js"
import Discogs2Youtube from './Components/Discogs2Youtube/Discogs2Youtube';

function App() {
  const homeIconPath = "./ico/martinbarker.ico";

  return (
    <Router>
      <Routes>
        <Route
          path="/"
          element={
            <Sidebar
              pageTitle="Martin Barker"
              icon={homeIconPath}
            >
              <Home />
            </Sidebar>
          }
        />

        <Route
          path="/discogs2youtube"
          element={
            <Sidebar
              pageTitle="Discogs2Youtube"
              pageSubTitle="Extract all YouTube videos from a Discogs artist/label/list."
              icon={homeIconPath}
            >
              <Discogs2Youtube />
            </Sidebar>
          }
        />

      </Routes>
    </Router>
  );
}

export default App;

I want to have custom tag previews for each route when I share the URL on twitter, discord, etc.. I am trying to do this with the react-helmet-async method like so:

import React, { useState, useEffect, useRef } from 'react';
import { Helmet } from 'react-helmet-async';
import 'react-lite-youtube-embed/dist/LiteYouTubeEmbed.css';
import styles from './Discogs2Youtube.module.css';

function Discogs2Youtube() {
    return (
        <>
            <Helmet>
                <title>Discogs2YouTube</title>
                <meta name="description" content="Extract youtube links from discogs " />

                {/* Google / Search Engine Tags */}
                <meta itemprop="name" content="Discogs2YouTube" />
                <meta itemprop="description" content="Extract youtube links from discogs " />
                <meta itemprop="image" content="https://upload.wikimedia.org/wikipedia/commons/thumb/f/fe/Discogs-logo-billboard-1548-1092x722.jpg/330px-Discogs-logo-billboard-1548-1092x722.jpg" />

                {/* Facebook Meta Tags */}
                <meta property="og:url" content="https://jermasearch.com/discogs2youtube" />
                <meta property="og:type" content="website" />
                <meta property="og:title" content="Discogs2YouTube" />
                <meta property="og:description" content="Extract youtube links from discogs " />
                <meta property="og:image" content="https://upload.wikimedia.org/wikipedia/commons/thumb/f/fe/Discogs-logo-billboard-1548-1092x722.jpg/330px-Discogs-logo-billboard-1548-1092x722.jpg" />

                {/* Twitter Meta Tags */}
                <meta name="twitter:card" content="summary_large_image" />
                <meta name="twitter:title" content="Discogs2YouTube" />
                <meta name="twitter:description" content="Extract youtube links from discogs " />
                <meta name="twitter:image" content="https://upload.wikimedia.org/wikipedia/commons/thumb/f/fe/Discogs-logo-billboard-1548-1092x722.jpg/330px-Discogs-logo-billboard-1548-1092x722.jpg" />
            </Helmet>
            <div className={styles.container}>
                Page Content
            </div>
        </>
    );
}

export default Discogs2Youtube;

But when I test my production URL “https://jermasearch.com/discogs2youtube” in a website such as “https://www.heymeta.com/results?url=https://jermasearch.com/discogs2youtube”, I can see there is no social media card preview for the URL:
enter image description here

I think the meta tags are coming from my index.html file which is empty as seen below in react:

<!DOCTYPE html>
<html lang="en">

<head>

  <!-- Enable cross-origin isolation -->
  <meta http-equiv="Cross-Origin-Embedder-Policy" content="require-corp" />
  <meta http-equiv="Cross-Origin-Opener-Policy" content="same-origin" />

</head>

<body>
  <div id="root"></div>
</body>

</html>

How can I add unique social media URL preview details for each individual route?
My code is located at: https://github.com/MartinBarker/aws-react-docker-ghactions
Thanks

Why is a SPAN element destroying Model properties in ASP.NET MVC 5?

In my model ConfirmationDetail, there is a List<ConfirmationNote> Notes property:

public class ConfirmationDetail
{
    public int OrderNumber { get; set; }
    public string SerialNumber { get; set; }
    public DateTime OrderDate { get; set; }
    public string Status { get; set; }
    public List<ConfirmationNote> Notes { get; set; } = new List<ConfirmationNote>();
    public bool OrderConfirmed { get; set; }
}

ConfirmationNote is defined here:

public class ConfirmationNote
{
    public DateTime Date { get; set; }
    public string Text { get; set; }
    public string EnteredBy { get; set; }
    public string DealerEmail1 { get; set; }
    public string DealerEmail2 { get; set; }
    public bool excludeEmail { get; set; }
}

Below is a snippet of the View:

<div class="submitForm form-group">
    <div class="submitFormInner">
        <div>
            @Html.HiddenFor(m => m.Detail.OrderNumber)
            @Html.HiddenFor(m => m.Detail.SerialNumber)
            @Html.HiddenFor(m => m.Detail.OrderDate)
            @Html.HiddenFor(m => m.Status)

            @Html.LabelFor(m => m.Note.Text, "NOTE TO SALES:")
            @Html.TextAreaFor(m => m.Note.Text, 4, 50,
            new
            {
                id = "NoteText",
                name = "txtNoteText",
                maxlength = 245,
                placeholder = "245 character limit.",
                @class = "text-danger form-control",
                onkeyup = "showRemaining(NoteText, spanNoteText, 245)"
            })
            <span id="spanNoteText" style="display: block;" />
            @if (Model.Note.DealerEmail1 != null)
            {
                <br />
                @Html.LabelFor(m => m.Note.DealerEmail1, "Email 1: ")
                @Html.EditorFor(m => m.Note.DealerEmail1)
                <br />
                @Html.LabelFor(m => m.Note.DealerEmail2, "Email 2: ")
                @Html.EditorFor(m => m.Note.DealerEmail2)
            }
        </div>
        <div>
            <input type="submit" value="Send Note" class="btn btn-default" />
        </div>
    </div>
</div>

I was having issues with users pasting too much text into a Note, so I created this JavaScript in the View to display how many characters were left.

<script>
    function showRemaining(itemX, statusX, maxchar) {
        const len = itemX.value.length;
        let number = 0;
        if (0 < len) {
            number = maxchar - len;
        } else {
            number = maxchar;
        }
        statusX.textContent = 'Remaining: ' + number; // problem line?
    }
</script>

In the last line of the JavaScript (with “problem line?”), If I let it write the text to statusX.textContent, then the email addresses will be NULL whenever I submit the model to the Controller. The Note.Text is not NULL.

I can comment out “problem line?” and the email addresses will still exist in the Controller.

The JavaScript added at “problem line?” was for a ticket that I had earlier this year that is already closed, so I need it to be part of the code.

What am I doing wrong, and how do I fix it? I want to display the number of characters left, but I don’t want to delete the salesman’s email addresses either.

If I need to add Controller code, just ask. I don’t think it is necessary, though.

Tampermonkey: removing blank rows left after deleting table content

I’m writing a Tampermonkey script that removes rows from a table on RateYourMusic voting pages if the descriptor is ‘scary’, ‘disturbing’, or ‘macabre’. That part works — but the blank rows that remain (empty green blocks) won’t go away.

Here’s how it looks like:

enter image description here

(I should say that I barely know any JavaScript, and I’ve been struggling with this problem for a while using ChatGPT to help.)

document.querySelectorAll('td > div:first-child').forEach(div => {
  const descriptor = div.textContent.trim().toLowerCase();
  if (['scary', 'disturbing', 'macabre'].includes(descriptor)) {
    const tr = div.closest('tr');
    if (tr) {
      console.log('remove', tr.innerHTML)
      tr.remove(); // this works!
    }
  }
});

document.querySelectorAll('tr').forEach(tr => {
  const text = tr.textContent.replace(/s|u200B|u00A0/g, '');
  if (text === '' && tr.offsetHeight > 30) {
    console.log('remove empty',tr.innerHTML)
    tr.remove(); // this *doesn't* work reliably
  }
});
td {
  border: 1px solid black
}
<table>
  <tbody>
    <tr>
      <td colspan="2">
        <div>macabre</div>
      </td>
    </tr>
    <tr>
      <td colspan="2">&nbsp;</td>
    </tr>
  </tbody>
</table>

The second part is meant to clean up leftover ghost rows — visually tall trs with no content — but they’re still showing up. I’ve tried using .textContent, .innerText, and different height thresholds. I also confirmed in DevTools that the remaining rows really are trs, sometimes just containing nbsp;.

Here’s what one of them looks like in DevTools:

<tr>
  <td colspan="2">&nbsp;</td>
</tr>

How can I reliably detect and remove these “ghost” rows?

Why Can This Not Write To Directory ‘Output’?

I am attempting to write a file to an existing directory, using node, path, and fs.

How it’s supposed to work:

  1. Init mock data.
  2. Loop through mock data.
  3. Write mock string to existing directory ‘output’
  4. End program.

How it’s working:

  1. Init mock data.
  2. Loop through mock data.
  3. Attempt to write to existing directory.
  4. Yield error:

Error:

throw new Error(`Error writing: ${err.message}`);
                                ^

Error: Error writing: ENOENT: no such file or directory, open 'C:Usersusernametestcheeriooutput55-207-0-228_2025-04-29_15:27:51.txt'
    at C:UsersusernametestcheeriocomponentsWriteFile.js:31:11
    at node:fs:2385:7
    at FSReqCallback.oncomplete (node:fs:188:23)

Repository

I’m working off this repository. The function handling node:fs writefile is found at /component/WriteFile.js; it is being invoked here, on these lines..

Project Tree

This is the project structure:

project-root/
├── components/             
├── node_modules/            
├── output/                  // Target for file write. 
├── .gitignore               
├── index.js               
├── LICENSE                 
├── package-lock.json        
├── package.json            
└── README.md                

WriteFile Snippet

Posting relevant code here for convenience.
WriteFile.js

const fs = require('node:fs');
const path = require('path');

const makeFile = async (fileName, { contentString, ip }) => {
    const now = new Date();
    const dateString =
        now.getFullYear() +
        '-' +
        String(now.getMonth() + 1).padStart(2, '0') +
        '-' +
        String(now.getDate()).padStart(2, '0') +
        '_' +
        String(now.getHours()).padStart(2, '0') +
        ':' +
        String(now.getMinutes()).padStart(2, '0') +
        ':' +
        String(now.getSeconds()).padStart(2, '0');

    contentString = `DATE: ${dateString}nFor ip: ${ip}n${contentString}`;

    const filepath = path.join(
        __dirname,
        '..',
        'output',
        `${fileName}_${dateString}.txt`
    );

    try {
        await fs.writeFile(filepath, contentString, 'utf16le', (err) => {
            if (err) {
                throw new Error(`Error writing: ${err.message}`);
            }
        });
        return 'Success';
    } catch (error) {
        console.error('nError:n', error.message, 'n');
    } finally {
        // Code that will run regardless of try/catch result
        // Remember, don't have a return in finally.
        console.log('Final completed.');
    }
};

module.exports = { makeFile };

Invoked At:

Which is being called at:

async function main() {
    let start = performance.now();
    let elapse;
    i = 0;
    for (const ip of ipList) {
        i++;
        if (i > 3) {
            break;
        }
        const sleep = (ms) =>
            new Promise((resolve) => {
                setTimeout(resolve, ms);
            });
        await sleep(500);

        await makeFile(ip.replaceAll('.', '-'), {
            contentString: 'Mockdata',
            ip: ip
        });
    }
    elapse = performance.now() - start;
    console.log('Total time elapsed: ', elapse / 1000);
}

how can I extract audio from video mp4 for macOS

I’m building a React JS app where I need to extract audio from a video file and convert it into a Blob with MIME type ‘audio/wav’. The extracted Blob will later be uploaded to Ai like chahgpt for Speech-to-Text.my this tow function working on web and desktop windows application but it problem on only macOS. thanks in advance.


    const extractAudioFromVideoFallback = async (videoFile: File): Promise<Blob | null> => {
    const audioContext = new AudioContext();

    try {
        const arrayBuffer = await videoFile.arrayBuffer();
        const audioBuffer = await audioContext.decodeAudioData(arrayBuffer);

        const offlineCtx = new OfflineAudioContext({
            numberOfChannels: audioBuffer.numberOfChannels,
            length: audioBuffer.length,
            sampleRate: audioBuffer.sampleRate,
        });

        const source = offlineCtx.createBufferSource();
        source.buffer = audioBuffer;
        source.connect(offlineCtx.destination);
        source.start();

        const renderedBuffer = await offlineCtx.startRendering();
        return audioBufferToWavBlob(renderedBuffer);
    } catch (err) {
        console.warn("Decoding failed or no audio track found:", err);
        return null;
    }
};

const audioBufferToWavBlob = (audioBuffer: AudioBuffer): Blob => {
    const numOfChan = audioBuffer.numberOfChannels;
    const length = audioBuffer.length * numOfChan * 2 + 44;
    const buffer = new ArrayBuffer(length);
    const view = new DataView(buffer);

    const writeString = (view: DataView, offset: number, string: string) => {
        for (let i = 0; i < string.length; i++) {
            view.setUint8(offset + i, string.charCodeAt(i));
        }
    };

    let offset = 0;
    writeString(view, offset, 'RIFF'); offset += 4;
    view.setUint32(offset, 36 + audioBuffer.length * numOfChan * 2, true); offset += 4;
    writeString(view, offset, 'WAVE'); offset += 4;
    writeString(view, offset, 'fmt '); offset += 4;
    view.setUint32(offset, 16, true); offset += 4;
    view.setUint16(offset, 1, true); offset += 2;
    view.setUint16(offset, numOfChan, true); offset += 2;
    view.setUint32(offset, audioBuffer.sampleRate, true); offset += 4;
    view.setUint32(offset, audioBuffer.sampleRate * numOfChan * 2, true); offset += 4;
    view.setUint16(offset, numOfChan * 2, true); offset += 2;
    view.setUint16(offset, 16, true); offset += 2;
    writeString(view, offset, 'data'); offset += 4;
    view.setUint32(offset, audioBuffer.length * numOfChan * 2, true); offset += 4;

    let interleaved = new Int16Array(audioBuffer.length * numOfChan);
    for (let i = 0; i < audioBuffer.length; i++) {
        for (let channel = 0; channel < numOfChan; channel++) {
            const sample = audioBuffer.getChannelData(channel)[i];
            const s = Math.max(-1, Math.min(1, sample));
            interleaved[i * numOfChan + channel] = s < 0 ? s * 0x8000 : s * 0x7FFF;
        }
    }

    let index = 44;
    for (let i = 0; i < interleaved.length; i++, index += 2) {
        view.setInt16(index, interleaved[i], true);
    }

    return new Blob([buffer], { type: 'audio/wav' });
};


Hide Non-Visible Slides + Make Responsive

I have a few things that I am trying to do with SwiperJS. You can see on my site here: http://newgl.greenlegion.com

ISSUE 1: i cannot figure out a way to see more of the slide on mobile. It’s tough since the text on the slide gets bigger and you do not want it to run off the slide. Needs a min-height of the view port for the div of the slide. Shocked this is not responsive.

ISSUE 2: I am trying to hide the non-visible slides since instead of transitioning larger, I want them smaller on a timeline. The slides in the background are larger so you see them.

I tried to “hide” and “show” with d-none based on if there is a class of swiper-slide-visible is present, but does not seem to work (or I am calling the add/removeClass wrong).

    const targetElement = $(".swiper-slide");

    function checkAndHandleClass() {
        if (targetElement.hasClass("swiper-slide-visible")) {
            console.log("Element has 'swiper-slide-visible'");
            targetElement.removeClass("d-none");  //DOES NOT WORK
        } else {
            console.log("Element does not have 'swiper-slide-visible'");
            targetElement.addClass("d-none"); //DOES NOT WORK
        }
    }

    // Initial check on page load
    checkAndHandleClass();

    // Mutation Observer for monitoring class changes
    const observer = new MutationObserver(function(mutations) {
        mutations.forEach(function(mutation) {
            if (mutation.attributeName === "class") {
                checkAndHandleClass();
            }
        });
    });

    observer.observe(targetElement[0], {
        attributes: true,
        attributeFilter: ["class"]
    });