Testing MUI components that change with screen size

I have an MUI Drawer component that is given different props based on the size of the window — above a certain window width, it becomes a permanent drawer and docks to the left side of the screen, and below a certain width it becomes a temporary drawer that is hidden until a button is pressed to reveal it. Here is the full code for this component:

import React, { useContext, useState } from 'react';
import { Box, Divider, Drawer, List, Toolbar } from '@mui/material';
import ArchiveIcon from '@mui/icons-material/Archive';
import DeleteIcon from '@mui/icons-material/Delete';
import { useTheme } from '@mui/material/styles';
import useMediaQuery from '@mui/material/useMediaQuery';

import AppDataContext from '../../SupportingModules/AppDataContext';
import FilterCheckbox from './FilterCheckbox';
import TagEditor from './TagEditor';
import FeatureButton from './FeatureButton';
import DrawerHeader from './DrawerHeader';


/**
 * TagDrawer contains checkboxes to select tags for image filtering.
 * 
 * @returns The TagDrawer component to be rendered in the app.
 */
export default function TagDrawer() {
    const {appData, appState} = useContext(AppDataContext);
    const [editTags, setEditTags] = useState(false);
    const theme = useTheme();
    const smallScreen = useMediaQuery(theme.breakpoints.down('md'));

    const drawerStyles = (style) => {
        const widthChooser = style=='permanent' ? 240 : "100%"
        return {
            backgroundColor: '#666666',
            width: widthChooser,
            flexShrink: 0,
            ['& .MuiDrawer-paper']:
                { width: widthChooser, backgroundColor: '#aaaaaa' },
            marginRight: '0.5%',
        };
    };

    const handleArchive = () => {
        // Not yet implemented
        console.log("Archive button clicked!");
    };

    const handleTrash = () => {
        // Not yet implemented
        console.log("Trash button clicked!");
    };

    const handleEdit = () => {
        // Toggle edit mode
        setEditTags(!editTags);
    };


    const permanentDrawerProps = {
        variant: 'permanent',
        sx: [drawerStyles('permanent'), {display: { xs: 'none', md: 'block' }}],
        'data-testid': 'permanent-drawer'
    };

    const temporaryDrawerProps = {
        variant: 'temporary',
        open: appState.drawerOpen,
        ModalProps: {keepMounted: true}, // improves performance
        sx: [drawerStyles('temporary'), {display: { xs: 'block', md: 'none' }}],
        'data-testid': 'temporary-drawer'
    };
    
    return (
        <Drawer {...(smallScreen ? temporaryDrawerProps : permanentDrawerProps)}>
            <Toolbar/>
            <Box sx={{ overflow: 'auto' }}>
                <DrawerHeader handleEdit={handleEdit}/>
                <List> {/* List tags available for filtering */}
                        {(appData.tagData.map((tag) => {
                            const tagId = tag.id;
                            const tagName = tag.name;
                            return(
                                editTags ?
                                
                                <TagEditor
                                text={tagName}
                                key={tagId}
                                tagId={tagId}/>
                                :
                                <FilterCheckbox
                                text={tagName}
                                key={tagId}
                                tagId={tagId}/>
                            )
                        }))}
                </List>
                <Divider/>
                <List> {/* Misc non-tag/filter options */}
                    <FeatureButton
                        name="Archive"
                        startIcon={<ArchiveIcon/>}
                        action={handleArchive}
                        active={false}/>
                    <FeatureButton
                        name="Trash"
                        startIcon={<DeleteIcon/>}
                        action={handleTrash}
                        active={false}/>
                </List>
            </Box>
        </Drawer>
    );
};

I’m trying to test this component, including that it responds appropriately to changes in window size. This has proven to be a little complicated, as changing the screen size hasn’t been very straightforward, and jsdom doesn’t support certain aspects of the window, and MUI’s documentation is absolutely abysmal in all things, testing in particular. Here’s what I’m working with so far:

import React from 'react';
import { render, screen, fireEvent } from '@testing-library/react';
import '@testing-library/jest-dom';

import AppDataContext from '../../SupportingModules/AppDataContext';
import TagDrawer from './TagDrawer';

describe('TagDrawer', () => {
    let appData;
    let appState;
    let component;
    beforeEach(() => {
        appData = {
            imageData: [], // not used in this component
            tagData: [
                {'id': '10', 'name': 'Favorites', 'owner': '1'},
                {'id': '12', 'name': 'Star Wars', 'owner': '1'},
                {'id': '13', 'name': 'Dad Jokes', 'owner': '1'}
            ],
            imageTagData: [] // not used in this component
        };
        appState = { drawerOpen: false };

        component = render(
            <AppDataContext.Provider value={{appData, appState}}>
                <TagDrawer/>
            </AppDataContext.Provider>
        )
    });
    
    test('shows permanent drawer on large screen', () => {
        // I know how to do this one but only because it renders large by default
    });

    test('shows temporary drawer on small screen', () => {
        Object.defineProperty(window, 'innerWidth', {
            writable: true,
            configurable: true,
            value: 150,
        });

        window.dispatchEvent(new Event('resize'));

        expect(screen.getByTestId('temporary-drawer')).toBeInTheDocument();
    });
    
    test('temporary drawer opens and closes', () => {
        // gotta figure out the above test first
    });

    test('correctly renders FilterCheckbox components', () => {
        const checkboxes = screen.getAllByRole('checkbox');
        expect(checkboxes.length).toEqual(appData.tagData.length);
    });

    test('correctly renders TagEditor components', () => {
        const editButton = screen.getByRole('switch');
        fireEvent(
            editButton,
            new MouseEvent('click', {
                bubbles: true,
                cancelable: true
            })
        );

        const editors = screen.getAllByRole('textbox');
        expect(editors.length).toEqual(appData.tagData.length);
    });
});

Currently the only failing test is the one where I am checking that testId ‘temporary-drawer’ is in the document.

I have read MUI’s documentation on testing generally and on testing components using the useMediaQuery hook, but the two combined amount to less than four paragraphs and a useless, unexplained example, and have not been helpful to me in understanding what I need to do here (if you’d like details on what’s confusing I’ll provide in comments). I understand that jsdom doesn’t implement the window.matchMedia that is required for this hook to work, and that somehow the css-mediaquery package is supposed to help with this, but I can’t find a full explanation anywhere, and in fact reading other sources just makes the issue more confusing, as everyone seems to have a totally different way to handle this every time I find a new source.

How can I get my component to render in a way that I can test it properly?

Issue with hardhat test

I am learning Solidity, and I encountered a test error while running my test script with Hardhat (`npx hardhat test`).
this error was produced

Instead change the require of chai.js in C:\Users\user\Desktop\hardtproject\test\sample-test.js to a dynamic import() which is available in all CommonJS modules.
    at Object.<anonymous> (C:\Users\user\Desktop\hardtproject\test\sample-test.js:2:20) {
  code: 'ERR_REQUIRE_ESM'
}

I am using Node version 20, and in my view I thought it would accept the require keyword. What should I do next?
test code was this:
// Mocha/Hardhat test for the ModifyVar contract.
//
// NOTE(review): chai v5+ ships as an ES module only, so require("chai") from a
// CommonJS test file raises ERR_REQUIRE_ESM. Either pin chai@4
// (npm i -D chai@4) or load it with `const { assert } = await import("chai");`
// inside the async test.
const { assert } = require("chai");

// the describe scope encapsulates an entire test called TestModifyVar
describe("TestModifyVar", function () {
  // the it says the behavior that should be expected from the test
  it("should change x to 74", async function () {
    // create an ethers ContractFactory abstraction:
    // https://docs.ethers.org/v5/api/contract/contract-factory/
    const ModifyVar = await ethers.getContractFactory("ModifyVar");

    // use the ContractFactory object to deploy an instance of the contract
    const contract = await ModifyVar.deploy(500);

    // wait for the contract to be deployed and validated
    await contract.deployed();

    // call the setter that updates the state variable
    await contract.setVar();

    // read the state variable back and check the expected value
    const newX = await contract.variable();
    assert.equal(newX.toNumber(), 74);
  });
});

I tried adding "type": "module" to package.json and it didn't work.

CSS to Vertical Center Text fails on Two Side by Side Containers Placed in Class by HTML

I have two containers that have header text that changes via JavaScript. The text describes files listed in containers below them. The containers are side by side horizontally on the page (left to right). I have encompassed the two Containers in a class to apply Vertical and Horizontal Centering to both of them with this html code:

HTML

<!-- Wrapper that centers both header containers. The inner divs need real
     id attributes: `<div = "...">` is invalid HTML and the name is silently
     dropped, so document.getElementById() cannot find these elements. -->
<div class="Container_Headers">
    <div id="Container_FileList_Header"></div>
    <div id="Container_FileDesc_Header"></div>
</div>

The following CSS in a separate .css file is applied to the class.

CSS

/* The wrapper is selected by class in the HTML (class="Container_Headers"),
   so a class selector is required — the original `#Container_Headers` id
   selector never matched, which is why the centering rules were never
   applied. Note align-items can only center vertically when the flex
   container is taller than its content. */
.Container_Headers {
  display: inline-flex;
  align-items: center;
  justify-content: center;
}



// Apply the generated text styles (plus the centering class) to both headers.
document.getElementById("Container_FileList_Header").className = "textstyle2     textstyle5 Container_Headers";
document.getElementById("Container_FileDesc_Header").className = "textstyle2 textstyle5 Container_Headers";

The CSS for the other textstyles is generated by RocketCake (the program I use to develop the website):
CSS

/* Text styles generated by RocketCake. textstyle5 centers text horizontally
   only; vertical centering must come from the flex rules on the wrapper.
   (Stray markdown backticks removed — they are invalid CSS.) */
.textstyle2 { font-size:12pt; font-family:Arial, Helvetica, sans-serif; color:#000000;  }
.textstyle5 { text-align:center; }

This code works fine on a single container and centers the text both horizontally and vertically. It fails to vertically center the text on two side by side containers however. The text is aligned to the top but centered horizontally.

Any help is greatly appreciated.

How to detect facial feature points in photos on React Native?

I want to use the Android app to detect the facial feature points of the photos taken to calculate the vertical distance from the earlobe to the chin. But now the PhotoScreen screen only displays the photos taken and the message “Calculating the average vertical distance from earlobe to chin…”,that is my App.tsx:

import React, { useCallback, useEffect, useMemo, useRef, useState } from 'react';
import {
  Platform,
  StyleProp,
  StyleSheet,
  useWindowDimensions,
  View,
  ViewStyle,
  Text,
  Image,
  BackHandler,
} from 'react-native';
import {
  Frame,
  useCameraDevices,
  useFrameProcessor,
} from 'react-native-vision-camera';
import {
  Dimensions,
  Face,
  faceBoundsAdjustToView,
  scanFaces,
  sortFormatsByResolution,
} from '@mat2718/vision-camera-face-detector';
import { runOnJS } from 'react-native-reanimated';
import { Camera } from 'react-native-vision-camera';
import Animated, {
  useSharedValue,
  withTiming,
  useAnimatedStyle,
} from 'react-native-reanimated';
import { createStackNavigator } from '@react-navigation/stack';
import { NavigationContainer } from '@react-navigation/native';
import { navigationRef, navigate } from './RootNavigation'; // 確保正確導入navigate
import ImageResizer from 'react-native-image-resizer';
import FaceDetection from '@react-native-ml-kit/face-detection';
import RNFS from 'react-native-fs';
import PhotoScreen from './PhotoScreen'; // 確保正確導入PhotoScreen

const focalLength = 2700; // focal length (unit: pixels)
const sensorHeight = 0.47; // sensor size (unit: centimeters)

const Stack = createStackNavigator();

/**
 * Full-screen camera view with live face detection.
 *
 * A frame processor scans each camera frame for faces; from the first face's
 * bounding box the code estimates distance (cm) and roll angle. When the
 * rolling-average distance sits in the 24–26 cm window with a near-zero roll
 * angle, a 3-second countdown is armed and a photo is taken automatically,
 * after which the app navigates to PhotoScreen with the photo path.
 */
const CameraScreen = () => {
  const [hasPermission, setHasPermission] = useState(false);
  const devices = useCameraDevices();
  const direction = 'front';
  const device = devices[direction];
  const camera = useRef(null);
  const [faces, setFaces] = useState([]);
  const { height: screenHeight, width: screenWidth } = useWindowDimensions();
  const landscapeMode = screenWidth > screenHeight;
  const [frameDimensions, setFrameDimensions] = useState();
  const [isActive, setIsActive] = useState(true);
  const [error, setError] = useState(null);
  const shouldTakePicture = useRef(false);
  const hasTakenPicture = useRef(false);
  const [photoPath, setPhotoPath] = useState(null);
  const countdown = useSharedValue(3);
  const countdownFinished = useSharedValue(false);
  const [isCountingDown, setIsCountingDown] = useState(false);
  const countdownText = useAnimatedStyle(() => ({
    opacity: countdown.value === 0 ? 0 : 1,
    transform: [{ scale: countdown.value === 0 ? 0 : 1 }],
  }));
  const distanceBuffer = useRef([]); // buffer holding recent distance readings
  const BUFFER_SIZE = 5; // buffer size
  const frameCounter = useRef(0); // initialize frameCounter
  const [distance, setDistance] = useState(null);
  const [angle, setAngle] = useState(null);

  // Android hardware back button resets the capture state instead of leaving.
  useEffect(() => {
    const backAction = () => {
      setPhotoPath(null);
      hasTakenPicture.current = false;
      distanceBuffer.current = [];
      setDistance(null);
      setAngle(null);
      return true;
    };

    const backHandler = BackHandler.addEventListener('hardwareBackPress', backAction);
    return () => backHandler.remove();
  }, []);

  // Deactivate the camera when the screen unmounts.
  useEffect(() => {
    return () => {
      setIsActive(false);
    };
  }, []);

  const formats = useMemo(() => device?.formats.sort(sortFormatsByResolution), [device?.formats]);
  const [format, setFormat] = useState(formats && formats.length > 0 ? formats[0] : undefined);

  // Store frame dimensions (swapped when the device is rotated to portrait)
  // and the faces found in the latest frame.
  const handleScan = useCallback((frame, newFaces) => {
    const isRotated = !landscapeMode;
    setFrameDimensions(
      isRotated
        ? {
            width: frame.height,
            height: frame.width,
          }
        : {
            width: frame.width,
            height: frame.height,
          },
    );
    setFaces(newFaces);
  }, [landscapeMode]);

  useEffect(() => {
    setFormat(formats && formats.length > 0 ? formats[0] : undefined);
  }, [device]);

  // Worklet: runs on the camera thread; results are marshalled back to the
  // JS thread via runOnJS.
  const frameProcessor = useFrameProcessor(
    frame => {
      'worklet';
      try {
        const scannedFaces = scanFaces(frame);
        runOnJS(handleScan)(frame, scannedFaces);
      } catch (e) {
        runOnJS(setError)(e.message);
      }
    },
    [handleScan],
  );

  // Request camera permission once on mount.
  // NOTE(review): newer react-native-vision-camera versions resolve 'granted'
  // rather than 'authorized' — verify against the installed version.
  useEffect(() => {
    (async () => {
      try {
        const status = await Camera.requestCameraPermission();
        setHasPermission(status === 'authorized');
      } catch (e) {
        setError(e.message);
      }
    })();
  }, []);

  // Copy the photo to a stable path, downscale it, and run ML Kit face
  // detection on the result (detections are currently only logged).
  const processImage = async (uri) => {
    console.log('Processing image:', uri);
    const fixedPath = `${RNFS.DocumentDirectoryPath}/fixed_photo.jpg`;
    await RNFS.copyFile(uri, fixedPath);
    const manipResult = await ImageResizer.createResizedImage(fixedPath, 800, 600, 'JPEG', 100);
    console.log('Resized image:', manipResult.uri);

    try {
      const faces = await FaceDetection.detect(manipResult.uri, { landmarkMode: 'all' });
      // console.log('Detected face landmarks:', faces);
      if (faces.length > 0) {
        // other processing logic can be added here
      } else {
        console.log('No faces detected in the image');
      }
    } catch (error) {
      console.error('Error detecting face landmarks:', error);
    }
  };

  // Take the photo, reset the countdown state, kick off processing, and show
  // the result on PhotoScreen.
  const takePicture = useCallback(async () => {
    if (camera.current) {
      const photo = await camera.current.takePhoto();
      console.log('Photo taken:', photo);
      setPhotoPath(photo.path);
      shouldTakePicture.current = false;
      hasTakenPicture.current = true;
      countdown.value = 0;
      setIsCountingDown(false);
      countdownFinished.value = false;
      processImage(photo.path);
      navigate('PhotoScreen', { imagePath: photo.path });
    }
  }, [camera]);

  // Start the 3-second countdown once a capture has been requested.
  // NOTE(review): shouldTakePicture is a ref — mutating .current does not
  // re-run this effect; it only runs when isCountingDown changes. Confirm
  // this is the intended trigger.
  useEffect(() => {
    if (shouldTakePicture.current && !isCountingDown) {
      setIsCountingDown(true);
      countdown.value = 3;
      countdown.value = withTiming(0, { duration: 3000 }, finished => {
        if (finished) {
          countdownFinished.value = true;
        }
      });
    }
  }, [shouldTakePicture.current, isCountingDown]);

  // NOTE(review): countdownFinished is a Reanimated shared value; changing
  // .value does not trigger a React re-render, so this effect may not fire
  // when the countdown completes. Confirm on-device behavior.
  useEffect(() => {
    if (countdownFinished.value) {
      takePicture();
    }
  }, [countdownFinished.value, takePicture]);

  // Styles depend on the current screen dimensions, so they are built inside
  // the component (re-created on every render).
  const styles = StyleSheet.create({
    boundingBox: {
      borderRadius: 5,
      borderWidth: 3,
      borderColor: 'yellow',
      position: 'absolute',
    },
    crossSectionContainer: {
      height: 15,
      width: 15,
      position: 'absolute',
      justifyContent: 'center',
      alignItems: 'center',
      top: screenHeight / 2,
      left: screenWidth / 2,
    },
    verticalCrossHair: {
      height: '100%',
      position: 'absolute',
      justifyContent: 'center',
      alignItems: 'center',
      borderColor: 'yellow',
      borderWidth: 1,
    },
    horizontalCrossHair: {
      width: '100%',
      position: 'absolute',
      justifyContent: 'center',
      alignItems: 'center',
      borderColor: 'yellow',
      borderWidth: 1,
    },
    photoPreview: {
      position: 'absolute',
      top: 0,
      left: 0,
      width: '100%',
      height: '100%',
      zIndex: 10,
    },
    distanceText: {
      position: 'absolute',
      top: 40,
      left: 0,
      right: 0,
      textAlign: 'center',
      color: 'white',
      backgroundColor: 'rgba(0, 0, 0, 0.5)',
      padding: 10,
      fontSize: 20,
      zIndex: 20,
    },
    angleText: {
      position: 'absolute',
      top: 80,
      left: 0,
      right: 0,
      textAlign: 'center',
      color: 'white',
      backgroundColor: 'rgba(0, 0, 0, 0.5)',
      padding: 10,
      fontSize: 20,
    },
    countdownText: {
      position: 'absolute',
      top: screenHeight / 2 - 50,
      left: 0,
      right: 0,
      textAlign: 'center',
      fontSize: 100,
      color: 'white',
      zIndex: 10,
    },
    photoDistanceText: {
      position: 'absolute',
      bottom: 40,
      left: 0,
      right: 0,
      textAlign: 'center',
      color: 'white',
      backgroundColor: 'rgba(0, 0, 0, 0.5)',
      padding: 10,
      fontSize: 20,
      zIndex: 20,
    },
  });

  // Fullscreen overlay in which face bounding boxes are positioned.
  const boundingStyle = useMemo(
    () => ({
      position: 'absolute',
      top: 0,
      left: 0,
      width: screenWidth,
      height: screenHeight,
    }),
    [screenWidth, screenHeight],
  );

  /**
   * Estimate camera-to-face distance in cm with a pinhole-camera model.
   * NOTE(review): the full face-bounds width is used as the "eye distance"
   * while 6.3 cm is an interpupillary-distance constant — these scales
   * differ, so the absolute distance is likely biased; confirm calibration.
   */
  const calculateFaceDistance = (face) => {
    if (!face.bounds || face.bounds.width === undefined) {
      console.log('Face bounds or width is undefined');
      return null;
    }

    const focalLength = 2700;
    const realEyeDistance = 6.3;
    const eyeDistanceInPixels = face.bounds.width;

    const distance = (focalLength * realEyeDistance) / eyeDistanceInPixels;
    // console.log('Calculated distance (cm):', distance);
    return distance;
  };

  // Roll angle straight from the detector; null when unavailable.
  const calculateFaceAngle = (face) => {
    if (face.rollAngle === undefined) {
      return null;
    }
    return face.rollAngle;
  };

  // Derive distance/angle from the newest face, maintain a rolling average,
  // and arm/disarm the auto-capture trigger every 5th processed frame.
  const handleFaces = useCallback((newFaces) => {
    frameCounter.current += 1;

    if (newFaces.length > 0) {
      const calculatedDistance = calculateFaceDistance(newFaces[0]);
      const calculatedAngle = calculateFaceAngle(newFaces[0]);
      // console.log('Calculated distance (cm):', calculatedDistance);
      // console.log('Calculated angle (degrees):', calculatedAngle);

      if (calculatedDistance !== null) {
        // Only update state when the displayed (2-decimal) value changes,
        // to avoid needless re-renders.
        setDistance(prevDistance => {
          if (calculatedDistance.toFixed(2) !== (prevDistance?.toFixed(2) || '')) {
            return calculatedDistance;
          }
          return prevDistance;
        });

        distanceBuffer.current.push(calculatedDistance);
        if (distanceBuffer.current.length > BUFFER_SIZE) {
          distanceBuffer.current.shift();
        }

        const avgDistance = distanceBuffer.current.reduce((a, b) => a + b, 0) / distanceBuffer.current.length;
        // console.log(`Average distance (cm): ${avgDistance}`);

        if (frameCounter.current % 5 === 0) {
          if (avgDistance >= 24 && avgDistance <= 26 && Math.abs(calculatedAngle) <= 3 && !shouldTakePicture.current && !hasTakenPicture.current) {
            shouldTakePicture.current = true;
          } else if (
            !(avgDistance >= 24 && avgDistance <= 26 && Math.abs(calculatedAngle) <= 3) &&
            isCountingDown
          ) {
            countdown.value = 3;
            setIsCountingDown(false);
            shouldTakePicture.current = false;
          }
        }
      }

      if (calculatedAngle !== null) {
        setAngle(calculatedAngle);
      }
    }
  }, [isCountingDown]);

  // Re-evaluate the capture logic whenever a new set of faces arrives.
  useEffect(() => {
    handleFaces(faces);
  }, [faces, handleFaces]);

  return device != null && hasPermission ? (
    <>
      {error && (
        <View style={{ position: 'absolute', top: 0, left: 0, right: 0, backgroundColor: 'red' }}>
          <Text style={{ color: 'white' }}>{error}</Text>
        </View>
      )}
      <Camera
        style={StyleSheet.absoluteFill}
        device={device}
        torch={'off'}
        isActive={isActive}
        ref={camera}
        photo={true}
        frameProcessor={frameProcessor}
        frameProcessorFps={30}
        audio={false}
        format={format}
      />
      <View style={styles.crossSectionContainer}>
        <View style={styles.verticalCrossHair} />
        <View style={styles.horizontalCrossHair} />
      </View>
      <View style={boundingStyle} testID="faceDetectionBoxView">
        {frameDimensions &&
          (() => {
            // Front camera on Android is mirrored, so boxes are anchored from
            // the right edge in that case.
            const mirrored = Platform.OS === 'android' && direction === 'front';
            const { adjustRect } = faceBoundsAdjustToView(
              frameDimensions,
              {
                width: screenWidth,
                height: screenHeight,
              },
              landscapeMode,
              50,
              50,
            );
            return faces
              ? faces.map((i, index) => {
                  const { left, ...others } = adjustRect(i.bounds);
                  return (
                    <View
                      key={index}
                      style={[
                        styles.boundingBox,
                        {
                          ...others,
                          [mirrored ? 'right' : 'left']: left,
                        },
                      ]}
                    />
                  );
                })
              : null;
          })()}
      </View>
      {distance !== null && (
        <Text style={styles.distanceText}>
          {`距離: ${distance.toFixed(2)} cm`}
        </Text>
      )}
      {angle !== null && (
        <Text style={styles.angleText}>
          {`角度: ${angle.toFixed(2)} 度`}
        </Text>
      )}
      {photoPath && (
        <>
          <Image source={{ uri: `file://${photoPath}` }} style={styles.photoPreview} />
          <Text style={styles.photoDistanceText}>照片已拍攝</Text>
        </>
      )}
      {isCountingDown && (
        <Animated.Text style={[styles.countdownText, countdownText]}>
          {countdown.value > 0 ? countdown.value.toFixed(0) : ''}
        </Animated.Text>
      )}
    </>
  ) : null;
};

/**
 * Root component: a stack navigator hosting CameraScreen and PhotoScreen.
 *
 * NOTE(review): Main.tsx wraps this component in another NavigationContainer
 * that shares the same navigationRef and registers the same routes — nested
 * NavigationContainers are not supported by React Navigation, so only one of
 * Main/App should be the app root. Confirm which entry point is registered.
 */
const App = () => {
  return (
    <NavigationContainer ref={navigationRef}>
      <Stack.Navigator>
        <Stack.Screen name="Camera" component={CameraScreen} options={{ headerShown: false }} />
        <Stack.Screen name="PhotoScreen" component={PhotoScreen} />
      </Stack.Navigator>
    </NavigationContainer>
  );
};

export default App;

and this is my PhotoScreen.tsx:

import React, { useEffect, useState, useRef } from 'react';
import { View, Image, Text, StyleSheet, Platform, PermissionsAndroid, Alert } from 'react-native';
import RNFS from 'react-native-fs';
import { WebView } from 'react-native-webview';

/**
 * PhotoScreen: shows the captured photo and runs face-mesh analysis inside a
 * hidden WebView; the embedded page is expected to post the average vertical
 * earlobe-to-chin distance back via postMessage, which is then rendered
 * below the image.
 *
 * @param route - navigation route; expects route.params.imagePath (file path
 *     of the captured photo).
 */
export default function PhotoScreen({ route }) {
  const { imagePath } = route.params;
  const [distanceInCm, setDistanceInCm] = useState(null);
  const [htmlContent, setHtmlContent] = useState('');
  const [permissionsGranted, setPermissionsGranted] = useState(false);
  const [errorMessage, setErrorMessage] = useState('');
  const [isWebViewLoaded, setIsWebViewLoaded] = useState(false);
  const webViewRef = useRef(null);

  // Handle JSON messages posted from the embedded page: 'face_landmarks'
  // (the result), 'error', and 'log'.
  const onMessage = (event) => {
    console.log('Received message from WebView:', event.nativeEvent.data);
    try {
      const data = JSON.parse(event.nativeEvent.data);
      if (data.type === 'face_landmarks') {
        console.log('Received face landmarks data:', data);
        setDistanceInCm(data.averageDistanceCm);
      } else if (data.type === 'error') {
        console.error('Error from WebView:', data.message);
        setErrorMessage(data.message);
      } else if (data.type === 'log') {
        console.log('WebView log:', data.message);
      }
    } catch (error) {
      console.error('Error parsing WebView message:', error);
    }
  };

  // Ask for storage permission on Android before reading the image file;
  // other platforms proceed immediately.
  useEffect(() => {
    const requestPermissions = async () => {
      if (Platform.OS === 'android') {
        try {
          const granted = await PermissionsAndroid.request(
            PermissionsAndroid.PERMISSIONS.READ_EXTERNAL_STORAGE,
            {
              title: 'Storage Permission',
              message: 'This app needs access to your storage to load images.',
              buttonNeutral: 'Ask Me Later',
              buttonNegative: 'Cancel',
              buttonPositive: 'OK',
            }
          );
          if (granted === PermissionsAndroid.RESULTS.GRANTED) {
            console.log('Storage permission granted');
            setPermissionsGranted(true);
          } else {
            console.log('Storage permission denied');
            setErrorMessage('存儲權限是使用此功能所必需的。');
          }
        } catch (err) {
          console.warn(err);
          setErrorMessage('請求權限時發生錯誤。');
        }
      } else {
        setPermissionsGranted(true);
      }
    };

    requestPermissions();
  }, []);

  // Build the self-contained HTML page: the photo is inlined as base64 and
  // the face-mesh script posts results back through ReactNativeWebView.
  // NOTE(review): the embedded script uses a bare ES-module
  // `import { FaceMesh } ...` inside a classic <script> tag — that is a
  // SyntaxError in the WebView, so none of the sendMessage calls would ever
  // run, which would leave this screen stuck on the "calculating" text.
  // init() also relies on id-globals (`image`, `canvas`) and an undeclared
  // `tf` reference — verify via the WebView console.
  useEffect(() => {
    const loadFaceMesh = async () => {
      try {
        const base64Image = await RNFS.readFile(imagePath, 'base64');
        const htmlContent = `
          <!DOCTYPE html>
          <html>
          <head>
            <meta name="viewport" content="width=device-width, initial-scale=1.0">
            <script src="https://cdn.jsdelivr.net/npm/@tensorflow/tfjs-core"></script>
            <script src="https://cdn.jsdelivr.net/npm/@tensorflow/tfjs-converter"></script>
            <script src="https://cdn.jsdelivr.net/npm/@tensorflow/tfjs-backend-webgl"></script>
            <script src="https://cdn.jsdelivr.net/npm/@tensorflow/[email protected]"></script>
            <script src="https://cdn.jsdelivr.net/npm/@tensorflow-models/[email protected]"></script>
          </head>
          <body style="margin:0; padding:0;">
            <img id="image" style="display:none;"/>
            <canvas id="canvas" style="width:100%; height:100%;"></canvas>
            <script>
              "use strict";
              let model;
              import { FaceMesh } from 'https://cdn.jsdelivr.net/npm/@mediapipe/face_mesh';
              const faceMesh = new FaceMesh();

              function sendMessage(type, data) {
                window.ReactNativeWebView.postMessage(JSON.stringify({ type, ...data }));
              }

              async function loadModel() {
                try {
                  await faceMesh.initialize();
                  sendMessage('log', { message: 'Face Mesh model loaded' });
                } catch (error) {
                  sendMessage('error', { message: 'Error loading model: ' + error.message });
                }
              }

              async function processImage() {
                try {
                  const image = document.getElementById('image');
                  const canvas = document.getElementById('canvas');
                  const ctx = canvas.getContext('2d');

                  sendMessage('log', { message: 'Image width: ' + image.width });
                  sendMessage('log', { message: 'Image height: ' + image.height });

                  canvas.width = image.width;
                  canvas.height = image.height;
                  ctx.drawImage(image, 0, 0);

                  const predictions = await faceMesh.estimateFaces({
                    input: canvas,
                  });

                  sendMessage('log', { message: 'Predictions: ' + JSON.stringify(predictions) });

                  if (predictions.length > 0) {
                    const landmarks = predictions[0].scaledMesh;
                    const leftEarLobe = landmarks[234];
                    const rightEarLobe = landmarks[454];
                    const chin = landmarks[152];
                    const leftDistance = Math.abs(leftEarLobe[1] - chin[1]);
                    const rightDistance = Math.abs(rightEarLobe[1] - chin[1]);
                    const averageDistance = (leftDistance + rightDistance) / 2;

                    sendMessage('log', { message: 'Average distance: ' + averageDistance });

                    const sensorHeight = 0.47;
                    const focalLength = 2700;
                    const cmPerPixel = sensorHeight / focalLength;
                    const averageDistanceCm = averageDistance * cmPerPixel;

                    sendMessage('log', { message: 'Average distance (cm): ' + averageDistanceCm });

                    sendMessage('face_landmarks', { 
                      landmarks: landmarks,
                      averageDistanceCm: averageDistanceCm
                    });
                  } else {
                    sendMessage('error', { message: 'No face detected' });
                  }
                } catch (error) {
                  sendMessage('error', { message: 'Image processing failed: ' + error.message });
                }
              }

              async function init() {
                try {
                  await loadModel();
                  image.onload = async () => {
                    try {
                      const predictions = await faceMesh.estimateFaces({
                        input: tf.browser.fromPixels(canvas),
                      });
                      sendMessage('log', { message: 'Predictions: ' + JSON.stringify(predictions) });          
                    } catch (error) {
                      sendMessage('error', { message: 'Error processing image: ' + error.message });
                    }
                  };
                  image.src = 'data:image/jpeg;base64,${base64Image}';
                } catch (error) {
                  sendMessage('error', { message: 'Error initializing face mesh: ' + error.message });
                }
              }

              init();
            </script>
          </body>
          </html>
        `;
        setHtmlContent(htmlContent);
      } catch (error) {
        console.error('Error loading image:', error);
        setErrorMessage('加載圖像時發生錯誤,請稍後再試。');
      }
    };

    if (permissionsGranted) {
      loadFaceMesh();
    }
  }, [imagePath, permissionsGranted]);

  return (
    <View style={styles.container}>
      <Image source={{ uri: `file://${imagePath}` }} style={styles.image} />
      {htmlContent !== '' && permissionsGranted && (
        <WebView
          ref={webViewRef}
          originWhitelist={['*']}
          source={{ html: htmlContent }}
          onMessage={onMessage}
          style={{ width: 1, height: 1, opacity: 0 }}
          javaScriptEnabled={true}
          domStorageEnabled={true}
          androidLayerType="software"
          androidHardwareAccelerationDisabled={true}
          onLoadEnd={() => {
            console.log('WebView loaded');
            setIsWebViewLoaded(true);
          }}
          onError={(syntheticEvent) => {
            const { nativeEvent } = syntheticEvent;
            console.error('WebView error: ', nativeEvent);
            setErrorMessage(`WebView 錯誤: ${nativeEvent.description}`);
          }}
        />
      )}
      {distanceInCm !== null ? (
        <Text style={styles.text}>耳垂到下巴的平均垂直距離: {distanceInCm.toFixed(2)} 厘米</Text>
      ) : errorMessage ? (
        <Text style={styles.errorText}>{errorMessage}</Text>
      ) : (
        <Text style={styles.text}>
          {isWebViewLoaded ? '正在計算耳垂到下巴的平均垂直距離...' : '正在加載模型...'}
        </Text>
      )}
    </View>
  );
}

// Layout styles for the photo/measurement screen.
const styles = StyleSheet.create({
  // Centers the photo and status text on a light grey background.
  container: {
    flex: 1,
    alignItems: 'center',
    justifyContent: 'center',
    backgroundColor: '#f0f0f0',
  },
  // Fixed-size preview of the captured photo; 'contain' avoids cropping.
  image: {
    width: 300,
    height: 300,
    resizeMode: 'contain',
    marginBottom: 20,
  },
  // Status / result text shown under the image.
  text: {
    marginTop: 20,
    fontSize: 18,
    textAlign: 'center',
    padding: 10,
  },
  // Same as `text` but red, used for error messages.
  errorText: {
    marginTop: 20,
    fontSize: 18,
    textAlign: 'center',
    padding: 10,
    color: 'red',
  },
});

Main.tsx:

import React from 'react';
import { NavigationContainer } from '@react-navigation/native';
import { createStackNavigator } from '@react-navigation/stack';
import App from './App';
import PhotoScreen from './PhotoScreen';
import { navigationRef } from './RootNavigation';

const Stack = createStackNavigator();

/**
 * Root component: hosts the navigation stack.
 *
 * `navigationRef` is forwarded to the container so navigation can be
 * triggered from outside React components (see RootNavigation.js).
 * The stack starts on the "Camera" screen.
 */
export default function Main() {
  return (
    <NavigationContainer ref={navigationRef}>
      <Stack.Navigator initialRouteName="Camera">
        <Stack.Screen name="Camera" component={App} />
        <Stack.Screen name="PhotoScreen" component={PhotoScreen} />
      </Stack.Navigator>
    </NavigationContainer>
  );
}

RootNavigation.js:

import * as React from 'react';

/** Shared ref that Main.tsx attaches to the root NavigationContainer. */
export const navigationRef = React.createRef();

/**
 * Imperatively navigate from outside the React component tree.
 * Silently does nothing while the container is not yet mounted.
 *
 * @param {string} name - Target route name.
 * @param {object} [params] - Optional route params.
 */
export function navigate(name, params) {
  const container = navigationRef.current;
  if (container) {
    container.navigate(name, params);
  }
}

package.json:

{
  "name": "vision-camera-face-detector-example",
  "description": "Example app for vision-camera-face-detector",
  "version": "0.0.1",
  "private": true,
  "scripts": {
    "android": "react-native run-android",
    "ios": "react-native run-ios",
    "start": "react-native start",
    "pods": "pod-install --quiet"
  },
  "dependencies": {
    "@babel/plugin-transform-class-properties": "^7.24.7",
    "@babel/plugin-transform-private-methods": "^7.24.7",
    "@babel/plugin-transform-private-property-in-object": "^7.24.7",
    "@react-native-ml-kit/face-detection": "^1.3.2",
    "@react-navigation/core": "^6.4.17",
    "@react-navigation/elements": "^1.3.31",
    "@react-navigation/native": "^6.1.18",
    "@react-navigation/stack": "^6.4.1",
    "react": "18.1.0",
    "react-native": "0.70.6",
    "react-native-fs": "^2.20.0",
    "react-native-gesture-handler": "^2.18.1",
    "react-native-image-resizer": "^1.4.5",
    "react-native-reanimated": "2.10.0",
    "react-native-safe-area-context": "^4.10.8",
    "react-native-vision-camera": "^2.15.2",
    "react-native-webview": "^13.10.5"
  },
  "devDependencies": {
    "@babel/core": "^7.25.2",
    "@babel/runtime": "^7.15.3",
    "@types/react": "^18.0.26",
    "@types/react-native": "^0.70.8",
    "babel-plugin-module-resolver": "^4.1.0",
    "eslint": "^7.32.0",
    "metro-react-native-babel-preset": "^0.66.2",
    "react-test-renderer": "18.1.0",
    "typescript": "^5.5.4"
  }
}

I want to calculate the average of the vertical distance from the left earlobe to the chin and the vertical distance from the right earlobe to the chin using facial landmarks, and display it on the phone screen together with the photo taken. Can anyone tell me what is wrong with the program? Thanks!

I want to calculate the average of the vertical distance from the left earlobe to the chin and the vertical distance from the right earlobe to the chin using facial landmarks, and display it on the phone screen together with the photo taken. Can anyone tell me what is wrong with the program? Thanks!

Cookies are not sent to dev server from localhost even I map it in hosts file

I have frontend application in http:// dev-app.company.local and backend in http:// dev-app.company.local/backend

Cookies work on the dev server, because the frontend and backend are on the same domain. But from localhost the cookies are not sent, which is also understandable, since the domains are different.

How can I send cookies from localhost to my backend on http:// dev-app.company.local/backend ?

P.S: Set Cookies header with JS is not an option, the browser removes it

I tried to change hosts file in OS, mapped my localhost to http:// local.dev-app.company.local and http:// company.local, while backend stays the same. but cookies are not sent to the server

I have changed cookie domain to .dev-app.company.local, it did not work either

Setting Cookies header with JS is not an option, the browser removes it. Maybe there is a way to disable this behavior idk. It is for development.

How do I create a STOMP server with Express

I’m trying to create WebSocket communication between an Express server and a React Native application; the client side is documented very well.


// STOMP client example (Node): @stomp/stompjs expects a browser-style
// global WebSocket, so the `ws` implementation is patched onto `global`.
import { Client } from '@stomp/stompjs';

import { WebSocket } from 'ws';
Object.assign(global, { WebSocket });

// Connects to a STOMP broker over WebSocket (port 15674 — presumably
// RabbitMQ's Web-STOMP plugin; verify against the broker setup).
const client = new Client({
  brokerURL: 'ws://localhost:15674/ws',
  onConnect: () => {
    // Echo test: subscribe to a topic, then publish to that same topic.
    client.subscribe('/topic/test01', message =>
      console.log(`Received: ${message.body}`)
    );
    client.publish({ destination: '/topic/test01', body: 'First Message' });
  },
});

// Opens the connection; onConnect fires once the broker accepts it.
client.activate();

but I don’t find any example on the server side, at least in Express, showing how sending messages to the client should be set up. I saw examples in Spring Boot but not Express.js. What am I missing?

thanks

Chaining middlewares in Next js 14 for auth.js

I have a next js application which has multiple lingual support. So I have a middleware to redirect users to URL with the locale. I want to implement auth js in this application.

/**
 * Locale middleware: when the request path does not already begin with a
 * supported locale segment, redirect to the same path prefixed with the
 * locale detected for this request.
 *
 * Returns undefined (request continues unchanged) when the path already
 * carries a locale.
 */
export function middleware(request: any) {
  const pathname = request.nextUrl.pathname;

  const pathnameIsMissingLocale = locales.every(
    (locale) => !pathname.startsWith(`/${locale}/`) && pathname !== `/${locale}`
  );

  if (pathnameIsMissingLocale) {
    const locale = getLocale(request);

    // `pathname` always starts with "/", so concatenate it directly after
    // the locale segment; `/${locale}/${pathname}` would produce a double
    // slash (e.g. "/en//about").
    return NextResponse.redirect(
      new URL(`/${locale}${pathname}`, request.url)
    );
  }
}

/**
 * Restrict the middleware to app routes: skip API routes, static assets,
 * docs, any path containing a file extension, and Next internals (_next).
 */
export const config = {
  matcher: [
    // Skip all internal paths (_next, assets, api, docs).
    // NOTE: the literal dot must be written "\\." — inside a plain string,
    // "\." is just ".", which the matcher would treat as "any character".
    "/((?!api|assets|docs|.*\\..*|_next).*)",
    // Optional: only run on root (/) URL
  ],
};

In this application, I want to implement authentication using auth.js v5. According to the docs, I have to add the below snippet in the middleware without any matcher.

import { auth as middleware } from "@/lib/auth";

How to resolve this issue? TIA

Renaming one of the functions doesn’t help as it has to be middleware for it work.

Google Charts Treemap Navigation not working on iOS?

I’m using Google Charts and it all seems to work generally fine on Desktop – specifically I can single-click down the tree, and right-click up the tree.

However on iOS (I don’t have an Android to hand) I can navigate ‘down’ the tree, but I don’t seem to be able to go back ‘up’ the tree.

Am I missing something obvious?

The documentation (https://developers.google.com/chart/interactive/docs/gallery/treemap) suggests the following options are available:

'click', 'contextmenu', 'dblclick', 'mouseout', 'mouseover' — where 'contextmenu' corresponds to the right-click.

<!DOCTYPE html>
<html>
<head>
  <title>File System TreeMap</title>
  <!-- Google Charts loader -->
  <script type="text/javascript" src="https://www.gstatic.com/charts/loader.js"></script>
  <script type="text/javascript">
    // Load the treemap package, then draw once the library is ready.
    google.charts.load('current', {'packages':['treemap']});
    google.charts.setOnLoadCallback(drawChart);

    // Builds the folder-size data table and renders the treemap.
    function drawChart() {
      var data = new google.visualization.DataTable();
      data.addColumn('string', 'Name');
      data.addColumn('string', 'Parent');
      data.addColumn('number', 'Size');

      // Rows: [node name, parent name (null for root), size value].
      data.addRows([
        ['data', null, 1366275107471],
        ['TEST-FOLDER', 'data', 1366262437854],
        ['BigFolder', 'TEST-FOLDER', 1091265105286],
        ['subfolder1', 'TEST-FOLDER', 93057215638],
        ['import', 'TEST-FOLDER', 68027988721],
        ['smallFolder2', 'subfolder1', 36929298127],
        ['smallFolder1', 'subfolder1', 29465188106],
        ['userarea', 'TEST-FOLDER', 26341000093],
        // More rows here
      ]);

      var tree = new google.visualization.TreeMap(document.getElementById('chart_div'));

      var optionsV50 = {
        headerHeight: 30,
        showScale: true,
        useWeightedAverageForAggregation: true,
        // Custom navigation gestures.
        // NOTE(review): 'contextmenu' (right-click) has no obvious touch
        // equivalent on iOS, which may be why rollup never fires there —
        // TODO confirm against the treemap eventsConfig documentation.
        eventsConfig: {
          rollup: ['contextmenu'],
          drilldown: ['click'],
        }

      };

      tree.draw(data, optionsV50);
    }
  </script>
</head>
<body>
  <div id="chart_div" style="width: 900px; height: 600px;"></div>
</body>
</html>

Javascript window.open() method [closed]

is there a way to pass more than one variable to the window.open() method? For example:

window.open(“myfile.html”, variable1, variable2);

I’m trying to open myfile.html from another html file using window.open and I’m using variable1 and variable2 in myfile.html.

Thanks.

Resolve blocking of main thread on heavy workloads

I am building a tiny library that interfaces with the Canvas API through functional means.

Some functions may take too long – either due to their complexity – or a programming error (infinite loop).

If possible, how would I be able to detect that the main thread is being blocked?

Obviously JavaScript is single-threaded so is there a way to go around this?

I never had experience with Web Workers, this primary issue needs to be resolved as I’m writing a small benchmark utility (for development) and it needs to terminate a task if it exceeds a certain execution time threshold. A while loop with a counter can’t help here if the function gets into an infinite loop (right?)


JavaScript is single-threaded, I’m assuming Web Workers could resolve the problem I’m facing outlined above.
My experience with Web Workers is limited, and don’t know how easy it is to apply to resolve the problem.

Getting “Uncaught (In promise) TypeError” when porting my Manifest v2 Chromium/Chrome Extension to Manifest v3

I am currently in the process of porting my manifest v2 Chrome/Chromium extension to manifest v3 and while I got the structure changed to support manifest v3, I keep getting this error when modifying my codebase. It is an extension called “PopupSound” that plays certain sounds when a browser navigation event is triggered. This is the error.

Uncaught (in promise) TypeError: self.AudioContext is not a constructor. sw.js:5 (anonymous function)

Here is my code.

sw.js (Service Worker)

// Lazily-created audio context shared by all sounds.
let audioContext;

// Initialize AudioContext and AudioWorklet
// NOTE(review): the reported runtime error ("self.AudioContext is not a
// constructor") suggests AudioContext is not available in this MV3
// service-worker context — presumably audio must be played elsewhere
// (e.g. an offscreen/extension page); confirm against the MV3 docs.
async function initAudio() {
  audioContext = new self.AudioContext();
  await audioContext.audioWorklet.addModule('audio-processor.js');
}

// Call this when the service worker installs
// NOTE(review): floating promise — a rejection here is unhandled.
initAudio();

// Fetches and decodes `filename`, then plays it through the worklet node.
// Re-runs initialization first if the audio context is not ready yet.
function playSound(filename) {
  if (!audioContext) {
    initAudio().then(() => playSound(filename));
    return;
  }

  fetch(filename)
    .then(response => response.arrayBuffer())
    .then(arrayBuffer => audioContext.decodeAudioData(arrayBuffer))
    .then(audioBuffer => {
      const source = audioContext.createBufferSource();
      source.buffer = audioBuffer;

      // Route: buffer source -> custom worklet -> speakers.
      const workletNode = new AudioWorkletNode(audioContext, 'audio-processor');
      source.connect(workletNode).connect(audioContext.destination);

      source.start();
    })
    .catch(error => console.error('Error playing sound:', error));
}

// Plays the click sound for top-frame navigations only (frameId 0).
function onNav({ frameId }) {
  if (frameId > 0) return;
  playSound("click.ogg");
}

chrome.webNavigation.onCreatedNavigationTarget.addListener(onNav);
chrome.webNavigation.onBeforeNavigate.addListener(onNav);
chrome.webNavigation.onReferenceFragmentUpdated.addListener(onNav);
chrome.webNavigation.onHistoryStateUpdated.addListener(onNav);

// Download lifecycle sounds: completion and failure.
chrome.downloads.onChanged.addListener(delta => {
  if (delta.state && delta.state.current === "complete") {
    playSound("DownloadComplete.ogg");
  }
  if (delta.error && delta.error.current) {
    playSound("DownloadFailed.ogg");
  }
});

// Sound when the user manually mutes/unmutes a tab.
chrome.tabs.onUpdated.addListener((tabId, { mutedInfo }) => {
  if (mutedInfo && mutedInfo.reason === "user") {
    playSound("Unlock.ogg");
  }
});

audio-processor.js

/**
 * Pass-through AudioWorklet processor: copies every input channel
 * straight to the matching output channel without modification.
 */
class AudioProcessor extends AudioWorkletProcessor {
  process(inputs, outputs, parameters) {
    // Only the first input/output pair is used.
    const input = inputs[0];
    const output = outputs[0];
    output.forEach((channelSamples, channelIndex) => {
      channelSamples.set(input[channelIndex]);
    });
    // Returning true keeps the processor alive.
    return true;
  }
}

registerProcessor('audio-processor', AudioProcessor);


manifest.json Manifest

{
  "manifest_version": 3,
  "name": "__MSG_extensionName__",
  "description": "__MSG_extensionDescription__",
  "author": "Michaael G.",
  "version": "3.0",
  "default_locale": "en",
  "offline_enabled": true,

  "icons": {
    "32": "icon_32.png",
    "96": "icon_96.png",
    "128": "icon_128.png"
  },

  "background": {
    "service_worker": "sw.js"
  },

  "host_permissions": [
    "<all_urls>"
  ],

  "web_accessible_resources": [
    {
      "resources": [
        "audio-processor.js",
        "click.ogg",
        "DownloadComplete.ogg",
        "DownloadFailed.ogg",
        "Unlock.ogg"
      ],
      "matches": [
        "<all_urls>"
      ]
    }
  ],

  "permissions": [
    "webNavigation", 
    "downloads",
    "tabs"
  ]
}

I need some help and guidance when resolving this issue. Thank you.

picking image resolution based on screen height * pixel density with srcset, picture, img html

I’d like to load a set of photos for which the limiting dimension will be height and also consider pixel density.

The images will fill the screen vertically and allow cropping horizontally in portrait orientation.

According to this CSStricks article, browsers automatically adjust srcset w units for pixel density:
https://css-tricks.com/responsive-images-youre-just-changing-resolutions-use-srcset/

I couldn’t find any mention of the pixel density adjustment in the mozilla docs, but srcset doesn’t include an h unit, so it looks like picking a resolution based on screen height falls under the category of “art direction”, which requires the picture element.

I was able to get this syntax to work, but I’m wondering if there is a more compact syntax for picking resolution based on screen height.

Also, there is something I don’t understand about mozilla’s definition of the srcset attribute, which states, “Space characters, other than the whitespace separating the URL and the corresponding condition descriptor, are ignored; this includes both leading and trailing space, as well as space before or after each comma. ”
https://developer.mozilla.org/en-US/docs/Web/API/HTMLImageElement/srcset

I learned here that Firefox and codepen require whitespace after commas, while Chrome and Edge ignore this whitespace:
Firefox fails to load picture elements with srcset, while Chrome and Edge successfully load photos

I’m interested to learn if there is a more compact syntax than this one that will allow picking a resolution based on screen height * pixel density:

<picture style="position: absolute;">
  <source srcset="https://example.com/h-768.webp, https://example.com/mh-1728.webp 2x, https://example.com/h-1728.webp 3x" media="(max-height: 768px)">
  <source srcset="https://example.com/h-1080.webp, https://example.com/h-1728.webp 2x, https://example.com/h-2796.webp 3x" media="(max-height: 864px)">
  <source srcset="https://example.com/h-1080.webp, https://example.com/h-2796.webp 2x" media="(max-height: 932px)">
  <source srcset="https://example.com/h-2796.webp" media="(min-height: 1081px)">
  <img src="https://example.com/h-768.webp" alt="alt" class="class">
</picture>

Response of my div after I trigger a function

Consider I have this simple div:

<div class="diamond"><span class="material-symbols-outlined" id="locks">lock</span></div>

The CSS for this is below:

.rotate-and-change {
    transform: rotate(360deg);
}
.diamond {
    aspect-ratio: 1 / 1;
    margin: 0px -2px;
    background-color: #f44336;
    clip-path: polygon(50% 0%, 100% 50%, 50% 100%, 0% 50%);
    scale: 0.8;
}
#locks{
    padding: 8px;
    transition: transform 0.5s ease;  
}

And the function that I am having trouble with is:

    // All diamond tiles and their inner lock icons.
    // NOTE(review): `locks` is collected but never used below, and
    // id="locks" appears on multiple elements — ids should be unique;
    // a class would fit better here. TODO confirm intent.
    let diamonds=document.getElementsByClassName('diamond');
    let locks=document.querySelectorAll('#locks');

// After a 0.5 s delay, recolors diamonds 0..(correctGuess - parameter):
// red when parameter == 0, green otherwise. `correctGuess` is defined
// elsewhere in the file. The icon-swap and rotate-and-change logic is
// currently commented out, so only the background color changes.
function changeTheMarksInDiamonds(parameter){
    setTimeout(() => {
    for(let i=0;i<=correctGuess-parameter;i++){
    diamonds[i].style.background=parameter==0?"#f44336":"#4caf50";
    //diamonds[i].innerHTML=parameter==1?"<span id="locks" class="material-symbols-outlined" >check</span>":"<span id="locks"class="material-symbols-outlined" >lock</span>";
    //diamonds[i].innerHTML=parameter==1?"<span id="locks" class="material-symbols-outlined" >check</span>":"<span id="locks"class="material-symbols-outlined" >lock</span>";
    /*
    const newLockElement = diamonds[i].querySelector("#locks");
    newLockElement.classList.add("rotate-and-change");
    */
}
  }, 500);
  // Runs immediately (outside the timeout): shrinks the statistics
  // panel's left padding as correctGuess grows.
  if(parameter==1){
    document.getElementsByClassName('conditionsStatistics')[0].style.paddingLeft=200-correctGuess*50+"px";
  }

}

So, whenever I call this changeTheMarksInDiamonds(parameter) function with any parameter, it is supposed to add the class rotate-and-change to the span with id locks after 0.5 seconds. But nothing like that happens. The content in the span does change, but the id locks gets attached as a class named locks, which I do not want. Moreover, the rotation-transformation effect also does not work. I thought there was a simple piece of logic being missed in my JavaScript code, but how do I solve it?

Any kind of assistance is appreciated.

JS: How to access a Button element inside several shadow roots of html page using JS? [duplicate]

I need to get the Claim button on this page https://bioniq.io/launch/motokos-at-nashville/public (after login by Google) using Javascript.

We can follow steps as below:

  1. Visit https://bioniq.io/launch/motokos-at-nashville/public

  2. Login using Google

  3. Claim

You can check in below screen for HTML structure: Click here to view the image

I tried to use below script to access Claim button but I cannot.

document.querySelector('bioniq-app').shadowRoot.querySelector('bioniq-launch-sale-main-page').shadowRoot.querySelector('bioniq-section-container').shadowRoot.querySelector('bioniq-launch-sale-header').shadowRoot.querySelector('bioniq-well').shadowRoot.querySelector('bioniq-launch-sale-voting').shadowRoot.querySelector('bioniq-launch-sale-group').shadowRoot.querySelector('toniq-button').shadowRoot.querySelector('button').click();

It cannot run and return the error. Could you please help to review and advise?

Thank you for your help

Error with schema table not connecting to express server

I am trying to set up a backend Express API to display contact details on a contact-us page and then save the details from the form into my DB, but I keep getting a status 500 error and the following error on my server

error saving data: error: relation "test" does not exist
    at /Users/server/node_modules/pg-pool/index.js:45:11
    at process.processTicksAndRejections (node:internal/process/task_queues:95:5)
    at async /Users/server/server.js:27:5 {

Here is my server

// Contact-page API server: serves static contact info and stores
// submitted contact-form data in Postgres via the shared pool in ./db.
require('dotenv').config();
const express = require('express');
const app = express();
const port = 8080;
const db = require('./db');

// Parse JSON and form-encoded request bodies.
app.use(express.json());
app.use(express.urlencoded({ extended: true }));

// GET /api/contact — static contact-page content.
app.get('/api/contact', (req, res) => {
  res.json({
    header: "Contact Us",
    body: " We've been around since 2013.",
    phone: "(123) 456-7890",
    email: "[email protected]",
    postalAddress: "1234 Property Lane, Suite 567, Citytown, ST 12345",
    businessHours: "Monday - Friday, 9 AM - 5 PM"
  });
});

// POST /api/submit — persist one contact-form submission.
// Uses a parameterized query, so user input never reaches the SQL text.
app.post('/api/submit', async (req, res) => {
  const { firstName, lastName, email, phone, message } = req.body;

  try {
    await db.query(
      'INSERT INTO test (first_name, last_name, email, phone_number, message) VALUES ($1, $2, $3, $4, $5)',
      [firstName, lastName, email, phone, message]
    );
    res.status(200).json({ message: 'form submitted' });
  } catch (error) {
    // NOTE(review): a 'relation "test" does not exist' error here usually
    // means the table lives in a different database/schema than the one
    // DATABASE_URL points at — verify the connection string.
    console.error('error saving data:', error);
    res.status(500).json({ error: 'an error occurred' });
  }
});

app.listen(port, () => {
  console.log(`server listening on port ${port}`);
});

DB.js file

// Shared Postgres connection pool for the API server.
const { Pool } = require('pg');
require('dotenv').config();

// All connection details come from DATABASE_URL in .env.
const pool = new Pool({
  connectionString: process.env.DATABASE_URL,
});

module.exports = pool;

env.

DATABASE_URL='postgresql://localhost:5432/contact_test'

I have double-checked my DB and table for syntax errors, and have dropped and re-created the table. I have also manually added records in psql, which works, but when testing through Postman with the http://localhost:8080/api/submit endpoint I keep getting the same error. Can I please get some help? Thanks.