How can I eliminate OneDrive picker v8 button error AADSTS900023?

I'm attempting to implement the changes suggested by this post to use the OneDrive JS picker with SDK v8, but with one button/link instead of two. I've run into multiple errors and am currently seeing this one:

invalid_request: 900023 – [2025-04-23 20:55:37Z]: AADSTS900023: Specified tenant identifier ‘consumer’ is neither a valid DNS name, nor a valid external domain. Trace ID: b139cb9f-e16b-4a40-b3ec-1c4e1c2c2100

Honestly this feels like a big game of whack-a-mole… -_-. Has anyone been successful in loading the OneDrive picker for both consumer and organization accounts?
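One detail I keep second-guessing is the authority string itself: as far as I can tell from the Microsoft identity platform docs, the recognised tenant aliases are common, organizations and consumers (plural), while my code below uses consumer for the personal-account authority. A minimal comparison (the second line is my assumption about the documented spelling, not something I have confirmed fixes this error):

const msalConsumerAuthority = 'https://login.microsoftonline.com/consumer';    // what I currently have
// const msalConsumerAuthority = 'https://login.microsoftonline.com/consumers'; // documented alias?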

Below is the script used to handle clicks on the picker button (rendered from a Laravel Blade PHP template – so sections like {!! !!} will be replaced with a value):

const baseUrl = "https://onedrive.live.com/picker";
    const msalConsumerAuthority = 'https://login.microsoftonline.com/consumer';
    const msalOrgAuthority = 'https://login.microsoftonline.com/common';
    let currentUser;
    let type = 'org';
    const delegateId = '{!! $delegateId !!}';
    const msalParams = {
        auth: {
            authority: "https://login.microsoftonline.com/common",
            clientId: "{!! $CLIENT_ID !!}",
            redirectUri: `${window.location.origin}${window.location.pathname}`
        },
    }

    const instance = new msal.PublicClientApplication(msalParams);
    const scopes = ['.default']; // 'https://graph.microsoft.com/.default'
    const oneDriveConsumerScopes = ['OneDrive.ReadWrite'];
    const oneDriveOrgScopes = ['.default']; // 'https://graph.microsoft.com/.default'
    // For personal accounts this will always be the tid. Refer https://learn.microsoft.com/en-us/entra/identity-platform/id-token-claims-reference
    function checkPersonalAccount({ idTokenClaims: { tid } }) {
        return tid === "9188040d-6c67-4c5b-b112-36a304b66dad";
    }

    function checkOrgAccount({ idTokenClaims: { tid } }) {
        return tid !== "9188040d-6c67-4c5b-b112-36a304b66dad";
    }
    /**
     * Combines an arbitrary set of paths, normalising the slashes
     * This is used for getting scopes based on the resource type in the OneDrive picker
     * @param paths 0 to n path parts to combine
     */
    function combine(...paths) {
        return paths
            .map((path) => path.replace(/^[\\|/]/, "").replace(/[\\|/]$/, ""))
            .join("/")
            .replace(/\\/g, "/");
    }

    /**
     *
     * @param {Object} Properties
     * @param {import("@azure/msal-browser").IPublicClientApplication} Properties.instance MSAL Instance to use
     * @param {string[]} Properties.scopes Scopes to ask for in the token. This is overridden when type and resource are passed
     * @param {import("@azure/msal-browser").AccountInfo} Properties.currentUser User Account to get the access token for
     * @param {string} Properties.authority URL to use for OAuth
     * @param {string | undefined} Properties.type Which type of resource to fetch from; only used in the OneDrive file picker event listener to request an access token appropriate for the resource
     * @param {string | undefined} Properties.resource Which resource to scope the token for; only used in the OneDrive file picker event listener.
     * @returns {Promise<string>} Access Token for the particular scope
     */
    async function getOneDriveAccessToken({
                                                     currentUser,
                                                     instance,
                                                     scopes,
                                                     authority,
                                                     type,
                                                     resource,
                                                 }) {
        let accessToken = "";
        let currentScopes = scopes;
        console.log('getOneDriveAccessToken() - type:', type, 'resource: ', resource, ' scopes: ', scopes, ' authority: ', authority);
        switch (type) {
            case "SharePoint":
            case "SharePoint_SelfIssued":
                currentScopes = [`${combine(resource, ".default")}`];
                break;
            default:
                break;
        }
        const popupOptions = {
            scopes: currentScopes,
            authority: type === 'org' ? msalOrgAuthority : msalConsumerAuthority
        };
        if (currentUser) {
            popupOptions.account = currentUser
        }
        try {
            console.log('calling instance.acquireTokenSilent() with scopes: ', currentScopes);
            // See if we already have a token cached
            const resp = await instance.acquireTokenSilent(popupOptions);
            console.log('setting active account after calling acquireTokenSilent(): ', resp.account);
            instance.setActiveAccount(resp.account);
            accessToken = resp.accessToken;
        } catch (e) {
            console.log(' getOneDriveAccessToken() - caught e:', e);
            if (e.message.includes('invalid_grant') || e.message.includes('no_account_error')) {
                console.log('calling acquireTokenPopup()  with options: ', popupOptions, 'type: ', type);
                return instance.acquireTokenPopup(popupOptions);
            } else {
                throw e;
            }
        }
        return accessToken;
    }
    function checkUser(data) {
        console.log('checkUser() - data: ', data);
        const personalAccount = instance.getAllAccounts().find(checkPersonalAccount);
        const orgAccount = instance.getAllAccounts().find(checkOrgAccount);
        currentUser = personalAccount || orgAccount;
        console.log('checkUser() - personalAccount: ', personalAccount, ' orgAccount: ', orgAccount);
        if (personalAccount) {
            type = 'personal';
            scopes.length = 0;
            scopes.push('OneDrive.ReadOnly');
        } else {
            if (data?.resource) {
                scopes.length = 0;
                oneDriveOrgScopes.length = 0;
                scopes.push(`${combine(data.resource, ".default")}`);
                oneDriveOrgScopes.push(`${combine(data.resource, ".default")}`);
                console.log('pushed combined scope to scopes', scopes);
            } else {
                scopes.push('Files.Read.All', 'Files.Read', 'User.Read');
                console.log('pushed default files scopes to scopes');
            }
        }
    }

    // the options we pass to the picker page through the querystring
    const oneDrivePickerOptions = {
        sdk: "8.0",
        entry: {},
        authentication: {},
        // prompt: 'consent',
        messaging: {
            origin: window.location.href,
            channelId: "27"
        },
        selection: {
            mode: 'multiple',
            enablePersistence: true
        },
        typesAndSources: {
            mode: "files",
            pivots: {
                oneDrive: true,
                recent: true,
            },
        },
    };

    const onedrivePortEventListener = ({ port, oneDriveWindow, type }) => async (message) => {
        console.log('onedrivePortEventListener() - message:', message, ' type: ', type);
        switch (message.data.type) {
            case "notification":
                break;
            case "command": {
                port.postMessage({
                    type: "acknowledge",
                    id: message.data.id,
                });
                const {
                    command,
                    items, // This is the files picked from the picker
                    type: commandType,
                    resource,
                } = message.data.data;
                if (command === 'authenticate') {
                    checkUser(message.data?.data);
                }
                // This is where the documentation misses a key detail: its sample code relies on these token options but never explains why they are needed.
                const tokenOptions =
                    type === "personal"
                        ? {
                            scopes: oneDriveConsumerScopes,
                            authority: msalConsumerAuthority,
                            currentUser,
                            instance,
                        }
                        : {
                            scopes: oneDriveOrgScopes,
                            authority: msalOrgAuthority,
                            currentUser,
                            instance,
                            type: commandType, // getOneDriveAccessToken() switches on this type. For tenant (org) users we can't reuse a fixed resource; the picker emits the resource and access type, and we have to request an access token scoped to it.
                            resource,
                        };
                switch (command) {
                    case "authenticate": {
                        // Based on the token options above, we can send the token to the picker
                        const token = await getOneDriveAccessToken(tokenOptions);
                        console.log('authenticated command - token: ', token);
                        if (token != null) {
                            port.postMessage({
                                type: "result",
                                id: message.data.id,
                                data: {
                                    result: "token",
                                    token,
                                },
                            });
                        } else {
                            console.log(`Could not get auth token for command: ${command}`);
                        }
                        break;
                    }

                    case "close":
                        oneDriveWindow.close();
                        break;

                    case "pick": {
                        // You can use the items from message.data.data and get the files picked by the users.
                        port.postMessage({
                            type: "result",
                            id: message.data.id,
                            data: {
                                result: "success",
                            },
                        });
                        oneDriveWindow.close();
                        break;
                    }

                    default:
                        port.postMessage({
                            result: "error",
                            error: {
                                code: "unsupportedCommand",
                                message: command,
                            },
                            isExpected: true,
                        });
                        break;
                }
                break;
            }
            default:
                break;
        }
    };
    let oneDriveWindow = null;
    let port = null;

    async function launchPicker(e) {
        //const authToken = await getAuthToken();
        oneDriveWindow = window.open("", "Picker", "width=800,height=600")
        const authToken = await getOneDriveAccessToken({
            instance,
            currentUser,
            type,
            authority: msalOrgAuthority, //msalConsumerAuthority,
            scopes: ['User.Read'] //oneDriveOrgScopes //oneDriveConsumerScopes,
        }); //*/
        const queryString = new URLSearchParams({
            filePicker: JSON.stringify({
                ...oneDrivePickerOptions,
                entry: { // See the entry difference for org
                    oneDrive: {
                        files: {},
                    },
                },
            }),
            locale: "en-us",
        });

        const url = `${baseUrl}?${queryString}`;

        const form = oneDriveWindow.document.createElement("form");
        form.setAttribute("action", url);
        form.setAttribute("method", "POST");
        oneDriveWindow.document.body.append(form);

        const input = oneDriveWindow.document.createElement("input");
        input.setAttribute("type", "hidden")
        input.setAttribute("name", "access_token");
        input.setAttribute("value", authToken);
        form.appendChild(input);
        console.log('launchPicker() - submitting form');
        form.submit();
        sendMessageToParentWindow({oneDrivePickerOpened: true});

        window.addEventListener("message", (event) => {
            console.log('message received: ', event);
            if (event.source && event.source === oneDriveWindow) {
                const message = event.data;
                if (message.type === "initialize" && message.channelId === oneDrivePickerOptions.messaging.channelId) {
                    port = event.ports[0];
                    port.addEventListener("message", onedrivePortEventListener({port, oneDriveWindow, type}));
                    port.start();
                    port.postMessage({
                        type: "activate",
                    });
                }
            }
        });
        e.preventDefault();
    }
    function sendMessageToParentWindow(message) {
        if (window.parent != window) {
            if (delegateId) {
                message.delegateId = delegateId;
            }
            window.parent.postMessage(message, '*');
        }
    }
    window.addEventListener('DOMContentLoaded', function() {
        document.addEventListener('click', launchPicker);
    });

TypeError: Cannot read properties of undefined (reading ‘id’) when using createAdapter() from @socket.io/cluster-adapter

I'm trying to set up Socket.IO with cluster support using PM2, following these docs.

Here’s my code:

import express from "express";
import http from "http";
import { Server } from "socket.io";
import { createAdapter } from "@socket.io/redis-adapter";
import { createAdapter as createClusterAdapter } from "@socket.io/cluster-adapter";
import { setupWorker } from "@socket.io/sticky";

// redisClient and redisSubClient are created elsewhere in my setup

const app = express();
export const server = http.createServer(app);
export const io = new Server(server, {
    transports: ['websocket', 'polling'],
    adapter: createAdapter(redisClient, redisSubClient)
});

io.adapter(createClusterAdapter()); // src/server.ts:76:4
setupWorker(io);

When I try to run my code, it shows this error:

TypeError: Cannot read properties of undefined (reading 'id')
    at new ClusterAdapter (/home/michioxd/backend/node_modules/.pnpm/@[email protected][email protected]/node_modules/@socket.io/cluster-adapter/dist/index.js:61:34)
    at new <anonymous> (/home/michioxd/backend/node_modules/.pnpm/@[email protected][email protected]/node_modules/@socket.io/cluster-adapter/dist/index.js:40:16)
    at Namespace._initAdapter (/home/michioxd/backend/node_modules/.pnpm/[email protected]/node_modules/socket.io/dist/namespace.js:99:24)
    at Server.adapter (/home/michioxd/backend/node_modules/.pnpm/[email protected]/node_modules/socket.io/dist/index.js:188:17)
    at file:///home/michioxd/src/server.ts:76:4
    at ModuleJob.run (node:internal/modules/esm/module_job:274:25)
    at onImport.tracePromise.__proto__ (node:internal/modules/esm/loader:644:26)

I’m running the server using PM2 with the following command:

pm2 start index.js -i max
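For comparison, this is the wiring I understood from the cluster adapter examples when Node's own cluster module is in charge (a sketch; setupPrimary comes from @socket.io/cluster-adapter and setupMaster/setupWorker from @socket.io/sticky; with PM2 there is no obvious place to run the primary part, which may be related to my problem):

import cluster from "node:cluster";
import http from "node:http";
import { Server } from "socket.io";
import { setupMaster, setupWorker } from "@socket.io/sticky";
import { createAdapter, setupPrimary } from "@socket.io/cluster-adapter";

if (cluster.isPrimary) {
    const httpServer = http.createServer();
    setupMaster(httpServer, { loadBalancingMethod: "least-connection" }); // sticky sessions
    setupPrimary(); // relays packets between workers
    httpServer.listen(3000);
    for (let i = 0; i < 4; i++) cluster.fork();
} else {
    const httpServer = http.createServer();
    const io = new Server(httpServer, { transports: ["websocket", "polling"] });
    io.adapter(createAdapter()); // only the cluster adapter here
    setupWorker(io); // the sticky module hands the connection over, no listen() in the worker
}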

Why does this error occur? Am I using createAdapter() incorrectly? How should I properly set up Socket.IO with cluster support and PM2?

Show different tabs when selecting in dropdown

I want to display a different tab based on the selected item. This was working before when I was using buttons, but when I changed it to a dropdown menu it stopped working.

HTML:

<option class="tab">
        <select class="tablinks" onclick="openCity(event, 'AT3')" id="defaultOpen">Transatlantic 3</select>
        <select class="tablinks" onclick="openCity(event, 'AT4')">Transatlantic 4</select>
         <select class="tablinks" onclick="openCity(event, 'AL5')">Transatlantic 5</select>
      </option>

            <div id="AT3" class="tabcontent">
                <img src="images/AT3 (via CoGH)_20250118.jpg" class="service-map">
            </div>

            <div id="AT4" class="tabcontent">
                <img src="images/AT4 (via CoGH)_20250118.jpg" class="service-map">
            </div>

            <div id="AL5" class="tabcontent">
                <img src="images/AL5 (via CoGH)_20250118.jpg" class="service-map">
            </div>

JS:

    function openCity(evt, cityName) {
  var i, tabcontent, tablinks;
  tabcontent = document.getElementsByClassName("tabcontent");
  for (i = 0; i < tabcontent.length; i++) {
    tabcontent[i].style.display = "none";
  }
  tablinks = document.getElementsByClassName("tablinks");
  for (i = 0; i < tablinks.length; i++) {
    tablinks[i].className = tablinks[i].className.replace(" active", "");
  }
  document.getElementById(cityName).style.display = "block";
  evt.currentTarget.className += " active";
}
document.getElementById("defaultOpen").click();

Authorization for NextJS Application

I am a newbie in Next.js & I want to integrate authentication & authorization in my application. FYI, for now the project structure is like this –

├── .next
├── node_modules
├── public
├── src
│   ├── Components
│   │   └── LandingPage
│   │   │   ├── LoginForm.jsx
│   │   │   └── LoginForm.module.css
│   │   └── DashBoard
│   │       ├── DashBoard.jsx
│   │       └── DashBoard.module.css
│   ├── pages
│   │   ├── index.jsx
│   │   ├── _app.jsx
│   │   ├── _document.jsx
│   │   └── dashboard
│   │       └── index.jsx
│   └── styles
│       └── globals.css
├── .env
├── .gitignore
├── eslint.config.mjs
├── jsconfig.json
├── next.config.mjs
├── package-lock.json
├── package.json
└── README.md

I have two APIs for authentication & authorization. My task is to use the JWT token for authentication, and the role & session ID for authorization.

This is the API response from POSTMAN for login –

{
    "success": true,
    "message": "Logged in successfully",
    "data": {
        "access_token": "JWT token", //hiding actual token for security purpose
        "user_info": {
            "user_id": 1000,
            "username": "pritam",
            "role": "1000",
            "last_login": "2025-04-28T10:35:59.578Z",
            "created_at": "2024-12-18T09:19:29.000Z"
        }
    }
}

This is the API response from POSTMAN for session using bearer token –

{
    "success": true,
    "message": "Session active.",
    "sid": "E-lqAr78APRFyNDFPagBOnPkofwJeilZ",
    "cookies": {
        "parsed": {
            "connect.sid": "s:E-lqAr78APRFyNDFPagBOnPkofwJeilZ.ZeRm/rjOjxerh/6x7ltsU980Fo5osr9VSWc2UoABnlk"
        },
        "raw": "connect.sid=s%3AE-lqAr78APRFyNDFPagBOnPkofwJeilZ.ZeRm%2FrjOjxerh%2F6x7ltsU980Fo5osr9VSWc2UoABnlk"
    },
    "data": {
        "id": 1000,
        "username": "pritam",
        "role": "1000"
    }
}

Now I wrote this function for login form submit –

  const handleLogin = async () => {
  
    try {
      const payload = {
        username: formData.username,
        password: formData.password,
      };
  
      const res = await axios.post(
        'http://localhost:3000/api/v1/login/superadmin',
        payload,
        {
          headers: {
            'Content-Type': 'application/json'
          }
        }
      );
  
      if(res.data.success) {
        const { access_token, user_info } = res.data.data;
        Cookies.set('access_token', access_token, { expires: 1, secure: true, sameSite: 'Strict' });
        Cookies.set('role', user_info.role, { expires: 1, secure: true, sameSite: 'Strict' });

        router.replace('/dashboard');
      }
      else {
        toast.error('Login failed. Please check credentials.', {
          position: 'top-right',
          autoClose: 3000,
        });
      }

    } catch (err) {
      toast.error('Login failed. Please check credentials.', {
        position: 'top-right',
        autoClose: 3000,
      });
    }
  };

Now, what I planned is this: I'll create a middleware, and every time the app routes to a different page (except '/', which is the default login form landing page), a useEffect will run that validates whether the cookie data has the correct role (role === 1000) and whether the logged-in user's session is still valid. If these checks fail, it automatically routes back to the login page (either to log in again for a new token, or because the role is not authorized to access the page); only if both pass is the route allowed. This way every route in the application can be protected. I have written this _app.jsx code for that logic –

_app.jsx –

function MyApp({ Component, pageProps }) {
  const router = useRouter();

  useEffect(() => {
    const checkSession = async () => {

      // Skip session check on login page ('/') 
      if (router.pathname === '/') {
        return;
      }

      const token = Cookies.get('access_token');
      const role = Cookies.get('role');

      // If token or role missing or invalid, redirect to login
      if (!token || role !== '1000') {
        router.replace('/');
        toast.error('You do not have Authentication to access this page, Please Login First !!!', {
          position: 'top-right',
          autoClose: 4000,
        });
        return;
      }

      try {
        console.log(token);
        const res = await axios.get('http://localhost:3000/api/v1/check-session/superadmin', {
          headers: {
            Authorization: `Bearer ${token}`,
          },
          withCredentials: true
        });

        if (res.data.success) {
          console.log("response",res);
          Cookies.set('session_id', res.data.sid, { expires: 1, secure: true, sameSite: 'Strict' });
          // router.replace('/dashboard');
        } else {
          toast.error('You do not have Authentication to access this page, Please Login First !!!', {
            position: 'top-right',
            autoClose: 4000,
          });
          router.replace('/');
        }
      } catch (error) {
        toast.error('Network error !!! Please try again...', {
          position: 'top-right',
          autoClose: 2000,
        });
        router.replace('/');
      }
    };

    checkSession();
  }, [router.pathname]); // runs whenever route changes

  return (
    <>
      <Component {...pageProps} />
      <ToastContainer />
    </>
  );
}

But this is not working. I am getting this CORS error –

Access to XMLHttpRequest at 'http://localhost:3000/api/v1/check-session/superadmin' from origin 'http://localhost:3030' has been blocked by CORS policy: Response to preflight request doesn't pass access control check: The value of the 'Access-Control-Allow-Origin' header in the response must not be the wildcard '*' when the request's credentials mode is 'include'. The credentials mode of requests initiated by the XMLHttpRequest is controlled by the withCredentials attribute.
C:\Users\Titas Saha\Documents\dev\LIMS-Super-Admin-Frontend\src\pages\_app.jsx:37

GET http://localhost:3000/api/v1/check-session/superadmin net::ERR_FAILED

I did some R&D about this and found that this is a common issue when dealing with cookie data, for security reasons. I made changes to the backend server.js code, but I'm still facing the issue.

I have changed this line –
from this >

app.use(cors('*'));
app.use((req, res, next) => {
    res.setHeader("Access-Control-Allow-Origin", "*");
    next();
});

To this >

app.use(cors({
  origin: 'http://localhost:3030',
  credentials: true,
}));

app.options('*', cors({
  origin: 'http://localhost:3030',
  credentials: true,
}));
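For reference, my understanding is that with credentials the preflight response has to echo the exact origin and explicitly allow credentials. A hand-rolled Express equivalent of the cors() config above would look roughly like this (the header names are standard; the origin value is just my dev origin):

app.use((req, res, next) => {
  res.setHeader('Access-Control-Allow-Origin', 'http://localhost:3030'); // must be the literal origin, not '*'
  res.setHeader('Access-Control-Allow-Credentials', 'true');             // required when withCredentials is used
  res.setHeader('Access-Control-Allow-Headers', 'Content-Type, Authorization');
  if (req.method === 'OPTIONS') return res.sendStatus(204);              // answer the preflight
  next();
});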

But in the end I gave up on this and can't fix it. I don't know whether my logic is wrong or whether there is something else I'm missing. Can anyone help me with this?

How to access the DOM of a cross-origin iframe?

I want to access the document of an iframe that is loaded from a different domain than the main page.
I know that this is normally impossible due to browser security policies, but I found some workarounds using Puppeteer to interact with the iframe like a real user.
However, I want to go beyond simply interacting — I want to actually access the iframe’s DOM.

My idea was: Start Puppeteer with DevTools open, then simulate clicking inside the iframe using Puppeteer’s mouse actions.
This would trigger Chrome’s behavior where DevTools automatically switches context to the iframe (as it normally does when you manually select an element inside an iframe using the inspect tool).
After that, I should be able to use page.evaluate() or a similar function to access the iframe’s DOM directly.
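Before going down the DevTools route, the straightforward thing I assumed would be page.frames() plus frame.evaluate(), roughly like below (the URL check is just a placeholder), but I'm not sure how far that gets with a cross-origin frame, hence the idea above:

const frame = page.frames().find((f) => f.url().includes('example.com')); // placeholder match for the iframe's domain
if (frame) {
  const html = await frame.evaluate(() => document.documentElement.outerHTML); // runs inside the iframe's context
  console.log(html);
}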

Is there a way to implement this idea using Puppeteer?

How to fix manual input for datepicker

I am trying to use the PrimeVue DatePicker in a form. I am using Zod for schema validation.
The problem is that I cannot enter the date by hand. When I try, a day already appears and is appended onto the date I want to enter.

<template>

      <Form
        v-slot="$form"
        :initialValues="initialValues"
        :resolver="resolver"
        :validateOnValueUpdate="true"
        :validateOnBlur="false"
        @submit="onFormSubmit"
      >

            <DatePicker
              aria-label="Startdatum"
              inputId="start_date"
              id="start_date"
              v-model="initialValues.eventStartDate"
              name="start_date"
              placeholder="Tag/Monat/Jahr"
              :minDate="currentDate"
              dateFormat="dd/mm/yy"
              showIcon
              fluid
              manualInput
              :showOnFocus="true"
            />
      </Form>
</template>

<script setup lang="ts">

const initialValues = ref<FormValues>({
  start_date: null,
});

interface FormValues {
  start_date: null;
}

const formSchema = z
  .object({
    start_date: z
      .union([
        z
          .string()
          .regex(/^\d{2}\/\d{2}\/\d{4}$/, "Das eingegebene Datum ist ungültig.")
          .refine((value) => value !== null, "Enddatum ist erforderlich."),
        z.date(),
        z.null(),
      ])
      .refine((value) => value !== null, "Die Anfangszeit ist erforderlich."), // Stellt sicher, dass es nicht null bleibt
  });

</script>

When I remove the "name" or "format" attribute, I am able to enter a date myself without any trouble. So I feel it has something to do with the form validation.

Chroma npm package keeps crashing between page refreshes

I'm developing an app that uses Chroma to store vectors. I'm running into the problem that initially an operation runs fine, but after a page refresh I get the following notice: Please install chromadb-default-embed as a dependency with, e.g. npm install chromadb-default-embed. Installing it doesn't solve the problem though; it keeps showing the same behavior of running fine on the initial load and then giving me this error after a page refresh.

Below is my code for reference (Nuxt 3):

import { getQuery } from 'h3';
import prisma from '~/server/utils/prisma';
import { ChromaClient } from 'chromadb';

const chromaClient = new ChromaClient();

export default defineEventHandler(async (e) => {
    const { userUuid } = getQuery(e);

    try {
        const user = await prisma.user.findUnique({
            where: {
                uuid: userUuid
            }
        });
        
        const { sex, preference, goal } = user;

        console.log(sex, preference, goal);

        let collection = await chromaClient.getCollection({ name: 'bios' });

        const bios = await collection.get({
            ids: [userUuid]
        });

        if (bios.documents.length > 0 && sex && goal) {
            let where = {
                where: {
                    'uuid': {
                        '$ne': userUuid
                    }
                },
                '$and': {
                    'goal': {
                        '$eq': goal
                    }
                }
            };

            if (preference) {
                where['$and'] = {
                    'sex': preference
                }
            }
            else {
                where['$or'] = [{
                    'sex': 'Male'
                }, {
                    'sex': 'Female'
                }];

                where['$or'] = [{
                    'preference': sex
                }, {
                    'preference': null
                }];
            }

            console.log(where);

            const matches = await collection.query({
                queryTexts: [bios.documents[0]],
                where,
                nResults: 3
            });

            console.log(matches);
        }
    } 
    catch (error) {
        console.error(error);
            
        throw createError({
            statusCode: 500,
            message: error
        });
    }
 });
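The only workaround I have been experimenting with is passing an explicit embeddingFunction when getting the collection, so the client doesn't try to lazy-load chromadb-default-embed itself. This is just a sketch based on my reading of the client's EmbeddingFunction interface (an object with an async generate(texts) method returning one embedding per text); the stub below is obviously a placeholder, not a real embedder:

const embeddingFunction = {
    // placeholder: a real implementation must return one number[] embedding per input text
    generate: async (texts) => texts.map(() => new Array(384).fill(0)),
};

let collection = await chromaClient.getCollection({ name: 'bios', embeddingFunction });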

What is the role of writableStrategy and readableStrategy in a TransformStream?

The Web Streams API allows configuring back pressure through queuing strategies. My understanding is that back pressure makes the producer of a stream slow down if the consumer of the stream cannot process the data as fast as they are produced. To allow the producer of the stream to produce data in parallel to the consumer consuming it, a stream has an internal queue where it can cache some chunks before the consumer consumes them. This way, the producer can produce some data in advance, but not too much. The size of this queue can be configured through a queuing strategy. For object streams, the queue size is configured as number of objects (meaning number of chunks); for byte streams, it is configured as a number of bytes (meaning the sum of chunk sizes).

Since web streams are quite new and queuing strategies seem to be considered an advanced use case, there is not much detailed documentation about it online.

TransformStreams allow specifying two strategies: a writableStrategy and a readableStrategy. Does this mean that a TransformStream has two queues? If so, what is the difference between the two? I suspect that the writableStrategy will cache the input chunks before they are sent through the transform function, while the readableStrategy will cache the already transformed output chunks. Is my assumption correct?
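To make the question concrete, here is the kind of configuration I mean (a minimal sketch using the standard constructor signature, where the two strategies are the second and third arguments):

const upperCaser = new TransformStream(
  {
    transform(chunk, controller) {
      controller.enqueue(chunk.toUpperCase()); // the actual transformation
    },
  },
  new CountQueuingStrategy({ highWaterMark: 4 }), // writableStrategy: input chunks buffered before transform()?
  new CountQueuingStrategy({ highWaterMark: 2 })  // readableStrategy: transformed chunks buffered before the reader?
);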

How to optimize an animation that uses nth-child

I am using the nth-child pseudo-class for an animation that makes the text appear as if it is being typed out. Each time nth-child is used, it is used to delay the animation on that letter. Considering I am going to be using this for longer sequences of text than just “Sample Text” here, and the delay time goes up in a consistent pattern, I was wondering if there is a way to optimize this so I don’t have to deal with writing pretty much the same thing 30 times? Thanks in advance!

const typeDiv = document.getElementById("typer");

typeDiv.addEventListener("animationend", whenEnd);

function whenEnd() {
  document.getElementById("typer").style.color = "black";
}
#typer span {
  animation-name: typing;
  animation-duration: 2s;
  animation-iteration-count: 1;
}


#typer span:nth-child(1) {
  animation-delay: 0s;
}

#typer span:nth-child(2) {
  animation-delay: 0.2s;
}

#typer span:nth-child(3) {
  animation-delay: 0.4s;
}

#typer span:nth-child(4) {
  animation-delay: 0.6s;
}

#typer span:nth-child(5) {
  animation-delay: 0.8s;
}

#typer span:nth-child(6) {
  animation-delay: 1s;
}

#typer span:nth-child(7) {
  animation-delay: 1.2s;
}

#typer span:nth-child(8) {
  animation-delay: 1.4s;
}

#typer span:nth-child(9) {
  animation-delay: 1.6s;
}

#typer span:nth-child(10) {
  animation-delay: 1.8s;
}

#typer span:nth-child(11) {
  animation-delay: 2s;
}


@keyframes typing {

  0%,
  98% {
    color: transparent;
  }

  100% {
    color: black;
  }
}
<p id="typer" style="color:transparent;">
  <span>S</span><span>a</span><span>m</span><span>p</span><span>l</span><span>e</span><span> </span><span>T</span><span>e</span><span>x</span><span>t</span>
</p>
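For context, the direction I was imagining is generating the delays instead of hand-writing one rule per letter, for example a small script that sets an inline delay on each span (a sketch; the 0.2s step matches the CSS above):

document.querySelectorAll("#typer span").forEach((span, i) => {
  span.style.animationDelay = `${i * 0.2}s`; // same 0.2s increment as the nth-child rules
});

Presumably the same could be done with a per-span CSS custom property, but I'm not sure which approach is considered cleaner.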

Vertical Swiper with autoplay and autoHeight has weird jumps

I’m trying to create a “marquee” carousel style with Swiper.

My swiper properties are the following:

{
    loop: true,
    speed: 5000,
    allowTouchMove: false,
    slidesPerView: "auto",
    spaceBetween: 10,
    autoHeight: true,
    direction: "vertical",
    autoplay: {
      delay: 0,
      disableOnInteraction: false,
    },
 };

Also, I'm setting this property on the swiper wrapper so the autoplay runs smoothly, without speeding up and slowing down when reaching the end/start of a slide item.

.swiper-wrapper{
    transition-timing-function: linear;
}

Sandbox: https://codesandbox.io/p/sandbox/g4yzhx

Initially the carousel runs smoothly, but if you wait just a few seconds you'll notice the slides gradually start to have some hard jumps, perhaps due to a miscalculation problem, I'm not sure.

I've tried dozens of different properties and approaches, using several combinations of Swiper methods, but have had no success yet. I've lost count of how many Swiper API properties I've tried.

Access to the original e-mail when forwarding

I work with JavaScript and I have to develop an Outlook web add-in.

I have been trying to get the received date of an e-mail. I need it as an addition to the subject string (prepended on the left).

The function Office.context.mailbox.item.subject.setAsync is only available in compose mode, which covers New / Reply / Forward.

But in compose mode I don't get the received date anymore. I need access to the original e-mail.

I don't know how to continue. Thanks for any advice.

Why do base64 images not appear in Dompdf even though they are correctly generated?

I’m generating a PDF using Dompdf in Laravel.
In my Blade view, I’m capturing parts of the page as images using html2canvas and converting them to base64.

Example of my JavaScript code:

const rondes = await html2canvas(document.querySelector('.rondes'), { scale: 2 });
const veh = await html2canvas(document.querySelector('.veh'), { scale: 2 });

const pictures = [
    rondes.toDataURL('image/png'),
    veh.toDataURL('image/png')
];

Then, I send these base64 images to my Laravel controller, and I inject them into the generated HTML:

$html = '
<img src="' . $images[0] . '" style="max-width:150px;">
<img src="' . $images[1] . '" style="max-width:150px;">
';
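One thing I plan to rule out is the strings being truncated or mangled on the way to the controller, roughly with this kind of server-side sanity check before injecting them (str_starts_with() needs PHP 8):

foreach ($images as $i => $image) {
    if (!str_starts_with($image, 'data:image/png;base64,')) {
        \Log::warning("Image {$i} does not look like a PNG data URI");
    }
}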

Finally, I render the PDF with Dompdf:

$dompdf = App::make('dompdf.wrapper');
$dompdf->loadHtml($html);
$dompdf->setPaper('A4', 'portrait');
$dompdf->render();

What works :

  • The base64 strings seem correct when I console.log(pictures) in the browser.
  • I can see the images in my Blade view if I display them inside an <img> tag.

Problem :

  • In the final PDF, the images don’t appear.
  • Instead, only the image alt attribute shows up or a blank space.
  • No error in the browser or Laravel logs.

Extra :

  • I activated isRemoteEnabled and isHtml5ParserEnabled
  • Dompdf version : 3.1.1

Is there something specific Dompdf requires for base64 images to render correctly ?

How to remove a button after switching back to a layer? [closed]

I'm new to Leaflet and am trying to build my own map. So far I've managed to do everything I wanted except one thing: after switching back to the main layer, the Back button is not removed. How can I make it so that it is removed? Just in case, I'm posting the whole code.

<link rel="stylesheet" href="/resources/assets/leaflet.css" />
<script src="/resources/assets/leaflet/js/thereach/leaflet.js"></script>
<link rel="stylesheet" href="/resources/assets/leaflet/Control.FullScreen.css" />
<script src="/resources/assets/leaflet/Control.FullScreen.js"></script>

<div align="center" id="map" style="height: 600px;width: 600px;" allowfullscreen="allowfullscreen"></div>

<script>

    const bounds = [
        [0, 1280],
        [1280, 0]
    ];

    const dungeonIcon = L.icon({
      iconUrl: '/resources/assets/leaflet/icons/dungeon.png',
      iconSize: [48, 48],
      iconAnchor: [22, 14],
    });

    const areafinterestIcon = L.icon({
      iconUrl: '/resources/assets/leaflet/icons/poi_areaofinterest_complete.png',
      iconSize: [48, 48],
      iconAnchor: [22, 14],
    });

    const houseIcon = L.icon({
      iconUrl: '/resources/assets/leaflet/icons/poi_group_house_owned.png',
      iconSize: [48, 48],
      iconAnchor: [22, 14],
    });

    const arenaIcon = L.icon({
      iconUrl: '/resources/assets/leaflet/icons/poi_solotrial_complete.png',
      iconSize: [48, 48],
      iconAnchor: [22, 14],
    });

    const campfireIcon = L.icon({
      iconUrl: '/resources/assets/leaflet/icons/poi_camp_complete.png',
      iconSize: [48, 48],
      iconAnchor: [22, 14],
    });

  const map = L.map('map', {
    crs: L.CRS.Simple,
    minZoom: -1,
    maxZoom: 4,
    maxBounds: bounds,
    center: [640, 640],
    fullscreenControl: true,
    fullscreenControlOptions: {position: 'topleft'},
    forcePseudoFullscreen: true,
    zoom: 0
  }).setView([640, 640], 0);

  const mainBounds = [[0, 0], [1280, 1280]];
  const mainLayer = L.imageOverlay('/images/6/67/Reach_base.jpg', mainBounds).addTo(map);
  map.fitBounds(mainBounds);

// events are fired when entering or exiting fullscreen.
map.on('enterFullscreen', function () {
    console.log('entered fullscreen');
    map.setZoom(0);
});

map.on('exitFullscreen', function () {
    console.log('exited fullscreen');
    map.setZoom(-1);
});

  function addZones() {
L.marker([1064, 478], {icon: dungeonIcon}).addTo(map).bindPopup("<div align=center><a href=/index.php/test>test</a><hr>test</div>");
  }
backButton.disable();
  addZones();

const MarkarthZone = L.polygon([
    [628, 242],
    [635, 243],
    [640, 254],
    [637,275],
    [639,278],
    [657,287],
    [663,296],
    [663,296],
    [659,324],
    [665,337],
    [656,360],
    [647,373],
    [646,384],
    [632,396],
    [616,389],
    [606,369],
    [601,352],
    [596,344],
    [581,351],
    [573,336],
    [572,320],
    [574,302],
    [571,287],
    [574,264],
    [583,249],
    [596,252]
], {className: "leaflet-polygon"}).bindTooltip("Markarth");

MarkarthZone.addTo(map);
MarkarthZone.on('click', function () {
  map.eachLayer(function (layer) {
    map.removeLayer(layer);
  });
  const MarkarthBounds = [[0, 0], [1280, 1280]];
  L.imageOverlay('/images/d/d4/Markarthcity_base.jpg', MarkarthBounds).addTo(map);
  map.fitBounds(MarkarthBounds);

  const backButton = L.control({position: 'topright'});
      backButton.onAdd = function () {
        const div = L.DomUtil.create('div', 'leaflet-bar');
        div.innerHTML = '<a href="#" title="Back">←</a>';
        div.onclick = function () {
          map.eachLayer(layer => map.removeLayer(layer));
          mainLayer.addTo(map);
          map.fitBounds(mainBounds);
          MarkarthZone.addTo(map);
          addZones();
          return false;
          remove();
        };
       return div;
      };
      backButton.addTo(map);
    const MarkarthIcon = L.icon({
      iconUrl: '/resources/assets/leaflet/icons/Markarth.png',
      iconSize: [252, 74],
      iconAnchor: [22, 14],
    });
L.marker([1064, 478], {icon: MarkarthIcon}).addTo(map);

});
</script>

I tried several options for removing the button, but all in vain. The code either breaks or does not work.
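The kind of change I assume is needed is to keep the reference to the control and remove it from the map inside the click handler before returning (in my current code the remove() call sits after return false, so it never runs). A sketch, relying on map.removeControl():

        div.onclick = function () {
          map.eachLayer(layer => map.removeLayer(layer));
          mainLayer.addTo(map);
          map.fitBounds(mainBounds);
          MarkarthZone.addTo(map);
          addZones();
          map.removeControl(backButton); // remove the back button itself before leaving the handler
          return false;
        };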

Don’t match a subdomain / multi-level subdomain that ends with xyz.example.com using regex [closed]

(https?|ftp)://((?!wm.)[^/]+.)*(localhost|win|tplinkwifi.net|tplinkrepeater.net)/

These should be captured
http://win/myprofile
http://localhost/myprofile
http://ss.localhost/myprofile
http://ss.cdsada.win/myprofile

These shouldn't match
http://wm.win/myprofile
http://das.wm.win/myprofile
http://sadasd.small.wm.win/myprofile

This is my regex and these are my test cases

https://regex101.com/r/avyEpl/1

It works well, except that the last 2 lines still match and I don't want them to. How do I fix it?
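The direction I've been experimenting with is making the repeated group consume exactly one label per iteration, so the (?!wm\.) lookahead is applied to every subdomain level instead of only the first one. Something like this (dots escaped; untested beyond the cases above):

(https?|ftp)://((?!wm\.)[^./]+\.)*(localhost|win|tplinkwifi\.net|tplinkrepeater\.net)/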

How to keep the “Previous” button visible in a multi-page Intro.js tutorial?

I have a multi-page Intro.js tutorial, and I need to allow users to navigate between pages. Specifically, I want to make sure that when a page contains only 1 step, the “Previous” button remains visible so users can navigate back to the previous page.

Currently, on pages with only 1 step, the “Previous” button does not appear. I would like to ensure that the “Previous” button is always visible, even on pages with just 1 step, so users can go back to the previous page in the tutorial.

It would also be a nice addition to display the total step count for all pages, not just per page.

Is it possible to have one function that holds all the steps for my multiple pages, with a redirect between selected steps?

This is the code that I'm using on page 1; page 2 is pretty much the same, with the tutorial=1 include:

 function startTutorial() {
        introJs().setOptions({
            steps: [
                {
                    title: 'Welcome',
                    intro: "Intro text",
                    position: 'center'
                },
                {
                    element: document.querySelector('[data-step="1"]'),
                    intro: "Step 1"
                }
            ],
            exitOnOverlayClick: true,
            showBullets: false,
            nextLabel: 'Next',
            prevLabel: 'Previous',
            doneLabel: 'Next'
        }).oncomplete(function() {
            window.location.href = '{{ path("app_next_page", {"tutorial": true}) }}';
        }).start();
    }