LangChain and HuggingFace in JavaScript: how does it work?

I’m creating a server in JavaScript and I want it to communicate with an LLM on HuggingFace through LangChain.
I can’t figure out how the HuggingFaceInferenceEmbeddings class works. If you know how to use it, or can link to proper documentation, please let me know.
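From the type definitions, it looks like HuggingFaceInferenceEmbeddings only exposes embedding methods (embedQuery and embedDocuments), so I’m not even sure it can generate text. This is a minimal sketch of what I think the class is for; the model name is just a placeholder I picked for testing:

import { HuggingFaceInferenceEmbeddings } from "@langchain/community/embeddings/hf";

// Turn a piece of text into an embedding vector (not generated text).
const embeddings = new HuggingFaceInferenceEmbeddings({
  apiKey: process.env.HUGGINGFACE_API_KEY,
  model: "sentence-transformers/all-MiniLM-L6-v2", // placeholder embedding model
});

const vector = await embeddings.embedQuery("Hello world");
console.log(vector.length); // an array of numbers, not a chat answer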

index.js

import express from 'express';
import mistral from './mistralClient.js';
import { configDotenv } from "dotenv";

configDotenv();

const app = express();
const PORT = process.env.PORT || 8080;

app.use(express.json());

app.post('/api/query', async (req, res) => {
  try {
    const response = await mistral.call({ // what should I use here to query Mistral?
      inputs: req.body.message,
    });
    console.log("Answer:", response);
    res.json({ answer: response });
  } catch (error) {
    console.error("Error in Mistral call:", error);
    res.status(500).json({ error: "Failed to get a response from the model" });
  }
});

app.listen(PORT, () => {
  console.log(`Server started at http://localhost:${PORT}`);
});
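For context, this is how I’m testing the endpoint from another Node script once the server is running (the message is just a throwaway example):

// Quick test of the endpoint (Node 18+, which has fetch built in).
const res = await fetch("http://localhost:8080/api/query", {
  method: "POST",
  headers: { "Content-Type": "application/json" },
  body: JSON.stringify({ message: "Hello, who are you?" }),
});
console.log(await res.json());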

mistralClient.js

import { HuggingFaceInferenceEmbeddings } from "@langchain/community/embeddings/hf";
import { configDotenv } from "dotenv";

configDotenv();

const apiKey = process.env.HUGGINGFACE_API_KEY;

console.log(apiKey); // just checking that the key is loaded from .env

const mistral = new HuggingFaceInferenceEmbeddings({
  apiKey: apiKey,
  model: 'mistralai/Mistral-7B-v0.1',
  temperature: 0.7,
  maxTokens: 200,
});

export default mistral;
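While digging through @langchain/community I also found a HuggingFaceInference class under llms/hf that seems to target text generation instead of embeddings. This is a rough sketch of how I’d expect to swap it in as the client; I haven’t verified the option names or the invoke call, so treat them as assumptions:

import { HuggingFaceInference } from "@langchain/community/llms/hf";
import { configDotenv } from "dotenv";

configDotenv();

// An LLM wrapper instead of an embeddings wrapper (unverified guess).
const mistral = new HuggingFaceInference({
  apiKey: process.env.HUGGINGFACE_API_KEY,
  model: 'mistralai/Mistral-7B-v0.1',
  temperature: 0.7,
  maxTokens: 200,
});

// invoke() takes a prompt string and should return the generated text.
const answer = await mistral.invoke("What is LangChain?");
console.log(answer);

export default mistral;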