import { MemoryVectorStore } from "langchain/vectorstores/memory";
import { OllamaEmbeddings } from "@langchain/community/embeddings/ollama";
const vectorstore = await MemoryVectorStore.fromTexts(
  [
    "Buildings are made out of brick",
    "Buildings are made out of wood",
    "Buildings are made out of stone",
    "Cars are made out of metal",
    "Cars are made out of plastic",
    "mitochondria is the powerhouse of the cell",
    "mitochondria is made of lipids",
  ],
  // One metadata object per source text
  [{ id: 1 }, { id: 2 }, { id: 3 }, { id: 4 }, { id: 5 }, { id: 6 }, { id: 7 }],
  new OllamaEmbeddings({
    model: "nomic-embed-text",
    baseUrl: "http://127.0.0.1:11434",
  })
);
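With the store populated, a MultiQueryRetriever can wrap its retriever so that each user question is expanded into several rewrites before the similarity search runs. Here is a minimal sketch, assuming a chat model named "llama3" is available from the same local Ollama instance:

import { MultiQueryRetriever } from "langchain/retrievers/multi_query";
import { ChatOllama } from "@langchain/community/chat_models/ollama";

// The model name "llama3" is an assumption; any chat model served by the
// local Ollama instance works here.
const model = new ChatOllama({
  model: "llama3",
  baseUrl: "http://127.0.0.1:11434",
});

const retriever = MultiQueryRetriever.fromLLM({
  llm: model,
  retriever: vectorstore.asRetriever(),
  queryCount: 3, // how many alternative questions to generate per query
});

// Each rewrite is run against the vector store and the results are deduplicated.
const docs = await retriever.invoke("What are mitochondria made of?");
console.log(docs.map((d) => d.pageContent));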
// langchain/src/retrievers/multi_query.ts
import { PromptTemplate } from "@langchain/core/prompts";

const DEFAULT_QUERY_PROMPT = new PromptTemplate({
  inputVariables: ["question", "queryCount"],
  template: `You are an AI language model assistant. Your task is
to generate {queryCount} different versions of the given user
question to retrieve relevant documents from a vector database.
By generating multiple perspectives on the user question,
your goal is to help the user overcome some of the limitations
of distance-based similarity search.

Provide these alternative questions separated by newlines between XML tags. For example:

<questions>
Question 1
Question 2
Question 3
</questions>

Original question: {question}`,
});
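The XML-delimited format makes the model's reply easy to split back into individual queries. The retriever ships its own output parser for this step; the regex-based helper below is a hypothetical sketch of the same idea, not the library's implementation:

// Hypothetical helper: pull the generated questions out of the model's reply.
function parseQueries(text: string): string[] {
  const match = text.match(/<questions>([\s\S]*?)<\/questions>/);
  if (!match) return [];
  return match[1]
    .split("\n")
    .map((line) => line.trim())
    .filter((line) => line.length > 0);
}

// A reply containing three rewrites yields a three-element array:
parseQueries("<questions>\nQ1\nQ2\nQ3\n</questions>"); // ["Q1", "Q2", "Q3"]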