import { ChatOllama, OllamaEmbeddings } from "@langchain/ollama";
import { MemoryVectorStore } from "langchain/vectorstores/memory";
import { SelfQueryRetriever } from "langchain/retrievers/self_query";
import { FunctionalTranslator } from "@langchain/core/structured_query";

import LoadOfSheet from "./loadofsheet.mjs";

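// Progress spinner on stderr; unref() keeps the interval from holding the
// process open, and the exit hook erases the spinner character on quit.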
let s = 0;
const spin = ['\\', '|', '/', '-'];
setInterval(() => { process.stderr.write(spin[s = ++s % spin.length] + '\u001b[0G'); }, 100).unref();
process.on('exit', () => { process.stderr.write('\u001b[2K'); });

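// Swap in any locally pulled Ollama model; the commented line is a smaller
// alternative.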
// const model = "llama3-chatqa:8b-v1.5-q8_0";
const model = "phi4:14b";

console.log(`Using model ${model}`);

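// Chat model and embeddings are served by a local Ollama instance (11434 is
// Ollama's default port). This assumes the model has already been pulled,
// e.g. `ollama pull phi4:14b`.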
const llm = new ChatOllama({ baseUrl: "http://127.0.0.1:11434", model });
const embeddings = new OllamaEmbeddings({model});

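// LoadOfSheet is the custom SheetJS-based loader: it emits one Document per
// data row and records each column as a metadata attribute.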
console.time("load of sheet");
const loader = new LoadOfSheet("./cd.xls");
const docs = await loader.load();
console.timeEnd("load of sheet");

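// Embed every row up front and keep the vectors in memory; nothing is
// persisted, so each run re-embeds the whole sheet.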
console.time("vector store");
const vectorStore = await MemoryVectorStore.fromDocuments(docs, embeddings);
console.timeEnd("vector store");

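// The self-query retriever has the LLM translate the question into a
// structured query over the loader's metadata attributes; FunctionalTranslator
// then compiles that query into the JavaScript filter function that
// MemoryVectorStore understands.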
console.time("query");
const selfQueryRetriever = SelfQueryRetriever.fromLLM({
  llm,
  vectorStore,
  documentContents: "Data rows from a worksheet",
  attributeInfo: loader.attributes,
  structuredQueryTranslator: new FunctionalTranslator(),
  searchParams: { k: 1024 } // return up to 1024 matches (the default of 4 would clip the result set)
});

const res = await selfQueryRetriever.invoke(
  "Which rows have over 40 miles per gallon?"
);
console.timeEnd("query");

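// Each hit carries the original row in metadata; print just the fields the
// question asked about.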
res.forEach(({ metadata }) => {
  console.log({ Name: metadata.Name, MPG: metadata.Miles_per_Gallon });
});
