findArt.ts

import { Chroma } from "langchain/vectorstores/chroma";
import { ChromaTranslator } from "langchain/retrievers/self_query/chroma";
import { Ollama } from "langchain/llms/ollama";
import { AttributeInfo } from "langchain/schema/query_constructor";
import { HuggingFaceTransformersEmbeddings } from "langchain/embeddings/hf_transformers";
import { SelfQueryRetriever } from "langchain/retrievers/self_query";

const modelName = "codellama";

// Define the attributes of the schema so that the model knows what to look for
const attributeInfo: AttributeInfo[] = [
  {
    name: "title",
    type: "string",
    description: "The title of the painting",
  },
  {
    name: "date",
    type: "integer",
    description: "The four-digit year when the painting was created",
  },
  {
    name: "artistName",
    type: "string",
    description:
      "The first name and last name of the artist who created the painting. Always use the full name in the filter, even if the query doesn't include it. If the query is 'van Gogh', the filter should be 'Vincent van Gogh'. Use 'Pierre-Auguste Renoir' instead of just 'Renoir'.",
  },
];

// Define the model used to generate embeddings; these capture the context of the input data
const embeddings = new HuggingFaceTransformersEmbeddings({
  modelName: "Xenova/all-MiniLM-L6-v2",
});

// Run the model using Ollama
const llm = new Ollama({
  model: modelName,
});

const documentContents = "Description of the art";

const findArt = async () => {
  // Load the saved vector store
  const vectorStore = await Chroma.fromExistingCollection(embeddings, {
    collectionName: "artcollection",
  });

  // Build a self-query retriever: the LLM rewrites the natural-language query
  // into search terms plus a metadata filter that Chroma can apply
  const retriever = SelfQueryRetriever.fromLLM({
    llm,
    vectorStore,
    documentContents,
    attributeInfo,
    verbose: false,
    useOriginalQuery: true,
    structuredQueryTranslator: new ChromaTranslator(),
  });

  // Get the query from the command line
  const query = process.argv[2];

  try {
    const results = await retriever.getRelevantDocuments(query, [
      // You can add callbacks to the retriever to get information about the process.
      // In this case, show the output from the LLM used to retrieve the documents.
      {
        handleLLMEnd(output) {
          console.log("This is the output from the LLM after it has come up with a filter");
          const llmEndOutput = output.generations[0][0].text
            .replace(/\\"/gm, "'")
            .replace(/\n/gm, "");
          console.log(`output - ${JSON.stringify(llmEndOutput, null, 2)}`);
        },
      },
    ]);
    console.log(results);
  } catch (error) {
    console.log(`There was an error getting the values: ${error}`);
  }
};

findArt();
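
For the retriever to find anything, the artcollection collection must already exist in Chroma, and every stored document needs title, date, and artistName metadata matching the attributeInfo schema above. Below is a minimal sketch of how such a collection might be seeded; the file name seedArt.ts, the sample paintings, and the seed wrapper are illustrative assumptions, not part of the original script.

seedArt.ts

// Hypothetical companion script; the sample data is illustrative
import { Chroma } from "langchain/vectorstores/chroma";
import { HuggingFaceTransformersEmbeddings } from "langchain/embeddings/hf_transformers";
import { Document } from "langchain/document";

// Use the same embedding model that findArt.ts uses at query time
const embeddings = new HuggingFaceTransformersEmbeddings({
  modelName: "Xenova/all-MiniLM-L6-v2",
});

// pageContent holds the description that gets embedded; the metadata keys
// must match the attribute names declared in attributeInfo
const docs = [
  new Document({
    pageContent: "A swirling night sky over a quiet village.",
    metadata: { title: "The Starry Night", date: 1889, artistName: "Vincent van Gogh" },
  }),
  new Document({
    pageContent: "A lively open-air dance in Montmartre on a Sunday afternoon.",
    metadata: { title: "Bal du moulin de la Galette", date: 1876, artistName: "Pierre-Auguste Renoir" },
  }),
];

const seed = async () => {
  // Embed the descriptions and write them to the collection findArt.ts loads
  await Chroma.fromDocuments(docs, embeddings, { collectionName: "artcollection" });
};

seed();

Once the collection is seeded, run findArt.ts with a natural-language query as its first argument (for example, with a TypeScript runner such as ts-node: ts-node findArt.ts "impressionist paintings from 1876"). The self-query step should then filter on the date metadata rather than relying on embedding similarity alone.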