- better formatting of input prompt
- use invoke instead of predict
@@ -1,6 +1,6 @@
from langchain.llms import Ollama
-input = input("What is your question?")
+input = input("What is your question?\n> ")
llm = Ollama(model="llama3.2")
-res = llm.predict(input)
+res = llm.invoke(input)
print (res)
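
For reference, a minimal sketch of the full script after this change. It assumes a LangChain release where `langchain.llms.Ollama` is still importable (newer releases move it to `langchain_community`), and a local Ollama server with the `llama3.2` model already pulled.

```python
from langchain.llms import Ollama

# Print the prompt on its own line with a "> " marker; note this shadows
# the `input` builtin, which is harmless in a short script like this one.
input = input("What is your question?\n> ")

llm = Ollama(model="llama3.2")

# `invoke` is the Runnable-style entry point that replaces the older
# `predict` call deprecated in recent LangChain versions.
res = llm.invoke(input)
print(res)
```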