# main.py
  1. from langchain_community.llms import Ollama
  2. from langchain_community.document_loaders import WebBaseLoader
  3. from langchain.chains.summarize import load_summarize_chain
  4. loader = WebBaseLoader("https://ollama.com/blog/run-llama2-uncensored-locally")
  5. docs = loader.load()
  6. llm = Ollama(model="llama3.2")
  7. chain = load_summarize_chain(llm, chain_type="stuff")
  8. result = chain.invoke(docs)
  9. print(result)