@@ -1,7 +1,9 @@
 import socket
+import aiohttp
+import asyncio
 import urllib.parse
 import validators
-from typing import Union, Sequence, Iterator
+from typing import Any, AsyncIterator, Dict, Iterator, List, Sequence, Union

 from langchain_community.document_loaders import (
     WebBaseLoader,
@@ -68,6 +70,31 @@ def resolve_hostname(hostname):
 class SafeWebBaseLoader(WebBaseLoader):
     """WebBaseLoader with enhanced error handling for URLs."""

+    def _unpack_fetch_results(
+        self, results: Any, urls: List[str], parser: Union[str, None] = None
+    ) -> List[Any]:
+        """Unpack fetch results into BeautifulSoup objects."""
+        from bs4 import BeautifulSoup
+
+        final_results = []
+        for i, result in enumerate(results):
+            url = urls[i]
+            if parser is None:
+                if url.endswith(".xml"):
+                    parser = "xml"
+                else:
+                    parser = self.default_parser
+                self._check_parser(parser)
+            final_results.append(BeautifulSoup(result, parser, **self.bs_kwargs))
+        return final_results
+
+    async def ascrape_all(
+        self, urls: List[str], parser: Union[str, None] = None
+    ) -> List[Any]:
+        """Async fetch all urls, then return soups for all results."""
+        results = await self.fetch_all(urls)
+        return self._unpack_fetch_results(results, urls, parser=parser)
+
     def lazy_load(self) -> Iterator[Document]:
         """Lazy load text from the url(s) in web_path with error handling."""
         for path in self.web_paths:
@@ -91,6 +118,26 @@ class SafeWebBaseLoader(WebBaseLoader):
                 # Log the error and continue with the next URL
                 log.error(f"Error loading {path}: {e}")

+    async def alazy_load(self) -> AsyncIterator[Document]:
+        """Async lazy load text from the url(s) in web_path."""
+        results = await self.ascrape_all(self.web_paths)
+        for path, soup in zip(self.web_paths, results):
+            text = soup.get_text(**self.bs_get_text_kwargs)
+            metadata = {"source": path}
+            if title := soup.find("title"):
+                metadata["title"] = title.get_text()
+            if description := soup.find("meta", attrs={"name": "description"}):
+                metadata["description"] = description.get(
+                    "content", "No description found."
+                )
+            if html := soup.find("html"):
+                metadata["language"] = html.get("lang", "No language found.")
+            yield Document(page_content=text, metadata=metadata)
+
+    async def aload(self) -> list[Document]:
+        """Load data into Document objects."""
+        return [document async for document in self.alazy_load()]
+

 def get_web_loader(
     urls: Union[str, Sequence[str]],
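For reference, a minimal sketch of how the new async entry points could be exercised once this change lands. The import path below is an assumption for illustration and is not part of this diff; only aload()/alazy_load()/ascrape_all() come from the change above.

import asyncio

# Assumed module path -- adjust to wherever SafeWebBaseLoader lives in this repo.
from open_webui.retrieval.web.utils import SafeWebBaseLoader


async def main() -> None:
    loader = SafeWebBaseLoader("https://example.com")
    # aload() drives alazy_load(), which calls ascrape_all() to fetch the URLs
    # before parsing each response with BeautifulSoup and building Documents.
    documents = await loader.aload()
    for doc in documents:
        print(doc.metadata.get("source"), doc.metadata.get("title"))


asyncio.run(main())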