12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
7127812791280128112821283128412851286128712881289129012911292129312941295129612971298129913001301130213031304130513061307130813091310131113121313131413151316131713181319132013211322132313241325132613271328132913301331133213331334133513361337133813391340134113421343134413451346134713481349135013511352135313541355135613571358135913601361136213631364136513661367136813691370137113721373137413751376137713781379138013811382138313841385138613871388138913901391139213931394139513961397139813991400140114021403140414051406140714081409141014111412141314141415141614171418141914201421142214231424142514261427142814291430143114321433143414351436143714381439144014411442144314441445144614471448144914501451145214531454145514561457145814591460146114621463146414651466146714681469147014711472147314741475147614771478147914801481148214831484148514861487148814891490149114921493149414951496149714981499150015011502150315041505150615071508150915101511 |
- from fastapi import (
- FastAPI,
- Depends,
- HTTPException,
- status,
- UploadFile,
- File,
- Form,
- )
- from fastapi.middleware.cors import CORSMiddleware
- import requests
- import os, shutil, logging, re
- from datetime import datetime
- from pathlib import Path
- from typing import Union, Sequence, Iterator, Any
- from chromadb.utils.batch_utils import create_batches
- from langchain_core.documents import Document
- from langchain_community.document_loaders import (
- WebBaseLoader,
- TextLoader,
- PyPDFLoader,
- CSVLoader,
- BSHTMLLoader,
- Docx2txtLoader,
- UnstructuredEPubLoader,
- UnstructuredWordDocumentLoader,
- UnstructuredMarkdownLoader,
- UnstructuredXMLLoader,
- UnstructuredRSTLoader,
- UnstructuredExcelLoader,
- UnstructuredPowerPointLoader,
- YoutubeLoader,
- OutlookMessageLoader,
- )
- from langchain.text_splitter import RecursiveCharacterTextSplitter
- import validators
- import urllib.parse
- import socket
- from pydantic import BaseModel
- from typing import Optional
- import mimetypes
- import uuid
- import json
- from apps.webui.models.documents import (
- Documents,
- DocumentForm,
- DocumentResponse,
- )
- from apps.webui.models.files import (
- Files,
- )
- from apps.rag.utils import (
- get_model_path,
- get_embedding_function,
- query_doc,
- query_doc_with_hybrid_search,
- query_collection,
- query_collection_with_hybrid_search,
- )
- from apps.rag.search.brave import search_brave
- from apps.rag.search.google_pse import search_google_pse
- from apps.rag.search.main import SearchResult
- from apps.rag.search.searxng import search_searxng
- from apps.rag.search.serper import search_serper
- from apps.rag.search.serpstack import search_serpstack
- from apps.rag.search.serply import search_serply
- from apps.rag.search.duckduckgo import search_duckduckgo
- from apps.rag.search.tavily import search_tavily
- from apps.rag.search.jina_search import search_jina
- from apps.rag.search.searchapi import search_searchapi
- from utils.misc import (
- calculate_sha256,
- calculate_sha256_string,
- sanitize_filename,
- extract_folders_after_data_docs,
- )
- from utils.utils import get_verified_user, get_admin_user
- from config import (
- AppConfig,
- ENV,
- SRC_LOG_LEVELS,
- UPLOAD_DIR,
- DOCS_DIR,
- CONTENT_EXTRACTION_ENGINE,
- TIKA_SERVER_URL,
- RAG_TOP_K,
- RAG_RELEVANCE_THRESHOLD,
- RAG_FILE_MAX_SIZE,
- RAG_FILE_MAX_COUNT,
- RAG_EMBEDDING_ENGINE,
- RAG_EMBEDDING_MODEL,
- RAG_EMBEDDING_MODEL_AUTO_UPDATE,
- RAG_EMBEDDING_MODEL_TRUST_REMOTE_CODE,
- ENABLE_RAG_HYBRID_SEARCH,
- ENABLE_RAG_WEB_LOADER_SSL_VERIFICATION,
- RAG_RERANKING_MODEL,
- PDF_EXTRACT_IMAGES,
- RAG_RERANKING_MODEL_AUTO_UPDATE,
- RAG_RERANKING_MODEL_TRUST_REMOTE_CODE,
- RAG_OPENAI_API_BASE_URL,
- RAG_OPENAI_API_KEY,
- DEVICE_TYPE,
- CHROMA_CLIENT,
- CHUNK_SIZE,
- CHUNK_OVERLAP,
- RAG_TEMPLATE,
- ENABLE_RAG_LOCAL_WEB_FETCH,
- YOUTUBE_LOADER_LANGUAGE,
- ENABLE_RAG_WEB_SEARCH,
- RAG_WEB_SEARCH_ENGINE,
- RAG_WEB_SEARCH_DOMAIN_FILTER_LIST,
- SEARXNG_QUERY_URL,
- GOOGLE_PSE_API_KEY,
- GOOGLE_PSE_ENGINE_ID,
- BRAVE_SEARCH_API_KEY,
- SERPSTACK_API_KEY,
- SERPSTACK_HTTPS,
- SERPER_API_KEY,
- SERPLY_API_KEY,
- TAVILY_API_KEY,
- SEARCHAPI_API_KEY,
- SEARCHAPI_ENGINE,
- RAG_WEB_SEARCH_RESULT_COUNT,
- RAG_WEB_SEARCH_CONCURRENT_REQUESTS,
- RAG_EMBEDDING_OPENAI_BATCH_SIZE,
- CORS_ALLOW_ORIGIN,
- )
- from constants import ERROR_MESSAGES
# Logger for this module, filtered at the configured "RAG" log level.
log = logging.getLogger(__name__)
log.setLevel(SRC_LOG_LEVELS["RAG"])

# Standalone FastAPI sub-application serving the RAG endpoints; mounted by the
# parent application elsewhere.
app = FastAPI()

# Mutable runtime configuration, seeded once at import time from the static
# values in config.  Endpoints below read and overwrite these attributes.
app.state.config = AppConfig()

# --- Retrieval parameters ---
app.state.config.TOP_K = RAG_TOP_K
app.state.config.RELEVANCE_THRESHOLD = RAG_RELEVANCE_THRESHOLD

# --- Upload limits ---
app.state.config.FILE_MAX_SIZE = RAG_FILE_MAX_SIZE
app.state.config.FILE_MAX_COUNT = RAG_FILE_MAX_COUNT

app.state.config.ENABLE_RAG_HYBRID_SEARCH = ENABLE_RAG_HYBRID_SEARCH
app.state.config.ENABLE_RAG_WEB_LOADER_SSL_VERIFICATION = (
    ENABLE_RAG_WEB_LOADER_SSL_VERIFICATION
)

# --- Content extraction and chunking ---
app.state.config.CONTENT_EXTRACTION_ENGINE = CONTENT_EXTRACTION_ENGINE
app.state.config.TIKA_SERVER_URL = TIKA_SERVER_URL
app.state.config.CHUNK_SIZE = CHUNK_SIZE
app.state.config.CHUNK_OVERLAP = CHUNK_OVERLAP

# --- Embedding / reranking model selection ---
app.state.config.RAG_EMBEDDING_ENGINE = RAG_EMBEDDING_ENGINE
app.state.config.RAG_EMBEDDING_MODEL = RAG_EMBEDDING_MODEL
app.state.config.RAG_EMBEDDING_OPENAI_BATCH_SIZE = RAG_EMBEDDING_OPENAI_BATCH_SIZE
app.state.config.RAG_RERANKING_MODEL = RAG_RERANKING_MODEL
app.state.config.RAG_TEMPLATE = RAG_TEMPLATE

# OpenAI-compatible endpoint credentials, used when the embedding engine is
# "openai" (or "ollama", which shares the same config fields).
app.state.config.OPENAI_API_BASE_URL = RAG_OPENAI_API_BASE_URL
app.state.config.OPENAI_API_KEY = RAG_OPENAI_API_KEY

app.state.config.PDF_EXTRACT_IMAGES = PDF_EXTRACT_IMAGES

# --- YouTube transcript loader ---
app.state.config.YOUTUBE_LOADER_LANGUAGE = YOUTUBE_LOADER_LANGUAGE
# Deliberately NOT on app.state.config: a plain, non-persisted app attribute.
app.state.YOUTUBE_LOADER_TRANSLATION = None

# --- Web search: master switch, engine choice, and per-provider credentials ---
app.state.config.ENABLE_RAG_WEB_SEARCH = ENABLE_RAG_WEB_SEARCH
app.state.config.RAG_WEB_SEARCH_ENGINE = RAG_WEB_SEARCH_ENGINE
app.state.config.RAG_WEB_SEARCH_DOMAIN_FILTER_LIST = RAG_WEB_SEARCH_DOMAIN_FILTER_LIST

app.state.config.SEARXNG_QUERY_URL = SEARXNG_QUERY_URL
app.state.config.GOOGLE_PSE_API_KEY = GOOGLE_PSE_API_KEY
app.state.config.GOOGLE_PSE_ENGINE_ID = GOOGLE_PSE_ENGINE_ID
app.state.config.BRAVE_SEARCH_API_KEY = BRAVE_SEARCH_API_KEY
app.state.config.SERPSTACK_API_KEY = SERPSTACK_API_KEY
app.state.config.SERPSTACK_HTTPS = SERPSTACK_HTTPS
app.state.config.SERPER_API_KEY = SERPER_API_KEY
app.state.config.SERPLY_API_KEY = SERPLY_API_KEY
app.state.config.TAVILY_API_KEY = TAVILY_API_KEY
app.state.config.SEARCHAPI_API_KEY = SEARCHAPI_API_KEY
app.state.config.SEARCHAPI_ENGINE = SEARCHAPI_ENGINE
app.state.config.RAG_WEB_SEARCH_RESULT_COUNT = RAG_WEB_SEARCH_RESULT_COUNT
app.state.config.RAG_WEB_SEARCH_CONCURRENT_REQUESTS = RAG_WEB_SEARCH_CONCURRENT_REQUESTS
def update_embedding_model(
    embedding_model: str,
    update_model: bool = False,
):
    """(Re)load the local sentence-transformers embedding model.

    A local model is only loaded when a model name is given AND the embedding
    engine is "" (i.e. local); otherwise the cached model is cleared so that
    the remote engine (ollama/openai) is used instead.
    """
    wants_local_model = bool(embedding_model) and app.state.config.RAG_EMBEDDING_ENGINE == ""
    if not wants_local_model:
        app.state.sentence_transformer_ef = None
        return

    # Imported lazily: sentence_transformers is heavy and only needed here.
    import sentence_transformers

    model_path = get_model_path(embedding_model, update_model)
    app.state.sentence_transformer_ef = sentence_transformers.SentenceTransformer(
        model_path,
        device=DEVICE_TYPE,
        trust_remote_code=RAG_EMBEDDING_MODEL_TRUST_REMOTE_CODE,
    )
def update_reranking_model(
    reranking_model: str,
    update_model: bool = False,
):
    """(Re)load the cross-encoder used to rerank hybrid-search results.

    An empty model name clears the cached reranker (hybrid search then runs
    without a reranking function).
    """
    if not reranking_model:
        app.state.sentence_transformer_rf = None
        return

    # Imported lazily: sentence_transformers is heavy and only needed here.
    import sentence_transformers

    model_path = get_model_path(reranking_model, update_model)
    app.state.sentence_transformer_rf = sentence_transformers.CrossEncoder(
        model_path,
        device=DEVICE_TYPE,
        trust_remote_code=RAG_RERANKING_MODEL_TRUST_REMOTE_CODE,
    )
# Eagerly load the configured models at import time so the first request does
# not pay the model-load cost.
update_embedding_model(
    app.state.config.RAG_EMBEDDING_MODEL,
    RAG_EMBEDDING_MODEL_AUTO_UPDATE,
)

update_reranking_model(
    app.state.config.RAG_RERANKING_MODEL,
    RAG_RERANKING_MODEL_AUTO_UPDATE,
)

# Embedding callable used by all query/index endpoints below.  Rebuilt by
# update_embedding_config whenever the engine/model settings change.
app.state.EMBEDDING_FUNCTION = get_embedding_function(
    app.state.config.RAG_EMBEDDING_ENGINE,
    app.state.config.RAG_EMBEDDING_MODEL,
    app.state.sentence_transformer_ef,
    app.state.config.OPENAI_API_KEY,
    app.state.config.OPENAI_API_BASE_URL,
    app.state.config.RAG_EMBEDDING_OPENAI_BATCH_SIZE,
)

# Cross-origin policy for the browser frontend.
app.add_middleware(
    CORSMiddleware,
    allow_origins=CORS_ALLOW_ORIGIN,
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
class CollectionNameForm(BaseModel):
    # Target vector-DB collection; defaults to "test" when the caller omits it.
    collection_name: Optional[str] = "test"


class UrlForm(CollectionNameForm):
    # URL to fetch and index (web page or YouTube video).
    url: str


class SearchForm(CollectionNameForm):
    # Web-search query whose results will be fetched and indexed.
    query: str
@app.get("/")
async def get_status():
    """Status endpoint exposing the current chunking/embedding settings."""
    config = app.state.config
    return {
        "status": True,
        "chunk_size": config.CHUNK_SIZE,
        "chunk_overlap": config.CHUNK_OVERLAP,
        "template": config.RAG_TEMPLATE,
        "embedding_engine": config.RAG_EMBEDDING_ENGINE,
        "embedding_model": config.RAG_EMBEDDING_MODEL,
        "reranking_model": config.RAG_RERANKING_MODEL,
        "openai_batch_size": config.RAG_EMBEDDING_OPENAI_BATCH_SIZE,
    }
@app.get("/embedding")
async def get_embedding_config(user=Depends(get_admin_user)):
    """Admin endpoint: return the current embedding engine/model settings,
    including the OpenAI-compatible endpoint credentials."""
    return {
        "status": True,
        "embedding_engine": app.state.config.RAG_EMBEDDING_ENGINE,
        "embedding_model": app.state.config.RAG_EMBEDDING_MODEL,
        "openai_config": {
            "url": app.state.config.OPENAI_API_BASE_URL,
            "key": app.state.config.OPENAI_API_KEY,
            "batch_size": app.state.config.RAG_EMBEDDING_OPENAI_BATCH_SIZE,
        },
    }
@app.get("/reranking")
async def get_reranking_config(user=Depends(get_admin_user)):
    """Admin endpoint: return the currently configured reranking model.

    Fix: the handler was named ``get_reraanking_config`` (typo).  The route
    path is unchanged, so API consumers are unaffected.
    """
    return {
        "status": True,
        "reranking_model": app.state.config.RAG_RERANKING_MODEL,
    }
class OpenAIConfigForm(BaseModel):
    # Base URL of the OpenAI-compatible embeddings endpoint.
    url: str
    # API key for that endpoint.
    key: str
    # Texts per embeddings request; the update handler substitutes 1 when None.
    batch_size: Optional[int] = None


class EmbeddingModelUpdateForm(BaseModel):
    # Only consulted when embedding_engine is "ollama" or "openai".
    openai_config: Optional[OpenAIConfigForm] = None
    embedding_engine: str
    embedding_model: str
@app.post("/embedding/update")
async def update_embedding_config(
    form_data: EmbeddingModelUpdateForm, user=Depends(get_admin_user)
):
    """Admin endpoint: switch the embedding engine/model, reload the local
    model (if applicable) and rebuild the shared embedding function.

    Raises HTTP 500 when the new model cannot be loaded.
    """
    log.info(
        f"Updating embedding model: {app.state.config.RAG_EMBEDDING_MODEL} to {form_data.embedding_model}"
    )
    try:
        app.state.config.RAG_EMBEDDING_ENGINE = form_data.embedding_engine
        app.state.config.RAG_EMBEDDING_MODEL = form_data.embedding_model

        # Remote engines carry endpoint credentials; local engine ("") ignores them.
        if app.state.config.RAG_EMBEDDING_ENGINE in ["ollama", "openai"]:
            if form_data.openai_config is not None:
                app.state.config.OPENAI_API_BASE_URL = form_data.openai_config.url
                app.state.config.OPENAI_API_KEY = form_data.openai_config.key
                # Falsy/absent batch size falls back to 1 (one text per request).
                app.state.config.RAG_EMBEDDING_OPENAI_BATCH_SIZE = (
                    form_data.openai_config.batch_size
                    if form_data.openai_config.batch_size
                    else 1
                )

        # Reload the local model (or clear it when a remote engine is active) ...
        update_embedding_model(app.state.config.RAG_EMBEDDING_MODEL)

        # ... then rebind the embedding callable to the new engine/model.
        app.state.EMBEDDING_FUNCTION = get_embedding_function(
            app.state.config.RAG_EMBEDDING_ENGINE,
            app.state.config.RAG_EMBEDDING_MODEL,
            app.state.sentence_transformer_ef,
            app.state.config.OPENAI_API_KEY,
            app.state.config.OPENAI_API_BASE_URL,
            app.state.config.RAG_EMBEDDING_OPENAI_BATCH_SIZE,
        )

        return {
            "status": True,
            "embedding_engine": app.state.config.RAG_EMBEDDING_ENGINE,
            "embedding_model": app.state.config.RAG_EMBEDDING_MODEL,
            "openai_config": {
                "url": app.state.config.OPENAI_API_BASE_URL,
                "key": app.state.config.OPENAI_API_KEY,
                "batch_size": app.state.config.RAG_EMBEDDING_OPENAI_BATCH_SIZE,
            },
        }
    except Exception as e:
        log.exception(f"Problem updating embedding model: {e}")
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=ERROR_MESSAGES.DEFAULT(e),
        )
class RerankingModelUpdateForm(BaseModel):
    # Cross-encoder model name/path; empty string disables reranking.
    reranking_model: str
@app.post("/reranking/update")
async def update_reranking_config(
    form_data: RerankingModelUpdateForm, user=Depends(get_admin_user)
):
    """Admin endpoint: switch and reload the reranking model.

    Raises HTTP 500 when the model cannot be loaded.
    """
    log.info(
        f"Updating reranking model: {app.state.config.RAG_RERANKING_MODEL} to {form_data.reranking_model}"
    )
    try:
        app.state.config.RAG_RERANKING_MODEL = form_data.reranking_model

        # True forces the model files to be updated/downloaded on reload.
        update_reranking_model(app.state.config.RAG_RERANKING_MODEL, True)

        return {
            "status": True,
            "reranking_model": app.state.config.RAG_RERANKING_MODEL,
        }
    except Exception as e:
        log.exception(f"Problem updating reranking model: {e}")
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=ERROR_MESSAGES.DEFAULT(e),
        )
@app.get("/config")
async def get_rag_config(user=Depends(get_admin_user)):
    """Admin endpoint: return the full RAG configuration snapshot.

    Fix: the web-search engine was reported under the misspelled key
    ``"seaarchapi_engine"``, inconsistent with the ``"searchapi_engine"`` key
    returned by ``update_rag_config`` and with the ``WebSearchConfig`` field
    name; the correct spelling is used here.
    """
    return {
        "status": True,
        "pdf_extract_images": app.state.config.PDF_EXTRACT_IMAGES,
        "file": {
            "max_size": app.state.config.FILE_MAX_SIZE,
            "max_count": app.state.config.FILE_MAX_COUNT,
        },
        "content_extraction": {
            "engine": app.state.config.CONTENT_EXTRACTION_ENGINE,
            "tika_server_url": app.state.config.TIKA_SERVER_URL,
        },
        "chunk": {
            "chunk_size": app.state.config.CHUNK_SIZE,
            "chunk_overlap": app.state.config.CHUNK_OVERLAP,
        },
        "youtube": {
            "language": app.state.config.YOUTUBE_LOADER_LANGUAGE,
            "translation": app.state.YOUTUBE_LOADER_TRANSLATION,
        },
        "web": {
            "ssl_verification": app.state.config.ENABLE_RAG_WEB_LOADER_SSL_VERIFICATION,
            "search": {
                "enabled": app.state.config.ENABLE_RAG_WEB_SEARCH,
                "engine": app.state.config.RAG_WEB_SEARCH_ENGINE,
                "searxng_query_url": app.state.config.SEARXNG_QUERY_URL,
                "google_pse_api_key": app.state.config.GOOGLE_PSE_API_KEY,
                "google_pse_engine_id": app.state.config.GOOGLE_PSE_ENGINE_ID,
                "brave_search_api_key": app.state.config.BRAVE_SEARCH_API_KEY,
                "serpstack_api_key": app.state.config.SERPSTACK_API_KEY,
                "serpstack_https": app.state.config.SERPSTACK_HTTPS,
                "serper_api_key": app.state.config.SERPER_API_KEY,
                "serply_api_key": app.state.config.SERPLY_API_KEY,
                "tavily_api_key": app.state.config.TAVILY_API_KEY,
                "searchapi_api_key": app.state.config.SEARCHAPI_API_KEY,
                "searchapi_engine": app.state.config.SEARCHAPI_ENGINE,
                "result_count": app.state.config.RAG_WEB_SEARCH_RESULT_COUNT,
                "concurrent_requests": app.state.config.RAG_WEB_SEARCH_CONCURRENT_REQUESTS,
            },
        },
    }
class FileConfig(BaseModel):
    # Maximum size per uploaded file; None disables the limit.
    # (unit not visible here — presumably MB; confirm against the uploader)
    max_size: Optional[int] = None
    # Maximum number of files per upload batch; None disables the limit.
    max_count: Optional[int] = None


class ContentExtractionConfig(BaseModel):
    # "" selects the built-in loaders; "tika" delegates to a Tika server.
    engine: str = ""
    tika_server_url: Optional[str] = None


class ChunkParamUpdateForm(BaseModel):
    # Character-based splitter parameters (see RecursiveCharacterTextSplitter).
    chunk_size: int
    chunk_overlap: int


class YoutubeLoaderConfig(BaseModel):
    # Preferred transcript languages, in priority order.
    language: list[str]
    # Optional target language to translate the transcript into.
    translation: Optional[str] = None


class WebSearchConfig(BaseModel):
    enabled: bool
    # One of the engine names handled by search_web (searxng, brave, ...).
    engine: Optional[str] = None
    searxng_query_url: Optional[str] = None
    google_pse_api_key: Optional[str] = None
    google_pse_engine_id: Optional[str] = None
    brave_search_api_key: Optional[str] = None
    serpstack_api_key: Optional[str] = None
    serpstack_https: Optional[bool] = None
    serper_api_key: Optional[str] = None
    serply_api_key: Optional[str] = None
    tavily_api_key: Optional[str] = None
    searchapi_api_key: Optional[str] = None
    searchapi_engine: Optional[str] = None
    result_count: Optional[int] = None
    concurrent_requests: Optional[int] = None


class WebConfig(BaseModel):
    search: WebSearchConfig
    web_loader_ssl_verification: Optional[bool] = None


class ConfigUpdateForm(BaseModel):
    # Every section is optional; omitted sections are left unchanged.
    pdf_extract_images: Optional[bool] = None
    file: Optional[FileConfig] = None
    content_extraction: Optional[ContentExtractionConfig] = None
    chunk: Optional[ChunkParamUpdateForm] = None
    youtube: Optional[YoutubeLoaderConfig] = None
    web: Optional[WebConfig] = None
@app.post("/config/update")
async def update_rag_config(form_data: ConfigUpdateForm, user=Depends(get_admin_user)):
    """Admin endpoint: apply a (partial) RAG configuration update and return
    the resulting full configuration snapshot.

    Sections omitted from the form are left unchanged.  Fix: the SearchAPI key
    was echoed back under the misspelled key ``"serachapi_api_key"``,
    inconsistent with the ``"searchapi_api_key"`` key used by
    ``get_rag_config`` and the ``WebSearchConfig`` field name; the correct
    spelling is used here.
    """
    app.state.config.PDF_EXTRACT_IMAGES = (
        form_data.pdf_extract_images
        if form_data.pdf_extract_images is not None
        else app.state.config.PDF_EXTRACT_IMAGES
    )

    if form_data.file is not None:
        app.state.config.FILE_MAX_SIZE = form_data.file.max_size
        app.state.config.FILE_MAX_COUNT = form_data.file.max_count

    if form_data.content_extraction is not None:
        log.info(f"Updating text settings: {form_data.content_extraction}")
        app.state.config.CONTENT_EXTRACTION_ENGINE = form_data.content_extraction.engine
        app.state.config.TIKA_SERVER_URL = form_data.content_extraction.tika_server_url

    if form_data.chunk is not None:
        app.state.config.CHUNK_SIZE = form_data.chunk.chunk_size
        app.state.config.CHUNK_OVERLAP = form_data.chunk.chunk_overlap

    if form_data.youtube is not None:
        app.state.config.YOUTUBE_LOADER_LANGUAGE = form_data.youtube.language
        # Plain app-state attribute, not part of the persisted config object.
        app.state.YOUTUBE_LOADER_TRANSLATION = form_data.youtube.translation

    if form_data.web is not None:
        app.state.config.ENABLE_RAG_WEB_LOADER_SSL_VERIFICATION = (
            form_data.web.web_loader_ssl_verification
        )
        app.state.config.ENABLE_RAG_WEB_SEARCH = form_data.web.search.enabled
        app.state.config.RAG_WEB_SEARCH_ENGINE = form_data.web.search.engine
        app.state.config.SEARXNG_QUERY_URL = form_data.web.search.searxng_query_url
        app.state.config.GOOGLE_PSE_API_KEY = form_data.web.search.google_pse_api_key
        app.state.config.GOOGLE_PSE_ENGINE_ID = (
            form_data.web.search.google_pse_engine_id
        )
        app.state.config.BRAVE_SEARCH_API_KEY = (
            form_data.web.search.brave_search_api_key
        )
        app.state.config.SERPSTACK_API_KEY = form_data.web.search.serpstack_api_key
        app.state.config.SERPSTACK_HTTPS = form_data.web.search.serpstack_https
        app.state.config.SERPER_API_KEY = form_data.web.search.serper_api_key
        app.state.config.SERPLY_API_KEY = form_data.web.search.serply_api_key
        app.state.config.TAVILY_API_KEY = form_data.web.search.tavily_api_key
        app.state.config.SEARCHAPI_API_KEY = form_data.web.search.searchapi_api_key
        app.state.config.SEARCHAPI_ENGINE = form_data.web.search.searchapi_engine
        app.state.config.RAG_WEB_SEARCH_RESULT_COUNT = form_data.web.search.result_count
        app.state.config.RAG_WEB_SEARCH_CONCURRENT_REQUESTS = (
            form_data.web.search.concurrent_requests
        )

    return {
        "status": True,
        "pdf_extract_images": app.state.config.PDF_EXTRACT_IMAGES,
        "file": {
            "max_size": app.state.config.FILE_MAX_SIZE,
            "max_count": app.state.config.FILE_MAX_COUNT,
        },
        "content_extraction": {
            "engine": app.state.config.CONTENT_EXTRACTION_ENGINE,
            "tika_server_url": app.state.config.TIKA_SERVER_URL,
        },
        "chunk": {
            "chunk_size": app.state.config.CHUNK_SIZE,
            "chunk_overlap": app.state.config.CHUNK_OVERLAP,
        },
        "youtube": {
            "language": app.state.config.YOUTUBE_LOADER_LANGUAGE,
            "translation": app.state.YOUTUBE_LOADER_TRANSLATION,
        },
        "web": {
            "ssl_verification": app.state.config.ENABLE_RAG_WEB_LOADER_SSL_VERIFICATION,
            "search": {
                "enabled": app.state.config.ENABLE_RAG_WEB_SEARCH,
                "engine": app.state.config.RAG_WEB_SEARCH_ENGINE,
                "searxng_query_url": app.state.config.SEARXNG_QUERY_URL,
                "google_pse_api_key": app.state.config.GOOGLE_PSE_API_KEY,
                "google_pse_engine_id": app.state.config.GOOGLE_PSE_ENGINE_ID,
                "brave_search_api_key": app.state.config.BRAVE_SEARCH_API_KEY,
                "serpstack_api_key": app.state.config.SERPSTACK_API_KEY,
                "serpstack_https": app.state.config.SERPSTACK_HTTPS,
                "serper_api_key": app.state.config.SERPER_API_KEY,
                "serply_api_key": app.state.config.SERPLY_API_KEY,
                "searchapi_api_key": app.state.config.SEARCHAPI_API_KEY,
                "searchapi_engine": app.state.config.SEARCHAPI_ENGINE,
                "tavily_api_key": app.state.config.TAVILY_API_KEY,
                "result_count": app.state.config.RAG_WEB_SEARCH_RESULT_COUNT,
                "concurrent_requests": app.state.config.RAG_WEB_SEARCH_CONCURRENT_REQUESTS,
            },
        },
    }
@app.get("/template")
async def get_rag_template(user=Depends(get_verified_user)):
    """Return the prompt template used to inject retrieved context."""
    return {"status": True, "template": app.state.config.RAG_TEMPLATE}
@app.get("/query/settings")
async def get_query_settings(user=Depends(get_admin_user)):
    """Admin endpoint: return the retrieval query settings
    (template, top-k, relevance threshold, hybrid-search flag)."""
    return {
        "status": True,
        "template": app.state.config.RAG_TEMPLATE,
        "k": app.state.config.TOP_K,
        "r": app.state.config.RELEVANCE_THRESHOLD,
        "hybrid": app.state.config.ENABLE_RAG_HYBRID_SEARCH,
    }
class QuerySettingsForm(BaseModel):
    # Top-k results to retrieve.
    k: Optional[int] = None
    # Relevance threshold for hybrid-search filtering.
    r: Optional[float] = None
    # Prompt template for injecting retrieved context.
    template: Optional[str] = None
    # Whether to use hybrid (BM25 + embedding + rerank) search.
    hybrid: Optional[bool] = None
@app.post("/query/settings/update")
async def update_query_settings(
    form_data: QuerySettingsForm, user=Depends(get_admin_user)
):
    """Admin endpoint: overwrite the retrieval query settings and echo them.

    NOTE(review): these are truthiness checks, so falsy values (0, 0.0, "",
    False, None) all fall back to the defaults rather than being stored.
    """
    app.state.config.RAG_TEMPLATE = (
        form_data.template if form_data.template else RAG_TEMPLATE
    )
    app.state.config.TOP_K = form_data.k if form_data.k else 4
    app.state.config.RELEVANCE_THRESHOLD = form_data.r if form_data.r else 0.0
    app.state.config.ENABLE_RAG_HYBRID_SEARCH = (
        form_data.hybrid if form_data.hybrid else False
    )
    return {
        "status": True,
        "template": app.state.config.RAG_TEMPLATE,
        "k": app.state.config.TOP_K,
        "r": app.state.config.RELEVANCE_THRESHOLD,
        "hybrid": app.state.config.ENABLE_RAG_HYBRID_SEARCH,
    }
class QueryDocForm(BaseModel):
    # Collection to query; required (no default, unlike CollectionNameForm).
    collection_name: str
    query: str
    # Optional per-request overrides; None falls back to the app settings.
    k: Optional[int] = None
    r: Optional[float] = None
    hybrid: Optional[bool] = None
@app.post("/query/doc")
def query_doc_handler(
    form_data: QueryDocForm,
    user=Depends(get_verified_user),
):
    """Query a single collection, using hybrid search when enabled.

    Per-request k/r overrides fall back to the configured defaults; errors
    surface as HTTP 400.
    """
    try:
        top_k = form_data.k if form_data.k else app.state.config.TOP_K

        if not app.state.config.ENABLE_RAG_HYBRID_SEARCH:
            return query_doc(
                collection_name=form_data.collection_name,
                query=form_data.query,
                embedding_function=app.state.EMBEDDING_FUNCTION,
                k=top_k,
            )

        threshold = (
            form_data.r if form_data.r else app.state.config.RELEVANCE_THRESHOLD
        )
        return query_doc_with_hybrid_search(
            collection_name=form_data.collection_name,
            query=form_data.query,
            embedding_function=app.state.EMBEDDING_FUNCTION,
            k=top_k,
            reranking_function=app.state.sentence_transformer_rf,
            r=threshold,
        )
    except Exception as e:
        log.exception(e)
        raise HTTPException(
            status_code=status.HTTP_400_BAD_REQUEST,
            detail=ERROR_MESSAGES.DEFAULT(e),
        )
class QueryCollectionsForm(BaseModel):
    # Multiple collections to query and merge.
    collection_names: list[str]
    query: str
    # Optional per-request overrides; None falls back to the app settings.
    k: Optional[int] = None
    r: Optional[float] = None
    hybrid: Optional[bool] = None
@app.post("/query/collection")
def query_collection_handler(
    form_data: QueryCollectionsForm,
    user=Depends(get_verified_user),
):
    """Query several collections at once, using hybrid search when enabled.

    Per-request k/r overrides fall back to the configured defaults; errors
    surface as HTTP 400.
    """
    try:
        top_k = form_data.k if form_data.k else app.state.config.TOP_K

        if not app.state.config.ENABLE_RAG_HYBRID_SEARCH:
            return query_collection(
                collection_names=form_data.collection_names,
                query=form_data.query,
                embedding_function=app.state.EMBEDDING_FUNCTION,
                k=top_k,
            )

        threshold = (
            form_data.r if form_data.r else app.state.config.RELEVANCE_THRESHOLD
        )
        return query_collection_with_hybrid_search(
            collection_names=form_data.collection_names,
            query=form_data.query,
            embedding_function=app.state.EMBEDDING_FUNCTION,
            k=top_k,
            reranking_function=app.state.sentence_transformer_rf,
            r=threshold,
        )
    except Exception as e:
        log.exception(e)
        raise HTTPException(
            status_code=status.HTTP_400_BAD_REQUEST,
            detail=ERROR_MESSAGES.DEFAULT(e),
        )
@app.post("/youtube")
def store_youtube_video(form_data: UrlForm, user=Depends(get_verified_user)):
    """Fetch a YouTube transcript and index it into a vector collection.

    An empty collection_name is replaced with a hash of the URL (truncated
    to 63 chars, the collection-name length limit).  Errors surface as
    HTTP 400.
    """
    try:
        loader = YoutubeLoader.from_youtube_url(
            form_data.url,
            add_video_info=True,
            language=app.state.config.YOUTUBE_LOADER_LANGUAGE,
            translation=app.state.YOUTUBE_LOADER_TRANSLATION,
        )
        documents = loader.load()

        collection_name = (
            form_data.collection_name
            if form_data.collection_name != ""
            else calculate_sha256_string(form_data.url)[:63]
        )

        store_data_in_vector_db(documents, collection_name, overwrite=True)
        return {
            "status": True,
            "collection_name": collection_name,
            "filename": form_data.url,
        }
    except Exception as e:
        log.exception(e)
        raise HTTPException(
            status_code=status.HTTP_400_BAD_REQUEST,
            detail=ERROR_MESSAGES.DEFAULT(e),
        )
@app.post("/web")
def store_web(form_data: UrlForm, user=Depends(get_verified_user)):
    """Fetch a web page and index it into a vector collection.

    Example input: "https://www.gutenberg.org/files/1727/1727-h/1727-h.htm".
    An empty collection_name is replaced with a hash of the URL (truncated
    to 63 chars).  Errors surface as HTTP 400.
    """
    try:
        loader = get_web_loader(
            form_data.url,
            verify_ssl=app.state.config.ENABLE_RAG_WEB_LOADER_SSL_VERIFICATION,
        )
        documents = loader.load()

        collection_name = (
            form_data.collection_name
            if form_data.collection_name != ""
            else calculate_sha256_string(form_data.url)[:63]
        )

        store_data_in_vector_db(documents, collection_name, overwrite=True)
        return {
            "status": True,
            "collection_name": collection_name,
            "filename": form_data.url,
        }
    except Exception as e:
        log.exception(e)
        raise HTTPException(
            status_code=status.HTTP_400_BAD_REQUEST,
            detail=ERROR_MESSAGES.DEFAULT(e),
        )
def get_web_loader(url: Union[str, Sequence[str]], verify_ssl: bool = True):
    """Build a rate-limited web loader for *url* after SSRF validation.

    Raises ValueError when the URL is invalid or resolves to a private
    address (see validate_url).
    """
    # Check if the URL is valid
    if not validate_url(url):
        raise ValueError(ERROR_MESSAGES.INVALID_URL)
    return SafeWebBaseLoader(
        url,
        verify_ssl=verify_ssl,
        requests_per_second=RAG_WEB_SEARCH_CONCURRENT_REQUESTS,
        continue_on_failure=True,
    )
def validate_url(url: Union[str, Sequence[str]]):
    """Validate a URL (or sequence of URLs) before fetching it.

    Returns True when every URL is syntactically valid and — when local web
    fetch is disabled — resolves only to public IP addresses; returns False
    for unsupported input types.

    NOTE(review): for a single string this raises ValueError on failure
    instead of returning False, so the caller's `if not validate_url(...)`
    branch is only reachable for non-str, non-Sequence inputs.
    """
    if isinstance(url, str):
        if isinstance(validators.url(url), validators.ValidationError):
            raise ValueError(ERROR_MESSAGES.INVALID_URL)
        if not ENABLE_RAG_LOCAL_WEB_FETCH:
            # Local web fetch is disabled, filter out any URLs that resolve to private IP addresses
            parsed_url = urllib.parse.urlparse(url)
            # Get IPv4 and IPv6 addresses
            ipv4_addresses, ipv6_addresses = resolve_hostname(parsed_url.hostname)
            # Check if any of the resolved addresses are private
            # This is technically still vulnerable to DNS rebinding attacks, as we don't control WebBaseLoader
            for ip in ipv4_addresses:
                if validators.ipv4(ip, private=True):
                    raise ValueError(ERROR_MESSAGES.INVALID_URL)
            for ip in ipv6_addresses:
                if validators.ipv6(ip, private=True):
                    raise ValueError(ERROR_MESSAGES.INVALID_URL)
        return True
    elif isinstance(url, Sequence):
        # str is also a Sequence, so the str branch above must run first.
        return all(validate_url(u) for u in url)
    else:
        return False
def resolve_hostname(hostname):
    """Resolve *hostname* and return its (IPv4 list, IPv6 list) addresses."""
    ipv4_addresses = []
    ipv6_addresses = []
    # getaddrinfo entries are (family, type, proto, canonname, sockaddr);
    # sockaddr[0] is the address string for both AF_INET and AF_INET6.
    for family, _type, _proto, _canon, sockaddr in socket.getaddrinfo(hostname, None):
        if family == socket.AF_INET:
            ipv4_addresses.append(sockaddr[0])
        elif family == socket.AF_INET6:
            ipv6_addresses.append(sockaddr[0])
    return ipv4_addresses, ipv6_addresses
def search_web(engine: str, query: str) -> list[SearchResult]:
    """Search the web with the named engine and return SearchResult objects.

    Each branch requires its provider's credentials to be present in the app
    config (duckduckgo and jina need none):
    - searxng: SEARXNG_QUERY_URL
    - google_pse: GOOGLE_PSE_API_KEY + GOOGLE_PSE_ENGINE_ID
    - brave: BRAVE_SEARCH_API_KEY
    - serpstack: SERPSTACK_API_KEY
    - serper: SERPER_API_KEY
    - serply: SERPLY_API_KEY
    - tavily: TAVILY_API_KEY
    - searchapi: SEARCHAPI_API_KEY + SEARCHAPI_ENGINE (by default `google`)

    Args:
        engine (str): One of the provider names above.
        query (str): The query to search for.

    Raises:
        Exception: when credentials are missing or the engine is unknown.
    """
    # TODO: add playwright to search the web
    if engine == "searxng":
        if app.state.config.SEARXNG_QUERY_URL:
            return search_searxng(
                app.state.config.SEARXNG_QUERY_URL,
                query,
                app.state.config.RAG_WEB_SEARCH_RESULT_COUNT,
                app.state.config.RAG_WEB_SEARCH_DOMAIN_FILTER_LIST,
            )
        else:
            raise Exception("No SEARXNG_QUERY_URL found in environment variables")
    elif engine == "google_pse":
        if (
            app.state.config.GOOGLE_PSE_API_KEY
            and app.state.config.GOOGLE_PSE_ENGINE_ID
        ):
            return search_google_pse(
                app.state.config.GOOGLE_PSE_API_KEY,
                app.state.config.GOOGLE_PSE_ENGINE_ID,
                query,
                app.state.config.RAG_WEB_SEARCH_RESULT_COUNT,
                app.state.config.RAG_WEB_SEARCH_DOMAIN_FILTER_LIST,
            )
        else:
            raise Exception(
                "No GOOGLE_PSE_API_KEY or GOOGLE_PSE_ENGINE_ID found in environment variables"
            )
    elif engine == "brave":
        if app.state.config.BRAVE_SEARCH_API_KEY:
            return search_brave(
                app.state.config.BRAVE_SEARCH_API_KEY,
                query,
                app.state.config.RAG_WEB_SEARCH_RESULT_COUNT,
                app.state.config.RAG_WEB_SEARCH_DOMAIN_FILTER_LIST,
            )
        else:
            raise Exception("No BRAVE_SEARCH_API_KEY found in environment variables")
    elif engine == "serpstack":
        if app.state.config.SERPSTACK_API_KEY:
            return search_serpstack(
                app.state.config.SERPSTACK_API_KEY,
                query,
                app.state.config.RAG_WEB_SEARCH_RESULT_COUNT,
                app.state.config.RAG_WEB_SEARCH_DOMAIN_FILTER_LIST,
                https_enabled=app.state.config.SERPSTACK_HTTPS,
            )
        else:
            raise Exception("No SERPSTACK_API_KEY found in environment variables")
    elif engine == "serper":
        if app.state.config.SERPER_API_KEY:
            return search_serper(
                app.state.config.SERPER_API_KEY,
                query,
                app.state.config.RAG_WEB_SEARCH_RESULT_COUNT,
                app.state.config.RAG_WEB_SEARCH_DOMAIN_FILTER_LIST,
            )
        else:
            raise Exception("No SERPER_API_KEY found in environment variables")
    elif engine == "serply":
        if app.state.config.SERPLY_API_KEY:
            return search_serply(
                app.state.config.SERPLY_API_KEY,
                query,
                app.state.config.RAG_WEB_SEARCH_RESULT_COUNT,
                app.state.config.RAG_WEB_SEARCH_DOMAIN_FILTER_LIST,
            )
        else:
            raise Exception("No SERPLY_API_KEY found in environment variables")
    elif engine == "duckduckgo":
        # No credentials required.
        return search_duckduckgo(
            query,
            app.state.config.RAG_WEB_SEARCH_RESULT_COUNT,
            app.state.config.RAG_WEB_SEARCH_DOMAIN_FILTER_LIST,
        )
    elif engine == "tavily":
        if app.state.config.TAVILY_API_KEY:
            # NOTE: tavily takes no domain-filter argument.
            return search_tavily(
                app.state.config.TAVILY_API_KEY,
                query,
                app.state.config.RAG_WEB_SEARCH_RESULT_COUNT,
            )
        else:
            raise Exception("No TAVILY_API_KEY found in environment variables")
    elif engine == "searchapi":
        if app.state.config.SEARCHAPI_API_KEY:
            return search_searchapi(
                app.state.config.SEARCHAPI_API_KEY,
                app.state.config.SEARCHAPI_ENGINE,
                query,
                app.state.config.RAG_WEB_SEARCH_RESULT_COUNT,
                app.state.config.RAG_WEB_SEARCH_DOMAIN_FILTER_LIST,
            )
        else:
            raise Exception("No SEARCHAPI_API_KEY found in environment variables")
    elif engine == "jina":
        # No credentials required.
        return search_jina(query, app.state.config.RAG_WEB_SEARCH_RESULT_COUNT)
    else:
        # Also reached for an unknown/unset engine name.
        raise Exception("No search engine API key found in environment variables")
@app.post("/web/search")
def store_web_search(form_data: SearchForm, user=Depends(get_verified_user)):
    """Run a web search with the configured engine and ingest the result pages.

    The pages behind the search-result URLs are loaded and stored in a
    vector-DB collection (derived from the query hash when no collection
    name is supplied).

    Returns:
        dict with status, the collection name, and the list of source URLs.

    Raises:
        HTTPException: 400 when either the search or the ingestion fails.
    """
    try:
        # Use the module logger consistently (was the root `logging` module,
        # plus a leftover debug print in the except branch).
        log.info(
            "trying to web search with %s: %s",
            app.state.config.RAG_WEB_SEARCH_ENGINE,
            form_data.query,
        )
        web_results = search_web(
            app.state.config.RAG_WEB_SEARCH_ENGINE, form_data.query
        )
    except Exception as e:
        log.exception(e)
        raise HTTPException(
            status_code=status.HTTP_400_BAD_REQUEST,
            detail=ERROR_MESSAGES.WEB_SEARCH_ERROR(e),
        )

    try:
        urls = [result.link for result in web_results]
        loader = get_web_loader(urls)
        data = loader.load()

        collection_name = form_data.collection_name
        if collection_name == "":
            # Deterministic name from the query, truncated to 63 chars
            # (the ChromaDB collection-name length limit).
            collection_name = calculate_sha256_string(form_data.query)[:63]

        store_data_in_vector_db(data, collection_name, overwrite=True)
        return {
            "status": True,
            "collection_name": collection_name,
            "filenames": urls,
        }
    except Exception as e:
        log.exception(e)
        raise HTTPException(
            status_code=status.HTTP_400_BAD_REQUEST,
            detail=ERROR_MESSAGES.DEFAULT(e),
        )
def store_data_in_vector_db(
    data, collection_name, metadata: Optional[dict] = None, overwrite: bool = False
) -> bool:
    """Split loaded documents into chunks and store them in the vector DB.

    Args:
        data: Documents as returned by a langchain loader's ``load()``.
        collection_name: Target ChromaDB collection name.
        metadata: Extra metadata merged into every chunk's metadata.
        overwrite: Delete any existing collection of the same name first.

    Returns:
        True when the chunks were stored successfully, False otherwise.

    Raises:
        ValueError: If splitting produced no chunks (empty content).
    """
    text_splitter = RecursiveCharacterTextSplitter(
        chunk_size=app.state.config.CHUNK_SIZE,
        chunk_overlap=app.state.config.CHUNK_OVERLAP,
        add_start_index=True,
    )

    docs = text_splitter.split_documents(data)

    if len(docs) > 0:
        log.info(f"store_data_in_vector_db {docs}")
        # Return the bare bool: the previous "..., None" produced a 2-tuple,
        # which is always truthy, so callers' `if result:` checks could never
        # detect a storage failure (and it contradicted the -> bool annotation).
        return store_docs_in_vector_db(docs, collection_name, metadata, overwrite)
    else:
        raise ValueError(ERROR_MESSAGES.EMPTY_CONTENT)
def store_text_in_vector_db(
    text, metadata, collection_name, overwrite: bool = False
) -> bool:
    """Chunk a raw text string and store the chunks in the vector DB.

    The given metadata dict is attached to every chunk created from the text.
    """
    splitter = RecursiveCharacterTextSplitter(
        chunk_size=app.state.config.CHUNK_SIZE,
        chunk_overlap=app.state.config.CHUNK_OVERLAP,
        add_start_index=True,
    )
    chunks = splitter.create_documents([text], metadatas=[metadata])
    return store_docs_in_vector_db(chunks, collection_name, overwrite=overwrite)
def store_docs_in_vector_db(
    docs, collection_name, metadata: Optional[dict] = None, overwrite: bool = False
) -> bool:
    """Embed document chunks and add them to a ChromaDB collection.

    Args:
        docs: Pre-split document chunks.
        collection_name: Target collection; created here (must not already
            exist unless ``overwrite`` is True).
        metadata: Extra metadata merged into every chunk's own metadata.
        overwrite: Delete an existing collection of the same name first.

    Returns:
        True on success (including the already-exists case), False on error.
    """
    log.info(f"store_docs_in_vector_db {docs} {collection_name}")

    texts = [doc.page_content for doc in docs]
    metadatas = [{**doc.metadata, **(metadata if metadata else {})} for doc in docs]

    # ChromaDB does not like datetime formats for metadata, so convert them
    # to strings. (Loop variable renamed from `metadata`, which shadowed the
    # parameter of the same name.)
    for meta in metadatas:
        for key, value in meta.items():
            if isinstance(value, datetime):
                meta[key] = str(value)

    try:
        if overwrite:
            for collection in CHROMA_CLIENT.list_collections():
                if collection_name == collection.name:
                    log.info(f"deleting existing collection {collection_name}")
                    CHROMA_CLIENT.delete_collection(name=collection_name)

        collection = CHROMA_CLIENT.create_collection(name=collection_name)

        embedding_func = get_embedding_function(
            app.state.config.RAG_EMBEDDING_ENGINE,
            app.state.config.RAG_EMBEDDING_MODEL,
            app.state.sentence_transformer_ef,
            app.state.config.OPENAI_API_KEY,
            app.state.config.OPENAI_API_BASE_URL,
            app.state.config.RAG_EMBEDDING_OPENAI_BATCH_SIZE,
        )

        # Flatten newlines before embedding; embed once, then add in batches.
        embedding_texts = list(map(lambda x: x.replace("\n", " "), texts))
        embeddings = embedding_func(embedding_texts)

        for batch in create_batches(
            api=CHROMA_CLIENT,
            ids=[str(uuid.uuid4()) for _ in texts],
            metadatas=metadatas,
            embeddings=embeddings,
            documents=texts,
        ):
            collection.add(*batch)

        return True
    except Exception as e:
        # A pre-existing collection (overwrite=False) is treated as success.
        if e.__class__.__name__ == "UniqueConstraintError":
            return True

        log.exception(e)
        return False
class TikaLoader:
    """Loader that extracts text from a file via a remote Apache Tika server.

    The Tika endpoint is read from ``app.state.config.TIKA_SERVER_URL``.
    """

    def __init__(self, file_path, mime_type=None):
        self.file_path = file_path
        self.mime_type = mime_type

    def load(self) -> list[Document]:
        """Send the file to Tika's /tika/text endpoint and wrap the result."""
        with open(self.file_path, "rb") as f:
            data = f.read()

        headers = {"Content-Type": self.mime_type} if self.mime_type is not None else {}

        endpoint = app.state.config.TIKA_SERVER_URL
        if not endpoint.endswith("/"):
            endpoint += "/"
        endpoint += "tika/text"

        r = requests.put(endpoint, data=data, headers=headers)
        if not r.ok:
            raise Exception(f"Error calling Tika: {r.reason}")

        raw_metadata = r.json()
        text = raw_metadata.get("X-TIKA:content", "<No text content found>")

        # Carry the server-reported content type through as document metadata.
        if "Content-Type" in raw_metadata:
            headers["Content-Type"] = raw_metadata["Content-Type"]

        log.info("Tika extracted text: %s", text)
        return [Document(page_content=text, metadata=headers)]
def get_loader(filename: str, file_content_type: str, file_path: str):
    """Pick a langchain document loader for the given file.

    Selection order: when the Tika extraction engine is configured, plain
    source/text files still use TextLoader and everything else goes to Tika;
    otherwise a format-specific loader is chosen by extension/content type,
    falling back to TextLoader.

    Returns:
        (loader, known_type): the loader instance and whether the file type
        was recognized (False only for the final TextLoader fallback).
    """
    file_ext = filename.split(".")[-1].lower()
    known_type = True

    # Extensions treated as plain source/text. (Duplicate "hs" entry removed.)
    known_source_ext = [
        "go",
        "py",
        "java",
        "sh",
        "bat",
        "ps1",
        "cmd",
        "js",
        "ts",
        "css",
        "cpp",
        "hpp",
        "h",
        "c",
        "cs",
        "sql",
        "log",
        "ini",
        "pl",
        "pm",
        "r",
        "dart",
        "dockerfile",
        "env",
        "php",
        "hs",
        "hsc",
        "lua",
        "nginxconf",
        "conf",
        "m",
        "mm",
        "plsql",
        "perl",
        "rb",
        "rs",
        "db2",
        "scala",
        "bash",
        "swift",
        "vue",
        "svelte",
        "msg",
        "ex",
        "exs",
        "erl",
        "tsx",
        "jsx",
        "lhs",
    ]

    if (
        app.state.config.CONTENT_EXTRACTION_ENGINE == "tika"
        and app.state.config.TIKA_SERVER_URL
    ):
        if file_ext in known_source_ext or (
            file_content_type and "text/" in file_content_type
        ):
            loader = TextLoader(file_path, autodetect_encoding=True)
        else:
            loader = TikaLoader(file_path, file_content_type)
    else:
        if file_ext == "pdf":
            loader = PyPDFLoader(
                file_path, extract_images=app.state.config.PDF_EXTRACT_IMAGES
            )
        elif file_ext == "csv":
            loader = CSVLoader(file_path)
        elif file_ext == "rst":
            loader = UnstructuredRSTLoader(file_path, mode="elements")
        elif file_ext == "xml":
            loader = UnstructuredXMLLoader(file_path)
        elif file_ext in ["htm", "html"]:
            loader = BSHTMLLoader(file_path, open_encoding="unicode_escape")
        elif file_ext == "md":
            loader = UnstructuredMarkdownLoader(file_path)
        elif file_content_type == "application/epub+zip":
            loader = UnstructuredEPubLoader(file_path)
        elif (
            file_content_type
            == "application/vnd.openxmlformats-officedocument.wordprocessingml.document"
            or file_ext in ["doc", "docx"]
        ):
            loader = Docx2txtLoader(file_path)
        elif file_content_type in [
            "application/vnd.ms-excel",
            "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
        ] or file_ext in ["xls", "xlsx"]:
            loader = UnstructuredExcelLoader(file_path)
        elif file_content_type in [
            "application/vnd.ms-powerpoint",
            "application/vnd.openxmlformats-officedocument.presentationml.presentation",
        ] or file_ext in ["ppt", "pptx"]:
            loader = UnstructuredPowerPointLoader(file_path)
        elif file_ext == "msg":
            loader = OutlookMessageLoader(file_path)
        elif file_ext in known_source_ext or (
            file_content_type and "text/" in file_content_type
        ):
            loader = TextLoader(file_path, autodetect_encoding=True)
        else:
            # Unknown format: best-effort text load, flagged as unknown.
            loader = TextLoader(file_path, autodetect_encoding=True)
            known_type = False

    return loader, known_type
@app.post("/doc")
def store_doc(
    collection_name: Optional[str] = Form(None),
    file: UploadFile = File(...),
    user=Depends(get_verified_user),
):
    """Accept a file upload, save it to UPLOAD_DIR, and ingest it.

    When no collection name is given, one is derived from the file's
    SHA-256 hash (truncated to 63 chars, the ChromaDB name limit).
    """
    # "https://www.gutenberg.org/files/1727/1727-h/1727-h.htm"
    log.info(f"file.content_type: {file.content_type}")
    try:
        unsanitized_filename = file.filename
        # Strip any directory components to prevent path traversal.
        filename = os.path.basename(unsanitized_filename)

        # Bug fix: the upload was previously written to a hard-coded
        # "(unknown)" path instead of the sanitized filename, so every
        # upload clobbered the same file.
        file_path = f"{UPLOAD_DIR}/{filename}"

        contents = file.file.read()
        with open(file_path, "wb") as f:
            f.write(contents)

        if collection_name is None:
            with open(file_path, "rb") as f:
                collection_name = calculate_sha256(f)[:63]

        loader, known_type = get_loader(filename, file.content_type, file_path)
        data = loader.load()

        try:
            result = store_data_in_vector_db(data, collection_name)

            if result:
                return {
                    "status": True,
                    "collection_name": collection_name,
                    "filename": filename,
                    "known_type": known_type,
                }
        except Exception as e:
            raise HTTPException(
                status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
                # str(e): raw exception objects are not JSON-serializable.
                detail=str(e),
            )
    except Exception as e:
        log.exception(e)
        if "No pandoc was found" in str(e):
            raise HTTPException(
                status_code=status.HTTP_400_BAD_REQUEST,
                detail=ERROR_MESSAGES.PANDOC_NOT_INSTALLED,
            )
        else:
            raise HTTPException(
                status_code=status.HTTP_400_BAD_REQUEST,
                detail=ERROR_MESSAGES.DEFAULT(e),
            )
class ProcessDocForm(BaseModel):
    """Request body for /process/doc: identifies an uploaded file to ingest."""

    # ID of a previously uploaded file (looked up via Files.get_file_by_id).
    file_id: str
    # Optional target collection; derived from the file's hash when omitted.
    collection_name: Optional[str] = None
@app.post("/process/doc")
def process_doc(
    form_data: ProcessDocForm,
    user=Depends(get_verified_user),
):
    """Load a previously uploaded file (by ID) and ingest it into the vector DB.

    The collection name defaults to the file's SHA-256 hash (truncated to
    63 chars). File id and display name are attached as chunk metadata.
    """
    try:
        file = Files.get_file_by_id(form_data.file_id)
        file_path = file.meta.get("path", f"{UPLOAD_DIR}/{file.filename}")

        collection_name = form_data.collection_name
        if collection_name is None:
            # Open only when the hash is actually needed, and use a context
            # manager so the handle is closed even if hashing fails.
            with open(file_path, "rb") as f:
                collection_name = calculate_sha256(f)[:63]

        loader, known_type = get_loader(
            file.filename, file.meta.get("content_type"), file_path
        )
        data = loader.load()

        try:
            result = store_data_in_vector_db(
                data,
                collection_name,
                {
                    "file_id": form_data.file_id,
                    "name": file.meta.get("name", file.filename),
                },
            )

            if result:
                return {
                    "status": True,
                    "collection_name": collection_name,
                    "known_type": known_type,
                    "filename": file.meta.get("name", file.filename),
                }
        except Exception as e:
            raise HTTPException(
                status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
                # str(e): raw exception objects are not JSON-serializable.
                detail=str(e),
            )
    except Exception as e:
        log.exception(e)
        if "No pandoc was found" in str(e):
            raise HTTPException(
                status_code=status.HTTP_400_BAD_REQUEST,
                detail=ERROR_MESSAGES.PANDOC_NOT_INSTALLED,
            )
        else:
            raise HTTPException(
                status_code=status.HTTP_400_BAD_REQUEST,
                detail=ERROR_MESSAGES.DEFAULT(e),
            )
class TextRAGForm(BaseModel):
    """Request body for /text: a named raw-text snippet to ingest."""

    # Display name stored in the chunk metadata.
    name: str
    # The raw text content to chunk and embed.
    content: str
    # Optional target collection; derived from the content hash when omitted.
    collection_name: Optional[str] = None
@app.post("/text")
def store_text(
    form_data: TextRAGForm,
    user=Depends(get_verified_user),
):
    """Store a raw text snippet in the vector DB.

    Raises:
        HTTPException: 500 when storage fails.
    """
    collection_name = form_data.collection_name
    if collection_name is None:
        # Truncate to 63 chars (ChromaDB's collection-name limit) — the full
        # 64-char sha256 hex digest is rejected; this also matches every other
        # collection-name derivation in this module.
        collection_name = calculate_sha256_string(form_data.content)[:63]

    result = store_text_in_vector_db(
        form_data.content,
        metadata={"name": form_data.name, "created_by": user.id},
        collection_name=collection_name,
    )

    if result:
        return {"status": True, "collection_name": collection_name}
    else:
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=ERROR_MESSAGES.DEFAULT(),
        )
@app.get("/scan")
def scan_docs_dir(user=Depends(get_admin_user)):
    """Recursively scan DOCS_DIR and ingest every visible file (admin only).

    For each file: derive a collection name from its SHA-256 hash, load and
    store it in the vector DB, then register it as a Document record (unless
    one of the same sanitized name already exists). Per-file failures are
    logged and skipped; always returns True.
    """
    for path in Path(DOCS_DIR).rglob("./**/*"):
        try:
            # Skip directories and dot-files.
            if path.is_file() and not path.name.startswith("."):
                # Folder names under the docs dir become tags on the document.
                tags = extract_folders_after_data_docs(path)
                filename = path.name
                file_content_type = mimetypes.guess_type(path)

                f = open(path, "rb")
                # 63-char limit matches ChromaDB's collection-name constraint.
                collection_name = calculate_sha256(f)[:63]
                f.close()

                loader, known_type = get_loader(
                    filename, file_content_type[0], str(path)
                )
                data = loader.load()

                try:
                    result = store_data_in_vector_db(data, collection_name)

                    if result:
                        sanitized_filename = sanitize_filename(filename)
                        doc = Documents.get_doc_by_name(sanitized_filename)

                        # Only create a Document record if none exists yet.
                        if doc is None:
                            doc = Documents.insert_new_doc(
                                user.id,
                                DocumentForm(
                                    **{
                                        "name": sanitized_filename,
                                        "title": filename,
                                        "collection_name": collection_name,
                                        "filename": filename,
                                        # Tags serialized as {"tags": [{"name": ...}]},
                                        # or an empty JSON object when there are none.
                                        "content": (
                                            json.dumps(
                                                {
                                                    "tags": list(
                                                        map(
                                                            lambda name: {"name": name},
                                                            tags,
                                                        )
                                                    )
                                                }
                                            )
                                            if len(tags)
                                            else "{}"
                                        ),
                                    }
                                ),
                            )
                except Exception as e:
                    # Best-effort: log and continue with the next file.
                    log.exception(e)
                    pass

        except Exception as e:
            log.exception(e)

    return True
@app.post("/reset/db")
def reset_vector_db(user=Depends(get_admin_user)):
    """Drop all collections/data from the ChromaDB instance (admin only)."""
    CHROMA_CLIENT.reset()
@app.post("/reset/uploads")
def reset_upload_dir(user=Depends(get_admin_user)) -> bool:
    """Delete every file and subdirectory inside UPLOAD_DIR (admin only).

    Individual deletion failures are logged and skipped; always returns True.
    """
    folder = f"{UPLOAD_DIR}"
    try:
        # Check if the directory exists
        if os.path.exists(folder):
            # Iterate over all the files and directories in the specified directory
            for filename in os.listdir(folder):
                file_path = os.path.join(folder, filename)
                try:
                    if os.path.isfile(file_path) or os.path.islink(file_path):
                        os.unlink(file_path)  # Remove the file or link
                    elif os.path.isdir(file_path):
                        shutil.rmtree(file_path)  # Remove the directory
                except Exception as e:
                    # Log instead of print, consistent with reset() below.
                    log.error("Failed to delete %s. Reason: %s", file_path, e)
        else:
            log.warning("The directory %s does not exist", folder)
    except Exception as e:
        log.exception("Failed to process the directory %s. Reason: %s", folder, e)
    return True
@app.post("/reset")
def reset(user=Depends(get_admin_user)) -> bool:
    """Clear the upload directory, then reset the vector DB (admin only).

    Deletion and reset failures are logged but never abort the request;
    always returns True.
    """
    upload_folder = f"{UPLOAD_DIR}"
    for entry in os.listdir(upload_folder):
        entry_path = os.path.join(upload_folder, entry)
        try:
            if os.path.isfile(entry_path) or os.path.islink(entry_path):
                os.unlink(entry_path)
            elif os.path.isdir(entry_path):
                shutil.rmtree(entry_path)
        except Exception as e:
            log.error("Failed to delete %s. Reason: %s" % (entry_path, e))

    try:
        CHROMA_CLIENT.reset()
    except Exception as e:
        log.exception(e)

    return True
class SafeWebBaseLoader(WebBaseLoader):
    """WebBaseLoader with enhanced error handling for URLs."""

    def lazy_load(self) -> Iterator[Document]:
        """Lazy load text from the url(s) in web_path with error handling.

        Pages that fail to load are logged and skipped rather than aborting
        the whole batch.
        """
        for path in self.web_paths:
            try:
                soup = self._scrape(path, bs_kwargs=self.bs_kwargs)
                text = soup.get_text(**self.bs_get_text_kwargs)

                # Build metadata from the parsed page.
                metadata = {"source": path}
                title_tag = soup.find("title")
                if title_tag:
                    metadata["title"] = title_tag.get_text()
                description_tag = soup.find("meta", attrs={"name": "description"})
                if description_tag:
                    metadata["description"] = description_tag.get(
                        "content", "No description found."
                    )
                html_tag = soup.find("html")
                if html_tag:
                    metadata["language"] = html_tag.get("lang", "No language found.")

                yield Document(page_content=text, metadata=metadata)
            except Exception as e:
                # Log the error and continue with the next URL
                log.error(f"Error loading {path}: {e}")
if ENV == "dev":
    # Debug-only endpoints, registered solely in the dev environment:
    # expose the active embedding function so its raw output can be inspected.

    @app.get("/ef")
    async def get_embeddings():
        # Fixed sample input for a quick sanity check of the embedder.
        return {"result": app.state.EMBEDDING_FUNCTION("hello world")}

    @app.get("/ef/{text}")
    async def get_embeddings_text(text: str):
        # Embed arbitrary caller-supplied text from the URL path.
        return {"result": app.state.EMBEDDING_FUNCTION(text)}
|