main.py

from contextlib import asynccontextmanager
from bs4 import BeautifulSoup
import json
import markdown
import time
import os
import sys
import logging
import aiohttp
import requests
import mimetypes
import shutil
import inspect
import asyncio

from fastapi import FastAPI, Request, Depends, status, UploadFile, File, Form
from fastapi.staticfiles import StaticFiles
from fastapi.responses import JSONResponse
from fastapi import HTTPException
from fastapi.middleware.wsgi import WSGIMiddleware
from fastapi.middleware.cors import CORSMiddleware
from starlette.exceptions import HTTPException as StarletteHTTPException
from starlette.middleware.base import BaseHTTPMiddleware
from starlette.responses import StreamingResponse, Response

from apps.socket.main import app as socket_app
from apps.ollama.main import (
    app as ollama_app,
    OpenAIChatCompletionForm,
    get_all_models as get_ollama_models,
    generate_openai_chat_completion as generate_ollama_chat_completion,
)
from apps.openai.main import (
    app as openai_app,
    get_all_models as get_openai_models,
    generate_chat_completion as generate_openai_chat_completion,
)
from apps.audio.main import app as audio_app
from apps.images.main import app as images_app
from apps.rag.main import app as rag_app
from apps.webui.main import app as webui_app

from pydantic import BaseModel
from typing import List, Optional

from apps.webui.models.models import Models, ModelModel
from apps.webui.models.tools import Tools
from apps.webui.utils import load_toolkit_module_by_id

from utils.utils import (
    get_admin_user,
    get_verified_user,
    get_current_user,
    get_http_authorization_cred,
)
from utils.task import (
    title_generation_template,
    search_query_generation_template,
    tools_function_calling_generation_template,
)
from utils.misc import get_last_user_message, add_or_update_system_message

from apps.rag.utils import get_rag_context, rag_template

from config import (
    CONFIG_DATA,
    WEBUI_NAME,
    WEBUI_URL,
    WEBUI_AUTH,
    ENV,
    VERSION,
    CHANGELOG,
    FRONTEND_BUILD_DIR,
    CACHE_DIR,
    STATIC_DIR,
    ENABLE_OPENAI_API,
    ENABLE_OLLAMA_API,
    ENABLE_MODEL_FILTER,
    MODEL_FILTER_LIST,
    GLOBAL_LOG_LEVEL,
    SRC_LOG_LEVELS,
    WEBHOOK_URL,
    ENABLE_ADMIN_EXPORT,
    WEBUI_BUILD_HASH,
    TASK_MODEL,
    TASK_MODEL_EXTERNAL,
    TITLE_GENERATION_PROMPT_TEMPLATE,
    SEARCH_QUERY_GENERATION_PROMPT_TEMPLATE,
    SEARCH_QUERY_PROMPT_LENGTH_THRESHOLD,
    TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE,
    AppConfig,
)
from constants import ERROR_MESSAGES

logging.basicConfig(stream=sys.stdout, level=GLOBAL_LOG_LEVEL)
log = logging.getLogger(__name__)
log.setLevel(SRC_LOG_LEVELS["MAIN"])


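# Serve the compiled frontend as a single-page app: any path that does not
# resolve to a real static file falls back to index.html so client-side
# routing keeps working on hard refresh and deep links.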
class SPAStaticFiles(StaticFiles):
    async def get_response(self, path: str, scope):
        try:
            return await super().get_response(path, scope)
        except (HTTPException, StarletteHTTPException) as ex:
            if ex.status_code == 404:
                return await super().get_response("index.html", scope)
            else:
                raise ex


print(
    rf"""
  ___                    __        __   _     _   _ ___
 / _ \ _ __   ___ _ __   \ \      / /__| |__ | | | |_ _|
| | | | '_ \ / _ \ '_ \   \ \ /\ / / _ \ '_ \| | | || |
| |_| | |_) |  __/ | | |   \ V  V /  __/ |_) | |_| || |
 \___/| .__/ \___|_| |_|    \_/\_/ \___|_.__/ \___/|___|
      |_|


v{VERSION} - building the best open-source AI user interface.
{f"Commit: {WEBUI_BUILD_HASH}" if WEBUI_BUILD_HASH != "dev-build" else ""}
https://github.com/open-webui/open-webui
"""
)


@asynccontextmanager
async def lifespan(app: FastAPI):
    yield


app = FastAPI(
    docs_url="/docs" if ENV == "dev" else None, redoc_url=None, lifespan=lifespan
)

app.state.config = AppConfig()

app.state.config.ENABLE_OPENAI_API = ENABLE_OPENAI_API
app.state.config.ENABLE_OLLAMA_API = ENABLE_OLLAMA_API

app.state.config.ENABLE_MODEL_FILTER = ENABLE_MODEL_FILTER
app.state.config.MODEL_FILTER_LIST = MODEL_FILTER_LIST

app.state.config.WEBHOOK_URL = WEBHOOK_URL

app.state.config.TASK_MODEL = TASK_MODEL
app.state.config.TASK_MODEL_EXTERNAL = TASK_MODEL_EXTERNAL
app.state.config.TITLE_GENERATION_PROMPT_TEMPLATE = TITLE_GENERATION_PROMPT_TEMPLATE
app.state.config.SEARCH_QUERY_GENERATION_PROMPT_TEMPLATE = (
    SEARCH_QUERY_GENERATION_PROMPT_TEMPLATE
)
app.state.config.SEARCH_QUERY_PROMPT_LENGTH_THRESHOLD = (
    SEARCH_QUERY_PROMPT_LENGTH_THRESHOLD
)
app.state.config.TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE = (
    TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE
)

app.state.MODELS = {}

origins = ["*"]


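# Prompt-based "function calling" that works without provider tool support:
# render the tool's JSON specs into the system prompt, ask the task model to
# answer with {"name": ..., "parameters": ...}, then invoke the matching
# function from the loaded toolkit module.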
async def get_function_call_response(messages, tool_id, template, task_model_id, user):
    tool = Tools.get_tool_by_id(tool_id)
    tools_specs = json.dumps(tool.specs, indent=2)
    content = tools_function_calling_generation_template(template, tools_specs)

    user_message = get_last_user_message(messages)
    prompt = (
        "History:\n"
        + "\n".join(
            [
                f"{message['role']}: {message['content']}"
                for message in messages[::-1][:4]
            ]
        )
        + f"\nQuery: {user_message}"
    )

    print(prompt)

    payload = {
        "model": task_model_id,
        "messages": [
            {"role": "system", "content": content},
            {"role": "user", "content": f"Query: {prompt}"},
        ],
        "stream": False,
    }

    payload = filter_pipeline(payload, user)
    model = app.state.MODELS[task_model_id]

    response = None
    try:
        if model["owned_by"] == "ollama":
            response = await generate_ollama_chat_completion(
                OpenAIChatCompletionForm(**payload), user=user
            )
        else:
            response = await generate_openai_chat_completion(payload, user=user)

        content = None
        async for chunk in response.body_iterator:
            data = json.loads(chunk.decode("utf-8"))
            content = data["choices"][0]["message"]["content"]

        # Cleanup any remaining background tasks if necessary
        if response.background is not None:
            await response.background()

        # Parse the function response
        if content is not None:
            print(content)
            result = json.loads(content)
            print(result)

            # Call the function
            if "name" in result:
                if tool_id in webui_app.state.TOOLS:
                    toolkit_module = webui_app.state.TOOLS[tool_id]
                else:
                    toolkit_module = load_toolkit_module_by_id(tool_id)
                    webui_app.state.TOOLS[tool_id] = toolkit_module

                function = getattr(toolkit_module, result["name"])
                function_result = None
                try:
                    # Get the signature of the function
                    sig = inspect.signature(function)
                    # Check if '__user__' is a parameter of the function
                    if "__user__" in sig.parameters:
                        # Call the function with the '__user__' parameter included
                        function_result = function(
                            **{
                                **result["parameters"],
                                "__user__": {
                                    "id": user.id,
                                    "email": user.email,
                                    "name": user.name,
                                    "role": user.role,
                                },
                            }
                        )
                    else:
                        # Call the function without modifying the parameters
                        function_result = function(**result["parameters"])
                except Exception as e:
                    print(e)

                # Add the function result to the system prompt
                if function_result:
                    return function_result
    except Exception as e:
        print(f"Error: {e}")

    return None


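# Intercepts chat-completion POSTs before they reach the Ollama/OpenAI sub-apps:
# runs any requested tools, gathers RAG context from attached docs, injects the
# combined context as a system message, rewrites the request body (and its
# Content-Length), and re-emits citations as the first streamed event.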
class ChatCompletionMiddleware(BaseHTTPMiddleware):
    async def dispatch(self, request: Request, call_next):
        return_citations = False
        citations = []  # ensure citations is defined even when no docs are attached

        if request.method == "POST" and (
            "/ollama/api/chat" in request.url.path
            or "/chat/completions" in request.url.path
        ):
            log.debug(f"request.url.path: {request.url.path}")

            # Read the original request body
            body = await request.body()
            # Decode body to string
            body_str = body.decode("utf-8")
            # Parse string to JSON
            data = json.loads(body_str) if body_str else {}

            user = get_current_user(
                get_http_authorization_cred(request.headers.get("Authorization"))
            )

            # Remove the citations from the body
            return_citations = data.get("citations", False)
            if "citations" in data:
                del data["citations"]

            # Set the task model
            task_model_id = data["model"]
            if task_model_id not in app.state.MODELS:
                raise HTTPException(
                    status_code=status.HTTP_404_NOT_FOUND,
                    detail="Model not found",
                )

            # Check if the user has a custom task model
            # If the user has a custom task model, use that model
            if app.state.MODELS[task_model_id]["owned_by"] == "ollama":
                if (
                    app.state.config.TASK_MODEL
                    and app.state.config.TASK_MODEL in app.state.MODELS
                ):
                    task_model_id = app.state.config.TASK_MODEL
            else:
                if (
                    app.state.config.TASK_MODEL_EXTERNAL
                    and app.state.config.TASK_MODEL_EXTERNAL in app.state.MODELS
                ):
                    task_model_id = app.state.config.TASK_MODEL_EXTERNAL

            prompt = get_last_user_message(data["messages"])
            context = ""

            # If tool_ids field is present, call the functions
            if "tool_ids" in data:
                print(data["tool_ids"])
                for tool_id in data["tool_ids"]:
                    print(tool_id)
                    response = await get_function_call_response(
                        messages=data["messages"],
                        tool_id=tool_id,
                        template=app.state.config.TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE,
                        task_model_id=task_model_id,
                        user=user,
                    )

                    if response:
                        context += ("\n" if context != "" else "") + response
                del data["tool_ids"]

                print(f"tool_context: {context}")

            # If docs field is present, generate RAG completions
            if "docs" in data:
                data = {**data}
                rag_context, citations = get_rag_context(
                    docs=data["docs"],
                    messages=data["messages"],
                    embedding_function=rag_app.state.EMBEDDING_FUNCTION,
                    k=rag_app.state.config.TOP_K,
                    reranking_function=rag_app.state.sentence_transformer_rf,
                    r=rag_app.state.config.RELEVANCE_THRESHOLD,
                    hybrid_search=rag_app.state.config.ENABLE_RAG_HYBRID_SEARCH,
                )

                if rag_context:
                    context += ("\n" if context != "" else "") + rag_context

                del data["docs"]

                log.debug(f"rag_context: {rag_context}, citations: {citations}")

            if context != "":
                system_prompt = rag_template(
                    rag_app.state.config.RAG_TEMPLATE, context, prompt
                )

                print(system_prompt)

                data["messages"] = add_or_update_system_message(
                    f"\n{system_prompt}", data["messages"]
                )

            modified_body_bytes = json.dumps(data).encode("utf-8")

            # Replace the request body with the modified one
            request._body = modified_body_bytes

            # Set custom header to ensure content-length matches new body length
            request.headers.__dict__["_list"] = [
                (b"content-length", str(len(modified_body_bytes)).encode("utf-8")),
                *[
                    (k, v)
                    for k, v in request.headers.raw
                    if k.lower() != b"content-length"
                ],
            ]

        response = await call_next(request)

        if return_citations:
            # Inject the citations into the response
            if isinstance(response, StreamingResponse):
                # If it's a streaming response, inject it as SSE event or NDJSON line
                content_type = response.headers.get("Content-Type", "")
                if "text/event-stream" in content_type:
                    return StreamingResponse(
                        self.openai_stream_wrapper(response.body_iterator, citations),
                    )
                if "application/x-ndjson" in content_type:
                    return StreamingResponse(
                        self.ollama_stream_wrapper(response.body_iterator, citations),
                    )

        return response

    async def _receive(self, body: bytes):
        return {"type": "http.request", "body": body, "more_body": False}

    async def openai_stream_wrapper(self, original_generator, citations):
        yield f"data: {json.dumps({'citations': citations})}\n\n"
        async for data in original_generator:
            yield data

    async def ollama_stream_wrapper(self, original_generator, citations):
        yield f"{json.dumps({'citations': citations})}\n"
        async for data in original_generator:
            yield data


app.add_middleware(ChatCompletionMiddleware)


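# Apply "inlet" filter pipelines to an outgoing request payload. Filters are
# pipeline models of type "filter" that target this model (or "*"), ordered by
# priority; each one POSTs the payload to its pipelines server and may rewrite it.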
def filter_pipeline(payload, user):
    user = {"id": user.id, "name": user.name, "role": user.role}
    model_id = payload["model"]
    filters = [
        model
        for model in app.state.MODELS.values()
        if "pipeline" in model
        and "type" in model["pipeline"]
        and model["pipeline"]["type"] == "filter"
        and (
            model["pipeline"]["pipelines"] == ["*"]
            or any(
                model_id == target_model_id
                for target_model_id in model["pipeline"]["pipelines"]
            )
        )
    ]
    sorted_filters = sorted(filters, key=lambda x: x["pipeline"]["priority"])

    model = app.state.MODELS[model_id]

    if "pipeline" in model:
        sorted_filters.append(model)

    for filter in sorted_filters:
        r = None
        try:
            urlIdx = filter["urlIdx"]

            url = openai_app.state.config.OPENAI_API_BASE_URLS[urlIdx]
            key = openai_app.state.config.OPENAI_API_KEYS[urlIdx]

            if key != "":
                headers = {"Authorization": f"Bearer {key}"}
                r = requests.post(
                    f"{url}/{filter['id']}/filter/inlet",
                    headers=headers,
                    json={
                        "user": user,
                        "body": payload,
                    },
                )

                r.raise_for_status()
                payload = r.json()
        except Exception as e:
            # Handle connection error here
            print(f"Connection error: {e}")

            if r is not None:
                try:
                    res = r.json()
                    if "detail" in res:
                        return JSONResponse(
                            status_code=r.status_code,
                            content=res,
                        )
                except Exception:
                    pass

    if "pipeline" not in app.state.MODELS[model_id]:
        if "chat_id" in payload:
            del payload["chat_id"]

        if "title" in payload:
            del payload["title"]

    return payload


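# Same interception points as ChatCompletionMiddleware, but this one only runs
# the inlet filter pipelines over the request body before passing it along.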
class PipelineMiddleware(BaseHTTPMiddleware):
    async def dispatch(self, request: Request, call_next):
        if request.method == "POST" and (
            "/ollama/api/chat" in request.url.path
            or "/chat/completions" in request.url.path
        ):
            log.debug(f"request.url.path: {request.url.path}")

            # Read the original request body
            body = await request.body()
            # Decode body to string
            body_str = body.decode("utf-8")
            # Parse string to JSON
            data = json.loads(body_str) if body_str else {}

            user = get_current_user(
                get_http_authorization_cred(request.headers.get("Authorization"))
            )

            data = filter_pipeline(data, user)

            modified_body_bytes = json.dumps(data).encode("utf-8")

            # Replace the request body with the modified one
            request._body = modified_body_bytes

            # Set custom header to ensure content-length matches new body length
            request.headers.__dict__["_list"] = [
                (b"content-length", str(len(modified_body_bytes)).encode("utf-8")),
                *[
                    (k, v)
                    for k, v in request.headers.raw
                    if k.lower() != b"content-length"
                ],
            ]

        response = await call_next(request)
        return response

    async def _receive(self, body: bytes):
        return {"type": "http.request", "body": body, "more_body": False}


app.add_middleware(PipelineMiddleware)

app.add_middleware(
    CORSMiddleware,
    allow_origins=origins,
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)


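# Lazily populate the model registry on the first request, and stamp every
# response with a coarse (whole-second) X-Process-Time header.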
@app.middleware("http")
async def check_url(request: Request, call_next):
    if len(app.state.MODELS) == 0:
        await get_all_models()

    start_time = int(time.time())
    response = await call_next(request)
    process_time = int(time.time()) - start_time
    response.headers["X-Process-Time"] = str(process_time)

    return response


@app.middleware("http")
async def update_embedding_function(request: Request, call_next):
    response = await call_next(request)
    if "/embedding/update" in request.url.path:
        webui_app.state.EMBEDDING_FUNCTION = rag_app.state.EMBEDDING_FUNCTION
    return response


app.mount("/ws", socket_app)

app.mount("/ollama", ollama_app)
app.mount("/openai", openai_app)

app.mount("/images/api/v1", images_app)
app.mount("/audio/api/v1", audio_app)
app.mount("/rag/api/v1", rag_app)

app.mount("/api/v1", webui_app)

webui_app.state.EMBEDDING_FUNCTION = rag_app.state.EMBEDDING_FUNCTION


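# Aggregate models from every enabled backend into one OpenAI-style list, then
# overlay custom models from the database: entries without a base model rename
# and annotate an existing model, the rest are appended as presets.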
async def get_all_models():
    openai_models = []
    ollama_models = []

    if app.state.config.ENABLE_OPENAI_API:
        openai_models = await get_openai_models()
        openai_models = openai_models["data"]

    if app.state.config.ENABLE_OLLAMA_API:
        ollama_models = await get_ollama_models()
        ollama_models = [
            {
                "id": model["model"],
                "name": model["name"],
                "object": "model",
                "created": int(time.time()),
                "owned_by": "ollama",
                "ollama": model,
            }
            for model in ollama_models["models"]
        ]

    models = openai_models + ollama_models
    custom_models = Models.get_all_models()

    for custom_model in custom_models:
        if custom_model.base_model_id is None:
            for model in models:
                if (
                    custom_model.id == model["id"]
                    or custom_model.id == model["id"].split(":")[0]
                ):
                    model["name"] = custom_model.name
                    model["info"] = custom_model.model_dump()
        else:
            owned_by = "openai"

            for model in models:
                if (
                    custom_model.base_model_id == model["id"]
                    or custom_model.base_model_id == model["id"].split(":")[0]
                ):
                    owned_by = model["owned_by"]
                    break

            models.append(
                {
                    "id": custom_model.id,
                    "name": custom_model.name,
                    "object": "model",
                    "created": custom_model.created_at,
                    "owned_by": owned_by,
                    "info": custom_model.model_dump(),
                    "preset": True,
                }
            )

    app.state.MODELS = {model["id"]: model for model in models}
    webui_app.state.MODELS = app.state.MODELS

    return models


@app.get("/api/models")
async def get_models(user=Depends(get_verified_user)):
    models = await get_all_models()

    # Filter out filter pipelines
    models = [
        model
        for model in models
        if "pipeline" not in model or model["pipeline"].get("type", None) != "filter"
    ]

    if app.state.config.ENABLE_MODEL_FILTER:
        if user.role == "user":
            models = list(
                filter(
                    lambda model: model["id"] in app.state.config.MODEL_FILTER_LIST,
                    models,
                )
            )
            return {"data": models}

    return {"data": models}


@app.get("/api/task/config")
async def get_task_config(user=Depends(get_verified_user)):
    return {
        "TASK_MODEL": app.state.config.TASK_MODEL,
        "TASK_MODEL_EXTERNAL": app.state.config.TASK_MODEL_EXTERNAL,
        "TITLE_GENERATION_PROMPT_TEMPLATE": app.state.config.TITLE_GENERATION_PROMPT_TEMPLATE,
        "SEARCH_QUERY_GENERATION_PROMPT_TEMPLATE": app.state.config.SEARCH_QUERY_GENERATION_PROMPT_TEMPLATE,
        "SEARCH_QUERY_PROMPT_LENGTH_THRESHOLD": app.state.config.SEARCH_QUERY_PROMPT_LENGTH_THRESHOLD,
        "TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE": app.state.config.TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE,
    }


class TaskConfigForm(BaseModel):
    TASK_MODEL: Optional[str]
    TASK_MODEL_EXTERNAL: Optional[str]
    TITLE_GENERATION_PROMPT_TEMPLATE: str
    SEARCH_QUERY_GENERATION_PROMPT_TEMPLATE: str
    SEARCH_QUERY_PROMPT_LENGTH_THRESHOLD: int
    TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE: str


@app.post("/api/task/config/update")
async def update_task_config(form_data: TaskConfigForm, user=Depends(get_admin_user)):
    app.state.config.TASK_MODEL = form_data.TASK_MODEL
    app.state.config.TASK_MODEL_EXTERNAL = form_data.TASK_MODEL_EXTERNAL
    app.state.config.TITLE_GENERATION_PROMPT_TEMPLATE = (
        form_data.TITLE_GENERATION_PROMPT_TEMPLATE
    )
    app.state.config.SEARCH_QUERY_GENERATION_PROMPT_TEMPLATE = (
        form_data.SEARCH_QUERY_GENERATION_PROMPT_TEMPLATE
    )
    app.state.config.SEARCH_QUERY_PROMPT_LENGTH_THRESHOLD = (
        form_data.SEARCH_QUERY_PROMPT_LENGTH_THRESHOLD
    )
    app.state.config.TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE = (
        form_data.TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE
    )

    return {
        "TASK_MODEL": app.state.config.TASK_MODEL,
        "TASK_MODEL_EXTERNAL": app.state.config.TASK_MODEL_EXTERNAL,
        "TITLE_GENERATION_PROMPT_TEMPLATE": app.state.config.TITLE_GENERATION_PROMPT_TEMPLATE,
        "SEARCH_QUERY_GENERATION_PROMPT_TEMPLATE": app.state.config.SEARCH_QUERY_GENERATION_PROMPT_TEMPLATE,
        "SEARCH_QUERY_PROMPT_LENGTH_THRESHOLD": app.state.config.SEARCH_QUERY_PROMPT_LENGTH_THRESHOLD,
        "TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE": app.state.config.TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE,
    }


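# Task endpoints (title / search-query / tool-call generation) prefer the
# configured task model: TASK_MODEL for Ollama-owned models, TASK_MODEL_EXTERNAL
# otherwise, falling back to the requested model when none is configured.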
@app.post("/api/task/title/completions")
async def generate_title(form_data: dict, user=Depends(get_verified_user)):
    print("generate_title")

    model_id = form_data["model"]
    if model_id not in app.state.MODELS:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail="Model not found",
        )

    # Check if the user has a custom task model
    # If the user has a custom task model, use that model
    if app.state.MODELS[model_id]["owned_by"] == "ollama":
        if app.state.config.TASK_MODEL:
            task_model_id = app.state.config.TASK_MODEL
            if task_model_id in app.state.MODELS:
                model_id = task_model_id
    else:
        if app.state.config.TASK_MODEL_EXTERNAL:
            task_model_id = app.state.config.TASK_MODEL_EXTERNAL
            if task_model_id in app.state.MODELS:
                model_id = task_model_id

    print(model_id)
    model = app.state.MODELS[model_id]

    template = app.state.config.TITLE_GENERATION_PROMPT_TEMPLATE

    content = title_generation_template(
        template, form_data["prompt"], user.model_dump()
    )

    payload = {
        "model": model_id,
        "messages": [{"role": "user", "content": content}],
        "stream": False,
        "max_tokens": 50,
        "chat_id": form_data.get("chat_id", None),
        "title": True,
    }

    print(payload)
    payload = filter_pipeline(payload, user)

    if model["owned_by"] == "ollama":
        return await generate_ollama_chat_completion(
            OpenAIChatCompletionForm(**payload), user=user
        )
    else:
        return await generate_openai_chat_completion(payload, user=user)


@app.post("/api/task/query/completions")
async def generate_search_query(form_data: dict, user=Depends(get_verified_user)):
    print("generate_search_query")

    if len(form_data["prompt"]) < app.state.config.SEARCH_QUERY_PROMPT_LENGTH_THRESHOLD:
        raise HTTPException(
            status_code=status.HTTP_400_BAD_REQUEST,
            detail=f"Skip search query generation for short prompts (< {app.state.config.SEARCH_QUERY_PROMPT_LENGTH_THRESHOLD} characters)",
        )

    model_id = form_data["model"]
    if model_id not in app.state.MODELS:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail="Model not found",
        )

    # Check if the user has a custom task model
    # If the user has a custom task model, use that model
    if app.state.MODELS[model_id]["owned_by"] == "ollama":
        if app.state.config.TASK_MODEL:
            task_model_id = app.state.config.TASK_MODEL
            if task_model_id in app.state.MODELS:
                model_id = task_model_id
    else:
        if app.state.config.TASK_MODEL_EXTERNAL:
            task_model_id = app.state.config.TASK_MODEL_EXTERNAL
            if task_model_id in app.state.MODELS:
                model_id = task_model_id

    print(model_id)
    model = app.state.MODELS[model_id]

    template = app.state.config.SEARCH_QUERY_GENERATION_PROMPT_TEMPLATE

    content = search_query_generation_template(
        template, form_data["prompt"], user.model_dump()
    )

    payload = {
        "model": model_id,
        "messages": [{"role": "user", "content": content}],
        "stream": False,
        "max_tokens": 30,
    }

    print(payload)
    payload = filter_pipeline(payload, user)

    if model["owned_by"] == "ollama":
        return await generate_ollama_chat_completion(
            OpenAIChatCompletionForm(**payload), user=user
        )
    else:
        return await generate_openai_chat_completion(payload, user=user)


@app.post("/api/task/tools/completions")
async def get_tools_function_calling(form_data: dict, user=Depends(get_verified_user)):
    print("get_tools_function_calling")

    model_id = form_data["model"]
    if model_id not in app.state.MODELS:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail="Model not found",
        )

    # Check if the user has a custom task model
    # If the user has a custom task model, use that model
    if app.state.MODELS[model_id]["owned_by"] == "ollama":
        if app.state.config.TASK_MODEL:
            task_model_id = app.state.config.TASK_MODEL
            if task_model_id in app.state.MODELS:
                model_id = task_model_id
    else:
        if app.state.config.TASK_MODEL_EXTERNAL:
            task_model_id = app.state.config.TASK_MODEL_EXTERNAL
            if task_model_id in app.state.MODELS:
                model_id = task_model_id

    print(model_id)

    template = app.state.config.TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE

    return await get_function_call_response(
        form_data["messages"], form_data["tool_id"], template, model_id, user
    )


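# Unified chat entry point: route the request to the Ollama app (translated to
# its OpenAI-compatible form) or to the OpenAI app based on who owns the model.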
@app.post("/api/chat/completions")
async def generate_chat_completions(form_data: dict, user=Depends(get_verified_user)):
    model_id = form_data["model"]
    if model_id not in app.state.MODELS:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail="Model not found",
        )

    model = app.state.MODELS[model_id]
    print(model)

    if model["owned_by"] == "ollama":
        return await generate_ollama_chat_completion(
            OpenAIChatCompletionForm(**form_data), user=user
        )
    else:
        return await generate_openai_chat_completion(form_data, user=user)


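# Mirror of filter_pipeline for the response side: run the matching "outlet"
# filter pipelines over the finished chat payload.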
@app.post("/api/chat/completed")
async def chat_completed(form_data: dict, user=Depends(get_verified_user)):
    data = form_data
    model_id = data["model"]

    filters = [
        model
        for model in app.state.MODELS.values()
        if "pipeline" in model
        and "type" in model["pipeline"]
        and model["pipeline"]["type"] == "filter"
        and (
            model["pipeline"]["pipelines"] == ["*"]
            or any(
                model_id == target_model_id
                for target_model_id in model["pipeline"]["pipelines"]
            )
        )
    ]
    sorted_filters = sorted(filters, key=lambda x: x["pipeline"]["priority"])

    print(model_id)

    if model_id in app.state.MODELS:
        model = app.state.MODELS[model_id]
        if "pipeline" in model:
            sorted_filters = [model] + sorted_filters

    for filter in sorted_filters:
        r = None
        try:
            urlIdx = filter["urlIdx"]

            url = openai_app.state.config.OPENAI_API_BASE_URLS[urlIdx]
            key = openai_app.state.config.OPENAI_API_KEYS[urlIdx]

            if key != "":
                headers = {"Authorization": f"Bearer {key}"}
                r = requests.post(
                    f"{url}/{filter['id']}/filter/outlet",
                    headers=headers,
                    json={
                        "user": {"id": user.id, "name": user.name, "role": user.role},
                        "body": data,
                    },
                )

                r.raise_for_status()
                data = r.json()
        except Exception as e:
            # Handle connection error here
            print(f"Connection error: {e}")

            if r is not None:
                try:
                    res = r.json()
                    if "detail" in res:
                        return JSONResponse(
                            status_code=r.status_code,
                            content=res,
                        )
                except Exception:
                    pass

    return data


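# Pipelines administration: the endpoints below proxy to the external pipelines
# server(s) registered among the OpenAI-compatible base URLs.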
@app.get("/api/pipelines/list")
async def get_pipelines_list(user=Depends(get_admin_user)):
    responses = await get_openai_models(raw=True)
    print(responses)

    urlIdxs = [
        idx
        for idx, response in enumerate(responses)
        if response is not None and "pipelines" in response
    ]

    return {
        "data": [
            {
                "url": openai_app.state.config.OPENAI_API_BASE_URLS[urlIdx],
                "idx": urlIdx,
            }
            for urlIdx in urlIdxs
        ]
    }


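# Upload a pipeline module: stage the .py file locally, forward it to the
# target pipelines server, and always clean up the staged copy afterwards.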
@app.post("/api/pipelines/upload")
async def upload_pipeline(
    urlIdx: int = Form(...), file: UploadFile = File(...), user=Depends(get_admin_user)
):
    print("upload_pipeline", urlIdx, file.filename)
    # Check if the uploaded file is a python file
    if not file.filename.endswith(".py"):
        raise HTTPException(
            status_code=status.HTTP_400_BAD_REQUEST,
            detail="Only Python (.py) files are allowed.",
        )

    upload_folder = f"{CACHE_DIR}/pipelines"
    os.makedirs(upload_folder, exist_ok=True)
    file_path = os.path.join(upload_folder, file.filename)

    r = None  # defined up front so the except block can inspect it safely
    try:
        # Save the uploaded file
        with open(file_path, "wb") as buffer:
            shutil.copyfileobj(file.file, buffer)

        url = openai_app.state.config.OPENAI_API_BASE_URLS[urlIdx]
        key = openai_app.state.config.OPENAI_API_KEYS[urlIdx]

        headers = {"Authorization": f"Bearer {key}"}

        with open(file_path, "rb") as f:
            files = {"file": f}
            r = requests.post(f"{url}/pipelines/upload", headers=headers, files=files)

        r.raise_for_status()
        data = r.json()

        return {**data}
    except Exception as e:
        # Handle connection error here
        print(f"Connection error: {e}")

        detail = "Pipeline not found"
        if r is not None:
            try:
                res = r.json()
                if "detail" in res:
                    detail = res["detail"]
            except Exception:
                pass

        raise HTTPException(
            status_code=(r.status_code if r is not None else status.HTTP_404_NOT_FOUND),
            detail=detail,
        )
    finally:
        # Ensure the file is deleted after the upload is completed or on failure
        if os.path.exists(file_path):
            os.remove(file_path)


class AddPipelineForm(BaseModel):
    url: str
    urlIdx: int


@app.post("/api/pipelines/add")
async def add_pipeline(form_data: AddPipelineForm, user=Depends(get_admin_user)):
    r = None
    try:
        urlIdx = form_data.urlIdx

        url = openai_app.state.config.OPENAI_API_BASE_URLS[urlIdx]
        key = openai_app.state.config.OPENAI_API_KEYS[urlIdx]

        headers = {"Authorization": f"Bearer {key}"}
        r = requests.post(
            f"{url}/pipelines/add", headers=headers, json={"url": form_data.url}
        )

        r.raise_for_status()
        data = r.json()

        return {**data}
    except Exception as e:
        # Handle connection error here
        print(f"Connection error: {e}")

        detail = "Pipeline not found"
        if r is not None:
            try:
                res = r.json()
                if "detail" in res:
                    detail = res["detail"]
            except Exception:
                pass

        raise HTTPException(
            status_code=(r.status_code if r is not None else status.HTTP_404_NOT_FOUND),
            detail=detail,
        )


class DeletePipelineForm(BaseModel):
    id: str
    urlIdx: int


@app.delete("/api/pipelines/delete")
async def delete_pipeline(form_data: DeletePipelineForm, user=Depends(get_admin_user)):
    r = None
    try:
        urlIdx = form_data.urlIdx

        url = openai_app.state.config.OPENAI_API_BASE_URLS[urlIdx]
        key = openai_app.state.config.OPENAI_API_KEYS[urlIdx]

        headers = {"Authorization": f"Bearer {key}"}
        r = requests.delete(
            f"{url}/pipelines/delete", headers=headers, json={"id": form_data.id}
        )

        r.raise_for_status()
        data = r.json()

        return {**data}
    except Exception as e:
        # Handle connection error here
        print(f"Connection error: {e}")

        detail = "Pipeline not found"
        if r is not None:
            try:
                res = r.json()
                if "detail" in res:
                    detail = res["detail"]
            except Exception:
                pass

        raise HTTPException(
            status_code=(r.status_code if r is not None else status.HTTP_404_NOT_FOUND),
            detail=detail,
        )


@app.get("/api/pipelines")
async def get_pipelines(urlIdx: Optional[int] = None, user=Depends(get_admin_user)):
    r = None
    try:
        url = openai_app.state.config.OPENAI_API_BASE_URLS[urlIdx]
        key = openai_app.state.config.OPENAI_API_KEYS[urlIdx]

        headers = {"Authorization": f"Bearer {key}"}
        r = requests.get(f"{url}/pipelines", headers=headers)

        r.raise_for_status()
        data = r.json()

        return {**data}
    except Exception as e:
        # Handle connection error here
        print(f"Connection error: {e}")

        detail = "Pipeline not found"
        if r is not None:
            try:
                res = r.json()
                if "detail" in res:
                    detail = res["detail"]
            except Exception:
                pass

        raise HTTPException(
            status_code=(r.status_code if r is not None else status.HTTP_404_NOT_FOUND),
            detail=detail,
        )


@app.get("/api/pipelines/{pipeline_id}/valves")
async def get_pipeline_valves(
    urlIdx: Optional[int], pipeline_id: str, user=Depends(get_admin_user)
):
    models = await get_all_models()
    r = None
    try:
        url = openai_app.state.config.OPENAI_API_BASE_URLS[urlIdx]
        key = openai_app.state.config.OPENAI_API_KEYS[urlIdx]

        headers = {"Authorization": f"Bearer {key}"}
        r = requests.get(f"{url}/{pipeline_id}/valves", headers=headers)

        r.raise_for_status()
        data = r.json()

        return {**data}
    except Exception as e:
        # Handle connection error here
        print(f"Connection error: {e}")

        detail = "Pipeline not found"
        if r is not None:
            try:
                res = r.json()
                if "detail" in res:
                    detail = res["detail"]
            except Exception:
                pass

        raise HTTPException(
            status_code=(r.status_code if r is not None else status.HTTP_404_NOT_FOUND),
            detail=detail,
        )


@app.get("/api/pipelines/{pipeline_id}/valves/spec")
async def get_pipeline_valves_spec(
    urlIdx: Optional[int], pipeline_id: str, user=Depends(get_admin_user)
):
    models = await get_all_models()
    r = None
    try:
        url = openai_app.state.config.OPENAI_API_BASE_URLS[urlIdx]
        key = openai_app.state.config.OPENAI_API_KEYS[urlIdx]

        headers = {"Authorization": f"Bearer {key}"}
        r = requests.get(f"{url}/{pipeline_id}/valves/spec", headers=headers)

        r.raise_for_status()
        data = r.json()

        return {**data}
    except Exception as e:
        # Handle connection error here
        print(f"Connection error: {e}")

        detail = "Pipeline not found"
        if r is not None:
            try:
                res = r.json()
                if "detail" in res:
                    detail = res["detail"]
            except Exception:
                pass

        raise HTTPException(
            status_code=(r.status_code if r is not None else status.HTTP_404_NOT_FOUND),
            detail=detail,
        )


@app.post("/api/pipelines/{pipeline_id}/valves/update")
async def update_pipeline_valves(
    urlIdx: Optional[int],
    pipeline_id: str,
    form_data: dict,
    user=Depends(get_admin_user),
):
    models = await get_all_models()
    r = None
    try:
        url = openai_app.state.config.OPENAI_API_BASE_URLS[urlIdx]
        key = openai_app.state.config.OPENAI_API_KEYS[urlIdx]

        headers = {"Authorization": f"Bearer {key}"}
        r = requests.post(
            f"{url}/{pipeline_id}/valves/update",
            headers=headers,
            json={**form_data},
        )

        r.raise_for_status()
        data = r.json()

        return {**data}
    except Exception as e:
        # Handle connection error here
        print(f"Connection error: {e}")

        detail = "Pipeline not found"
        if r is not None:
            try:
                res = r.json()
                if "detail" in res:
                    detail = res["detail"]
            except Exception:
                pass

        raise HTTPException(
            status_code=(r.status_code if r is not None else status.HTTP_404_NOT_FOUND),
            detail=detail,
        )


@app.get("/api/config")
async def get_app_config():
    # Fall back to the default locale when CONFIG_DATA has no 'ui' section
    default_locale = "en-US"
    if "ui" in CONFIG_DATA:
        default_locale = CONFIG_DATA["ui"].get("default_locale", "en-US")

    return {
        "status": True,
        "name": WEBUI_NAME,
        "version": VERSION,
        "default_locale": default_locale,
        "default_models": webui_app.state.config.DEFAULT_MODELS,
        "default_prompt_suggestions": webui_app.state.config.DEFAULT_PROMPT_SUGGESTIONS,
        "features": {
            "auth": WEBUI_AUTH,
            "auth_trusted_header": bool(webui_app.state.AUTH_TRUSTED_EMAIL_HEADER),
            "enable_signup": webui_app.state.config.ENABLE_SIGNUP,
            "enable_web_search": rag_app.state.config.ENABLE_RAG_WEB_SEARCH,
            "enable_image_generation": images_app.state.config.ENABLED,
            "enable_community_sharing": webui_app.state.config.ENABLE_COMMUNITY_SHARING,
            "enable_admin_export": ENABLE_ADMIN_EXPORT,
        },
        "audio": {
            "tts": {
                "engine": audio_app.state.config.TTS_ENGINE,
                "voice": audio_app.state.config.TTS_VOICE,
            },
            "stt": {
                "engine": audio_app.state.config.STT_ENGINE,
            },
        },
    }


@app.get("/api/config/model/filter")
async def get_model_filter_config(user=Depends(get_admin_user)):
    return {
        "enabled": app.state.config.ENABLE_MODEL_FILTER,
        "models": app.state.config.MODEL_FILTER_LIST,
    }


class ModelFilterConfigForm(BaseModel):
    enabled: bool
    models: List[str]


@app.post("/api/config/model/filter")
async def update_model_filter_config(
    form_data: ModelFilterConfigForm, user=Depends(get_admin_user)
):
    app.state.config.ENABLE_MODEL_FILTER = form_data.enabled
    app.state.config.MODEL_FILTER_LIST = form_data.models

    return {
        "enabled": app.state.config.ENABLE_MODEL_FILTER,
        "models": app.state.config.MODEL_FILTER_LIST,
    }


@app.get("/api/webhook")
async def get_webhook_url(user=Depends(get_admin_user)):
    return {
        "url": app.state.config.WEBHOOK_URL,
    }


class UrlForm(BaseModel):
    url: str


@app.post("/api/webhook")
async def update_webhook_url(form_data: UrlForm, user=Depends(get_admin_user)):
    app.state.config.WEBHOOK_URL = form_data.url
    webui_app.state.WEBHOOK_URL = app.state.config.WEBHOOK_URL

    return {"url": app.state.config.WEBHOOK_URL}


@app.get("/api/version")
async def get_app_version():
    return {
        "version": VERSION,
    }


@app.get("/api/changelog")
async def get_app_changelog():
    return {key: CHANGELOG[key] for idx, key in enumerate(CHANGELOG) if idx < 5}


@app.get("/api/version/updates")
async def get_app_latest_release_version():
    try:
        async with aiohttp.ClientSession(trust_env=True) as session:
            async with session.get(
                "https://api.github.com/repos/open-webui/open-webui/releases/latest"
            ) as response:
                response.raise_for_status()
                data = await response.json()
                latest_version = data["tag_name"]

                return {"current": VERSION, "latest": latest_version[1:]}
    except aiohttp.ClientError as e:
        raise HTTPException(
            status_code=status.HTTP_503_SERVICE_UNAVAILABLE,
            detail=ERROR_MESSAGES.RATE_LIMIT_EXCEEDED,
        )


@app.get("/manifest.json")
async def get_manifest_json():
    return {
        "name": WEBUI_NAME,
        "short_name": WEBUI_NAME,
        "start_url": "/",
        "display": "standalone",
        "background_color": "#343541",
        "theme_color": "#343541",
        "orientation": "portrait-primary",
        "icons": [{"src": "/static/logo.png", "type": "image/png", "sizes": "500x500"}],
    }


@app.get("/opensearch.xml")
async def get_opensearch_xml():
    xml_content = rf"""
    <OpenSearchDescription xmlns="http://a9.com/-/spec/opensearch/1.1/" xmlns:moz="http://www.mozilla.org/2006/browser/search/">
    <ShortName>{WEBUI_NAME}</ShortName>
    <Description>Search {WEBUI_NAME}</Description>
    <InputEncoding>UTF-8</InputEncoding>
    <Image width="16" height="16" type="image/x-icon">{WEBUI_URL}/favicon.png</Image>
    <Url type="text/html" method="get" template="{WEBUI_URL}/?q={"{searchTerms}"}"/>
    <moz:SearchForm>{WEBUI_URL}</moz:SearchForm>
    </OpenSearchDescription>
    """
    return Response(content=xml_content, media_type="application/xml")


@app.get("/health")
async def healthcheck():
    return {"status": True}


app.mount("/static", StaticFiles(directory=STATIC_DIR), name="static")
app.mount("/cache", StaticFiles(directory=CACHE_DIR), name="cache")

if os.path.exists(FRONTEND_BUILD_DIR):
    mimetypes.add_type("text/javascript", ".js")
    app.mount(
        "/",
        SPAStaticFiles(directory=FRONTEND_BUILD_DIR, html=True),
        name="spa-static-files",
    )
else:
    log.warning(
        f"Frontend build directory not found at '{FRONTEND_BUILD_DIR}'. Serving API only."
    )