main.py 43 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
7127812791280128112821283128412851286128712881289129012911292129312941295129612971298129913001301130213031304130513061307130813091310131113121313131413151316131713181319132013211322132313241325132613271328132913301331133213331334133513361337133813391340134113421343134413451346134713481349135013511352135313541355135613571358135913601361136213631364136513661367136813691370137113721373137413751376137713781379138013811382138313841385138613871388138913901391139213931394
  1. from contextlib import asynccontextmanager
  2. from bs4 import BeautifulSoup
  3. import json
  4. import markdown
  5. import time
  6. import os
  7. import sys
  8. import logging
  9. import aiohttp
  10. import requests
  11. import mimetypes
  12. import shutil
  13. import os
  14. import asyncio
  15. from fastapi import FastAPI, Request, Depends, status, UploadFile, File, Form
  16. from fastapi.staticfiles import StaticFiles
  17. from fastapi.responses import JSONResponse
  18. from fastapi import HTTPException
  19. from fastapi.middleware.wsgi import WSGIMiddleware
  20. from fastapi.middleware.cors import CORSMiddleware
  21. from starlette.exceptions import HTTPException as StarletteHTTPException
  22. from starlette.middleware.base import BaseHTTPMiddleware
  23. from starlette.responses import StreamingResponse, Response
  24. from apps.socket.main import app as socket_app
  25. from apps.ollama.main import (
  26. app as ollama_app,
  27. OpenAIChatCompletionForm,
  28. get_all_models as get_ollama_models,
  29. generate_openai_chat_completion as generate_ollama_chat_completion,
  30. )
  31. from apps.openai.main import (
  32. app as openai_app,
  33. get_all_models as get_openai_models,
  34. generate_chat_completion as generate_openai_chat_completion,
  35. )
  36. from apps.audio.main import app as audio_app
  37. from apps.images.main import app as images_app
  38. from apps.rag.main import app as rag_app
  39. from apps.webui.main import app as webui_app
  40. from pydantic import BaseModel
  41. from typing import List, Optional
  42. from apps.webui.models.models import Models, ModelModel
  43. from apps.webui.models.tools import Tools
  44. from apps.webui.utils import load_toolkit_module_by_id
  45. from utils.utils import (
  46. get_admin_user,
  47. get_verified_user,
  48. get_current_user,
  49. get_http_authorization_cred,
  50. )
  51. from utils.task import (
  52. title_generation_template,
  53. search_query_generation_template,
  54. tools_function_calling_generation_template,
  55. )
  56. from utils.misc import get_last_user_message, add_or_update_system_message
  57. from apps.rag.utils import get_rag_context, rag_template
  58. from config import (
  59. CONFIG_DATA,
  60. WEBUI_NAME,
  61. WEBUI_URL,
  62. WEBUI_AUTH,
  63. ENV,
  64. VERSION,
  65. CHANGELOG,
  66. FRONTEND_BUILD_DIR,
  67. CACHE_DIR,
  68. STATIC_DIR,
  69. ENABLE_OPENAI_API,
  70. ENABLE_OLLAMA_API,
  71. ENABLE_MODEL_FILTER,
  72. MODEL_FILTER_LIST,
  73. GLOBAL_LOG_LEVEL,
  74. SRC_LOG_LEVELS,
  75. WEBHOOK_URL,
  76. ENABLE_ADMIN_EXPORT,
  77. WEBUI_BUILD_HASH,
  78. TASK_MODEL,
  79. TASK_MODEL_EXTERNAL,
  80. TITLE_GENERATION_PROMPT_TEMPLATE,
  81. SEARCH_QUERY_GENERATION_PROMPT_TEMPLATE,
  82. SEARCH_QUERY_PROMPT_LENGTH_THRESHOLD,
  83. TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE,
  84. AppConfig,
  85. )
  86. from constants import ERROR_MESSAGES
# Root logging goes to stdout at the globally configured level; this module's
# own logger uses the per-source "MAIN" level from config.
logging.basicConfig(stream=sys.stdout, level=GLOBAL_LOG_LEVEL)
log = logging.getLogger(__name__)
log.setLevel(SRC_LOG_LEVELS["MAIN"])
  90. class SPAStaticFiles(StaticFiles):
  91. async def get_response(self, path: str, scope):
  92. try:
  93. return await super().get_response(path, scope)
  94. except (HTTPException, StarletteHTTPException) as ex:
  95. if ex.status_code == 404:
  96. return await super().get_response("index.html", scope)
  97. else:
  98. raise ex
# Startup banner printed at import time: version, optional build hash, and
# the project URL.
print(
    rf"""
  ___                    __        __   _     _   _ ___
 / _ \ _ __   ___ _ __   \ \      / /__| |__ | | | |_ _|
| | | | '_ \ / _ \ '_ \   \ \ /\ / / _ \ '_ \| | | || |
| |_| | |_) |  __/ | | |   \ V  V /  __/ |_) | |_| || |
 \___/| .__/ \___|_| |_|    \_/\_/ \___|_.__/ \___/|___|
      |_|

v{VERSION} - building the best open-source AI user interface.
{f"Commit: {WEBUI_BUILD_HASH}" if WEBUI_BUILD_HASH != "dev-build" else ""}
https://github.com/open-webui/open-webui
"""
)
@asynccontextmanager
async def lifespan(app: FastAPI):
    # No startup/shutdown work yet; present so FastAPI's lifespan hook is
    # wired up for future use.
    yield
app = FastAPI(
    # Interactive API docs are exposed only in development.
    docs_url="/docs" if ENV == "dev" else None, redoc_url=None, lifespan=lifespan
)

# Mutable application configuration object; values below seed it from config.
app.state.config = AppConfig()

app.state.config.ENABLE_OPENAI_API = ENABLE_OPENAI_API
app.state.config.ENABLE_OLLAMA_API = ENABLE_OLLAMA_API

app.state.config.ENABLE_MODEL_FILTER = ENABLE_MODEL_FILTER
app.state.config.MODEL_FILTER_LIST = MODEL_FILTER_LIST

app.state.config.WEBHOOK_URL = WEBHOOK_URL

# Task-model settings: which model handles auxiliary tasks (title generation,
# search-query generation, tool calling) for local (ollama) vs external models.
app.state.config.TASK_MODEL = TASK_MODEL
app.state.config.TASK_MODEL_EXTERNAL = TASK_MODEL_EXTERNAL
app.state.config.TITLE_GENERATION_PROMPT_TEMPLATE = TITLE_GENERATION_PROMPT_TEMPLATE
app.state.config.SEARCH_QUERY_GENERATION_PROMPT_TEMPLATE = (
    SEARCH_QUERY_GENERATION_PROMPT_TEMPLATE
)
app.state.config.SEARCH_QUERY_PROMPT_LENGTH_THRESHOLD = (
    SEARCH_QUERY_PROMPT_LENGTH_THRESHOLD
)
app.state.config.TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE = (
    TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE
)

# Cache of all known models keyed by id; populated by get_all_models().
app.state.MODELS = {}

# CORS origins for the middleware registered below ("*" = all origins).
origins = ["*"]
async def get_function_call_response(prompt, tool_id, template, task_model_id, user):
    """Ask the task model which tool function to call, then call it.

    The tool's specs are rendered into the function-calling template as the
    system prompt; the model is expected to reply with JSON of the form
    ``{"name": ..., "parameters": {...}}``. Returns the called function's
    result, or None when nothing was called or any step failed.
    """
    tool = Tools.get_tool_by_id(tool_id)
    tools_specs = json.dumps(tool.specs, indent=2)
    content = tools_function_calling_generation_template(template, tools_specs)

    payload = {
        "model": task_model_id,
        "messages": [
            {"role": "system", "content": content},
            {"role": "user", "content": f"Query: {prompt}"},
        ],
        "stream": False,
    }

    # Run inlet filters so pipelines can rewrite this task payload too.
    payload = filter_pipeline(payload, user)

    model = app.state.MODELS[task_model_id]

    response = None
    try:
        # Dispatch to whichever backend owns the task model.
        if model["owned_by"] == "ollama":
            response = await generate_ollama_chat_completion(
                OpenAIChatCompletionForm(**payload), user=user
            )
        else:
            response = await generate_openai_chat_completion(payload, user=user)

        content = None
        # Non-streaming request: drain the body iterator and keep the last
        # decoded chunk's message content.
        async for chunk in response.body_iterator:
            data = json.loads(chunk.decode("utf-8"))
            content = data["choices"][0]["message"]["content"]

        # Cleanup any remaining background tasks if necessary
        if response.background is not None:
            await response.background()

        # Parse the function response
        if content is not None:
            result = json.loads(content)
            print(result)

            # Call the function
            if "name" in result:
                # Load the toolkit module once and cache it on the webui app.
                if tool_id in webui_app.state.TOOLS:
                    toolkit_module = webui_app.state.TOOLS[tool_id]
                else:
                    toolkit_module = load_toolkit_module_by_id(tool_id)
                    webui_app.state.TOOLS[tool_id] = toolkit_module

                function = getattr(toolkit_module, result["name"])
                function_result = None
                try:
                    function_result = function(**result["parameters"])
                except Exception as e:
                    print(e)

                # Add the function result to the system prompt
                if function_result:
                    return function_result
    except Exception as e:
        print(f"Error: {e}")

    return None
  190. class ChatCompletionMiddleware(BaseHTTPMiddleware):
  191. async def dispatch(self, request: Request, call_next):
  192. return_citations = False
  193. if request.method == "POST" and (
  194. "/ollama/api/chat" in request.url.path
  195. or "/chat/completions" in request.url.path
  196. ):
  197. log.debug(f"request.url.path: {request.url.path}")
  198. # Read the original request body
  199. body = await request.body()
  200. # Decode body to string
  201. body_str = body.decode("utf-8")
  202. # Parse string to JSON
  203. data = json.loads(body_str) if body_str else {}
  204. user = get_current_user(
  205. get_http_authorization_cred(request.headers.get("Authorization"))
  206. )
  207. # Remove the citations from the body
  208. return_citations = data.get("citations", False)
  209. if "citations" in data:
  210. del data["citations"]
  211. # Set the task model
  212. task_model_id = data["model"]
  213. if task_model_id not in app.state.MODELS:
  214. raise HTTPException(
  215. status_code=status.HTTP_404_NOT_FOUND,
  216. detail="Model not found",
  217. )
  218. # Check if the user has a custom task model
  219. # If the user has a custom task model, use that model
  220. if app.state.MODELS[task_model_id]["owned_by"] == "ollama":
  221. if (
  222. app.state.config.TASK_MODEL
  223. and app.state.config.TASK_MODEL in app.state.MODELS
  224. ):
  225. task_model_id = app.state.config.TASK_MODEL
  226. else:
  227. if (
  228. app.state.config.TASK_MODEL_EXTERNAL
  229. and app.state.config.TASK_MODEL_EXTERNAL in app.state.MODELS
  230. ):
  231. task_model_id = app.state.config.TASK_MODEL_EXTERNAL
  232. context = ""
  233. # If tool_ids field is present, call the functions
  234. if "tool_ids" in data:
  235. prompt = get_last_user_message(data["messages"])
  236. for tool_id in data["tool_ids"]:
  237. print(tool_id)
  238. response = await get_function_call_response(
  239. prompt=prompt,
  240. tool_id=tool_id,
  241. template=app.state.config.TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE,
  242. task_model_id=task_model_id,
  243. user=user,
  244. )
  245. if response:
  246. context += ("\n" if context != "" else "") + response
  247. del data["tool_ids"]
  248. print(context)
  249. # If docs field is present, generate RAG completions
  250. if "docs" in data:
  251. data = {**data}
  252. rag_context, citations = get_rag_context(
  253. docs=data["docs"],
  254. messages=data["messages"],
  255. embedding_function=rag_app.state.EMBEDDING_FUNCTION,
  256. k=rag_app.state.config.TOP_K,
  257. reranking_function=rag_app.state.sentence_transformer_rf,
  258. r=rag_app.state.config.RELEVANCE_THRESHOLD,
  259. hybrid_search=rag_app.state.config.ENABLE_RAG_HYBRID_SEARCH,
  260. )
  261. if rag_context:
  262. context += ("\n" if context != "" else "") + rag_context
  263. del data["docs"]
  264. log.debug(f"rag_context: {rag_context}, citations: {citations}")
  265. if context != "":
  266. system_prompt = rag_template(
  267. rag_app.state.config.RAG_TEMPLATE, context, prompt
  268. )
  269. print(system_prompt)
  270. data["messages"] = add_or_update_system_message(
  271. f"\n{system_prompt}", data["messages"]
  272. )
  273. modified_body_bytes = json.dumps(data).encode("utf-8")
  274. # Replace the request body with the modified one
  275. request._body = modified_body_bytes
  276. # Set custom header to ensure content-length matches new body length
  277. request.headers.__dict__["_list"] = [
  278. (b"content-length", str(len(modified_body_bytes)).encode("utf-8")),
  279. *[
  280. (k, v)
  281. for k, v in request.headers.raw
  282. if k.lower() != b"content-length"
  283. ],
  284. ]
  285. response = await call_next(request)
  286. if return_citations:
  287. # Inject the citations into the response
  288. if isinstance(response, StreamingResponse):
  289. # If it's a streaming response, inject it as SSE event or NDJSON line
  290. content_type = response.headers.get("Content-Type")
  291. if "text/event-stream" in content_type:
  292. return StreamingResponse(
  293. self.openai_stream_wrapper(response.body_iterator, citations),
  294. )
  295. if "application/x-ndjson" in content_type:
  296. return StreamingResponse(
  297. self.ollama_stream_wrapper(response.body_iterator, citations),
  298. )
  299. return response
  300. async def _receive(self, body: bytes):
  301. return {"type": "http.request", "body": body, "more_body": False}
  302. async def openai_stream_wrapper(self, original_generator, citations):
  303. yield f"data: {json.dumps({'citations': citations})}\n\n"
  304. async for data in original_generator:
  305. yield data
  306. async def ollama_stream_wrapper(self, original_generator, citations):
  307. yield f"{json.dumps({'citations': citations})}\n"
  308. async for data in original_generator:
  309. yield data
  310. app.add_middleware(ChatCompletionMiddleware)
  311. def filter_pipeline(payload, user):
  312. user = {"id": user.id, "name": user.name, "role": user.role}
  313. model_id = payload["model"]
  314. filters = [
  315. model
  316. for model in app.state.MODELS.values()
  317. if "pipeline" in model
  318. and "type" in model["pipeline"]
  319. and model["pipeline"]["type"] == "filter"
  320. and (
  321. model["pipeline"]["pipelines"] == ["*"]
  322. or any(
  323. model_id == target_model_id
  324. for target_model_id in model["pipeline"]["pipelines"]
  325. )
  326. )
  327. ]
  328. sorted_filters = sorted(filters, key=lambda x: x["pipeline"]["priority"])
  329. model = app.state.MODELS[model_id]
  330. if "pipeline" in model:
  331. sorted_filters.append(model)
  332. for filter in sorted_filters:
  333. r = None
  334. try:
  335. urlIdx = filter["urlIdx"]
  336. url = openai_app.state.config.OPENAI_API_BASE_URLS[urlIdx]
  337. key = openai_app.state.config.OPENAI_API_KEYS[urlIdx]
  338. if key != "":
  339. headers = {"Authorization": f"Bearer {key}"}
  340. r = requests.post(
  341. f"{url}/{filter['id']}/filter/inlet",
  342. headers=headers,
  343. json={
  344. "user": user,
  345. "body": payload,
  346. },
  347. )
  348. r.raise_for_status()
  349. payload = r.json()
  350. except Exception as e:
  351. # Handle connection error here
  352. print(f"Connection error: {e}")
  353. if r is not None:
  354. try:
  355. res = r.json()
  356. if "detail" in res:
  357. return JSONResponse(
  358. status_code=r.status_code,
  359. content=res,
  360. )
  361. except:
  362. pass
  363. else:
  364. pass
  365. if "pipeline" not in app.state.MODELS[model_id]:
  366. if "chat_id" in payload:
  367. del payload["chat_id"]
  368. if "title" in payload:
  369. del payload["title"]
  370. return payload
class PipelineMiddleware(BaseHTTPMiddleware):
    """Applies pipeline "inlet" filters to chat-completion requests before
    they reach the mounted backend apps."""

    async def dispatch(self, request: Request, call_next):
        if request.method == "POST" and (
            "/ollama/api/chat" in request.url.path
            or "/chat/completions" in request.url.path
        ):
            log.debug(f"request.url.path: {request.url.path}")

            # Read the original request body
            body = await request.body()
            # Decode body to string
            body_str = body.decode("utf-8")
            # Parse string to JSON
            data = json.loads(body_str) if body_str else {}

            # Resolve the requesting user from the Authorization header.
            user = get_current_user(
                get_http_authorization_cred(request.headers.get("Authorization"))
            )

            # Run the payload through all matching filter pipelines' inlets.
            data = filter_pipeline(data, user)

            modified_body_bytes = json.dumps(data).encode("utf-8")
            # Replace the request body with the modified one
            request._body = modified_body_bytes
            # Set custom header to ensure content-length matches new body length
            request.headers.__dict__["_list"] = [
                (b"content-length", str(len(modified_body_bytes)).encode("utf-8")),
                *[
                    (k, v)
                    for k, v in request.headers.raw
                    if k.lower() != b"content-length"
                ],
            ]

        response = await call_next(request)
        return response

    async def _receive(self, body: bytes):
        # Replays the (modified) body to downstream ASGI consumers.
        return {"type": "http.request", "body": body, "more_body": False}
app.add_middleware(PipelineMiddleware)

# CORS: permissive — every origin, method, and header is allowed, with
# credentials enabled.
app.add_middleware(
    CORSMiddleware,
    allow_origins=origins,
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
  412. @app.middleware("http")
  413. async def check_url(request: Request, call_next):
  414. if len(app.state.MODELS) == 0:
  415. await get_all_models()
  416. else:
  417. pass
  418. start_time = int(time.time())
  419. response = await call_next(request)
  420. process_time = int(time.time()) - start_time
  421. response.headers["X-Process-Time"] = str(process_time)
  422. return response
@app.middleware("http")
async def update_embedding_function(request: Request, call_next):
    # After a RAG embedding-config update request completes, propagate the
    # new embedding function to the webui sub-app so both stay in sync.
    response = await call_next(request)
    if "/embedding/update" in request.url.path:
        webui_app.state.EMBEDDING_FUNCTION = rag_app.state.EMBEDDING_FUNCTION
    return response
# Mount sub-applications: websocket hub, model backends, and feature APIs.
app.mount("/ws", socket_app)
app.mount("/ollama", ollama_app)
app.mount("/openai", openai_app)

app.mount("/images/api/v1", images_app)
app.mount("/audio/api/v1", audio_app)
app.mount("/rag/api/v1", rag_app)

app.mount("/api/v1", webui_app)

# Seed the webui app's embedding function from the RAG app at startup.
webui_app.state.EMBEDDING_FUNCTION = rag_app.state.EMBEDDING_FUNCTION
  437. async def get_all_models():
  438. openai_models = []
  439. ollama_models = []
  440. if app.state.config.ENABLE_OPENAI_API:
  441. openai_models = await get_openai_models()
  442. openai_models = openai_models["data"]
  443. if app.state.config.ENABLE_OLLAMA_API:
  444. ollama_models = await get_ollama_models()
  445. ollama_models = [
  446. {
  447. "id": model["model"],
  448. "name": model["name"],
  449. "object": "model",
  450. "created": int(time.time()),
  451. "owned_by": "ollama",
  452. "ollama": model,
  453. }
  454. for model in ollama_models["models"]
  455. ]
  456. models = openai_models + ollama_models
  457. custom_models = Models.get_all_models()
  458. for custom_model in custom_models:
  459. if custom_model.base_model_id == None:
  460. for model in models:
  461. if (
  462. custom_model.id == model["id"]
  463. or custom_model.id == model["id"].split(":")[0]
  464. ):
  465. model["name"] = custom_model.name
  466. model["info"] = custom_model.model_dump()
  467. else:
  468. owned_by = "openai"
  469. for model in models:
  470. if (
  471. custom_model.base_model_id == model["id"]
  472. or custom_model.base_model_id == model["id"].split(":")[0]
  473. ):
  474. owned_by = model["owned_by"]
  475. break
  476. models.append(
  477. {
  478. "id": custom_model.id,
  479. "name": custom_model.name,
  480. "object": "model",
  481. "created": custom_model.created_at,
  482. "owned_by": owned_by,
  483. "info": custom_model.model_dump(),
  484. "preset": True,
  485. }
  486. )
  487. app.state.MODELS = {model["id"]: model for model in models}
  488. webui_app.state.MODELS = app.state.MODELS
  489. return models
  490. @app.get("/api/models")
  491. async def get_models(user=Depends(get_verified_user)):
  492. models = await get_all_models()
  493. # Filter out filter pipelines
  494. models = [
  495. model
  496. for model in models
  497. if "pipeline" not in model or model["pipeline"].get("type", None) != "filter"
  498. ]
  499. if app.state.config.ENABLE_MODEL_FILTER:
  500. if user.role == "user":
  501. models = list(
  502. filter(
  503. lambda model: model["id"] in app.state.config.MODEL_FILTER_LIST,
  504. models,
  505. )
  506. )
  507. return {"data": models}
  508. return {"data": models}
  509. @app.get("/api/task/config")
  510. async def get_task_config(user=Depends(get_verified_user)):
  511. return {
  512. "TASK_MODEL": app.state.config.TASK_MODEL,
  513. "TASK_MODEL_EXTERNAL": app.state.config.TASK_MODEL_EXTERNAL,
  514. "TITLE_GENERATION_PROMPT_TEMPLATE": app.state.config.TITLE_GENERATION_PROMPT_TEMPLATE,
  515. "SEARCH_QUERY_GENERATION_PROMPT_TEMPLATE": app.state.config.SEARCH_QUERY_GENERATION_PROMPT_TEMPLATE,
  516. "SEARCH_QUERY_PROMPT_LENGTH_THRESHOLD": app.state.config.SEARCH_QUERY_PROMPT_LENGTH_THRESHOLD,
  517. "TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE": app.state.config.TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE,
  518. }
class TaskConfigForm(BaseModel):
    # Request body for /api/task/config/update; keys mirror the payload
    # returned by /api/task/config. The Optional fields accept null.
    TASK_MODEL: Optional[str]
    TASK_MODEL_EXTERNAL: Optional[str]
    TITLE_GENERATION_PROMPT_TEMPLATE: str
    SEARCH_QUERY_GENERATION_PROMPT_TEMPLATE: str
    SEARCH_QUERY_PROMPT_LENGTH_THRESHOLD: int
    TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE: str
  526. @app.post("/api/task/config/update")
  527. async def update_task_config(form_data: TaskConfigForm, user=Depends(get_admin_user)):
  528. app.state.config.TASK_MODEL = form_data.TASK_MODEL
  529. app.state.config.TASK_MODEL_EXTERNAL = form_data.TASK_MODEL_EXTERNAL
  530. app.state.config.TITLE_GENERATION_PROMPT_TEMPLATE = (
  531. form_data.TITLE_GENERATION_PROMPT_TEMPLATE
  532. )
  533. app.state.config.SEARCH_QUERY_GENERATION_PROMPT_TEMPLATE = (
  534. form_data.SEARCH_QUERY_GENERATION_PROMPT_TEMPLATE
  535. )
  536. app.state.config.SEARCH_QUERY_PROMPT_LENGTH_THRESHOLD = (
  537. form_data.SEARCH_QUERY_PROMPT_LENGTH_THRESHOLD
  538. )
  539. app.state.config.TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE = (
  540. form_data.TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE
  541. )
  542. return {
  543. "TASK_MODEL": app.state.config.TASK_MODEL,
  544. "TASK_MODEL_EXTERNAL": app.state.config.TASK_MODEL_EXTERNAL,
  545. "TITLE_GENERATION_PROMPT_TEMPLATE": app.state.config.TITLE_GENERATION_PROMPT_TEMPLATE,
  546. "SEARCH_QUERY_GENERATION_PROMPT_TEMPLATE": app.state.config.SEARCH_QUERY_GENERATION_PROMPT_TEMPLATE,
  547. "SEARCH_QUERY_PROMPT_LENGTH_THRESHOLD": app.state.config.SEARCH_QUERY_PROMPT_LENGTH_THRESHOLD,
  548. "TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE": app.state.config.TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE,
  549. }
  550. @app.post("/api/task/title/completions")
  551. async def generate_title(form_data: dict, user=Depends(get_verified_user)):
  552. print("generate_title")
  553. model_id = form_data["model"]
  554. if model_id not in app.state.MODELS:
  555. raise HTTPException(
  556. status_code=status.HTTP_404_NOT_FOUND,
  557. detail="Model not found",
  558. )
  559. # Check if the user has a custom task model
  560. # If the user has a custom task model, use that model
  561. if app.state.MODELS[model_id]["owned_by"] == "ollama":
  562. if app.state.config.TASK_MODEL:
  563. task_model_id = app.state.config.TASK_MODEL
  564. if task_model_id in app.state.MODELS:
  565. model_id = task_model_id
  566. else:
  567. if app.state.config.TASK_MODEL_EXTERNAL:
  568. task_model_id = app.state.config.TASK_MODEL_EXTERNAL
  569. if task_model_id in app.state.MODELS:
  570. model_id = task_model_id
  571. print(model_id)
  572. model = app.state.MODELS[model_id]
  573. template = app.state.config.TITLE_GENERATION_PROMPT_TEMPLATE
  574. content = title_generation_template(
  575. template, form_data["prompt"], user.model_dump()
  576. )
  577. payload = {
  578. "model": model_id,
  579. "messages": [{"role": "user", "content": content}],
  580. "stream": False,
  581. "max_tokens": 50,
  582. "chat_id": form_data.get("chat_id", None),
  583. "title": True,
  584. }
  585. print(payload)
  586. payload = filter_pipeline(payload, user)
  587. if model["owned_by"] == "ollama":
  588. return await generate_ollama_chat_completion(
  589. OpenAIChatCompletionForm(**payload), user=user
  590. )
  591. else:
  592. return await generate_openai_chat_completion(payload, user=user)
  593. @app.post("/api/task/query/completions")
  594. async def generate_search_query(form_data: dict, user=Depends(get_verified_user)):
  595. print("generate_search_query")
  596. if len(form_data["prompt"]) < app.state.config.SEARCH_QUERY_PROMPT_LENGTH_THRESHOLD:
  597. raise HTTPException(
  598. status_code=status.HTTP_400_BAD_REQUEST,
  599. detail=f"Skip search query generation for short prompts (< {app.state.config.SEARCH_QUERY_PROMPT_LENGTH_THRESHOLD} characters)",
  600. )
  601. model_id = form_data["model"]
  602. if model_id not in app.state.MODELS:
  603. raise HTTPException(
  604. status_code=status.HTTP_404_NOT_FOUND,
  605. detail="Model not found",
  606. )
  607. # Check if the user has a custom task model
  608. # If the user has a custom task model, use that model
  609. if app.state.MODELS[model_id]["owned_by"] == "ollama":
  610. if app.state.config.TASK_MODEL:
  611. task_model_id = app.state.config.TASK_MODEL
  612. if task_model_id in app.state.MODELS:
  613. model_id = task_model_id
  614. else:
  615. if app.state.config.TASK_MODEL_EXTERNAL:
  616. task_model_id = app.state.config.TASK_MODEL_EXTERNAL
  617. if task_model_id in app.state.MODELS:
  618. model_id = task_model_id
  619. print(model_id)
  620. model = app.state.MODELS[model_id]
  621. template = app.state.config.SEARCH_QUERY_GENERATION_PROMPT_TEMPLATE
  622. content = search_query_generation_template(
  623. template, form_data["prompt"], user.model_dump()
  624. )
  625. payload = {
  626. "model": model_id,
  627. "messages": [{"role": "user", "content": content}],
  628. "stream": False,
  629. "max_tokens": 30,
  630. }
  631. print(payload)
  632. payload = filter_pipeline(payload, user)
  633. if model["owned_by"] == "ollama":
  634. return await generate_ollama_chat_completion(
  635. OpenAIChatCompletionForm(**payload), user=user
  636. )
  637. else:
  638. return await generate_openai_chat_completion(payload, user=user)
  639. @app.post("/api/task/tools/completions")
  640. async def get_tools_function_calling(form_data: dict, user=Depends(get_verified_user)):
  641. print("get_tools_function_calling")
  642. model_id = form_data["model"]
  643. if model_id not in app.state.MODELS:
  644. raise HTTPException(
  645. status_code=status.HTTP_404_NOT_FOUND,
  646. detail="Model not found",
  647. )
  648. # Check if the user has a custom task model
  649. # If the user has a custom task model, use that model
  650. if app.state.MODELS[model_id]["owned_by"] == "ollama":
  651. if app.state.config.TASK_MODEL:
  652. task_model_id = app.state.config.TASK_MODEL
  653. if task_model_id in app.state.MODELS:
  654. model_id = task_model_id
  655. else:
  656. if app.state.config.TASK_MODEL_EXTERNAL:
  657. task_model_id = app.state.config.TASK_MODEL_EXTERNAL
  658. if task_model_id in app.state.MODELS:
  659. model_id = task_model_id
  660. print(model_id)
  661. template = app.state.config.TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE
  662. return await get_function_call_response(
  663. form_data["prompt"], form_data["tool_id"], template, model_id, user
  664. )
  665. @app.post("/api/chat/completions")
  666. async def generate_chat_completions(form_data: dict, user=Depends(get_verified_user)):
  667. model_id = form_data["model"]
  668. if model_id not in app.state.MODELS:
  669. raise HTTPException(
  670. status_code=status.HTTP_404_NOT_FOUND,
  671. detail="Model not found",
  672. )
  673. model = app.state.MODELS[model_id]
  674. print(model)
  675. if model["owned_by"] == "ollama":
  676. return await generate_ollama_chat_completion(
  677. OpenAIChatCompletionForm(**form_data), user=user
  678. )
  679. else:
  680. return await generate_openai_chat_completion(form_data, user=user)
@app.post("/api/chat/completed")
async def chat_completed(form_data: dict, user=Depends(get_verified_user)):
    """Run a finished chat through every matching filter pipeline's outlet."""
    data = form_data
    model_id = data["model"]

    # Filter pipelines targeting this model (or all models via ["*"]).
    filters = [
        model
        for model in app.state.MODELS.values()
        if "pipeline" in model
        and "type" in model["pipeline"]
        and model["pipeline"]["type"] == "filter"
        and (
            model["pipeline"]["pipelines"] == ["*"]
            or any(
                model_id == target_model_id
                for target_model_id in model["pipeline"]["pipelines"]
            )
        )
    ]
    # Ascending priority; unlike the inlet path, the model's own pipeline
    # (if any) is prepended and therefore runs first here.
    sorted_filters = sorted(filters, key=lambda x: x["pipeline"]["priority"])

    print(model_id)

    if model_id in app.state.MODELS:
        model = app.state.MODELS[model_id]
        if "pipeline" in model:
            sorted_filters = [model] + sorted_filters

    for filter in sorted_filters:
        r = None
        try:
            urlIdx = filter["urlIdx"]

            url = openai_app.state.config.OPENAI_API_BASE_URLS[urlIdx]
            key = openai_app.state.config.OPENAI_API_KEYS[urlIdx]

            if key != "":
                headers = {"Authorization": f"Bearer {key}"}
                r = requests.post(
                    f"{url}/{filter['id']}/filter/outlet",
                    headers=headers,
                    json={
                        "user": {"id": user.id, "name": user.name, "role": user.role},
                        "body": data,
                    },
                )

                r.raise_for_status()
                # Each outlet may rewrite the chat data before the next one.
                data = r.json()
        except Exception as e:
            # Handle connection error here
            print(f"Connection error: {e}")
            if r is not None:
                try:
                    res = r.json()
                    # Surface an upstream "detail" error to the client.
                    if "detail" in res:
                        return JSONResponse(
                            status_code=r.status_code,
                            content=res,
                        )
                except:
                    pass

            else:
                pass

    return data
  739. @app.get("/api/pipelines/list")
  740. async def get_pipelines_list(user=Depends(get_admin_user)):
  741. responses = await get_openai_models(raw=True)
  742. print(responses)
  743. urlIdxs = [
  744. idx
  745. for idx, response in enumerate(responses)
  746. if response != None and "pipelines" in response
  747. ]
  748. return {
  749. "data": [
  750. {
  751. "url": openai_app.state.config.OPENAI_API_BASE_URLS[urlIdx],
  752. "idx": urlIdx,
  753. }
  754. for urlIdx in urlIdxs
  755. ]
  756. }
  757. @app.post("/api/pipelines/upload")
  758. async def upload_pipeline(
  759. urlIdx: int = Form(...), file: UploadFile = File(...), user=Depends(get_admin_user)
  760. ):
  761. print("upload_pipeline", urlIdx, file.filename)
  762. # Check if the uploaded file is a python file
  763. if not file.filename.endswith(".py"):
  764. raise HTTPException(
  765. status_code=status.HTTP_400_BAD_REQUEST,
  766. detail="Only Python (.py) files are allowed.",
  767. )
  768. upload_folder = f"{CACHE_DIR}/pipelines"
  769. os.makedirs(upload_folder, exist_ok=True)
  770. file_path = os.path.join(upload_folder, file.filename)
  771. try:
  772. # Save the uploaded file
  773. with open(file_path, "wb") as buffer:
  774. shutil.copyfileobj(file.file, buffer)
  775. url = openai_app.state.config.OPENAI_API_BASE_URLS[urlIdx]
  776. key = openai_app.state.config.OPENAI_API_KEYS[urlIdx]
  777. headers = {"Authorization": f"Bearer {key}"}
  778. with open(file_path, "rb") as f:
  779. files = {"file": f}
  780. r = requests.post(f"{url}/pipelines/upload", headers=headers, files=files)
  781. r.raise_for_status()
  782. data = r.json()
  783. return {**data}
  784. except Exception as e:
  785. # Handle connection error here
  786. print(f"Connection error: {e}")
  787. detail = "Pipeline not found"
  788. if r is not None:
  789. try:
  790. res = r.json()
  791. if "detail" in res:
  792. detail = res["detail"]
  793. except:
  794. pass
  795. raise HTTPException(
  796. status_code=(r.status_code if r is not None else status.HTTP_404_NOT_FOUND),
  797. detail=detail,
  798. )
  799. finally:
  800. # Ensure the file is deleted after the upload is completed or on failure
  801. if os.path.exists(file_path):
  802. os.remove(file_path)
class AddPipelineForm(BaseModel):
    """Request body for POST /api/pipelines/add."""

    # Source URL of the pipeline to install on the backend.
    url: str
    # Index into OPENAI_API_BASE_URLS selecting the target pipelines backend.
    urlIdx: int
  806. @app.post("/api/pipelines/add")
  807. async def add_pipeline(form_data: AddPipelineForm, user=Depends(get_admin_user)):
  808. r = None
  809. try:
  810. urlIdx = form_data.urlIdx
  811. url = openai_app.state.config.OPENAI_API_BASE_URLS[urlIdx]
  812. key = openai_app.state.config.OPENAI_API_KEYS[urlIdx]
  813. headers = {"Authorization": f"Bearer {key}"}
  814. r = requests.post(
  815. f"{url}/pipelines/add", headers=headers, json={"url": form_data.url}
  816. )
  817. r.raise_for_status()
  818. data = r.json()
  819. return {**data}
  820. except Exception as e:
  821. # Handle connection error here
  822. print(f"Connection error: {e}")
  823. detail = "Pipeline not found"
  824. if r is not None:
  825. try:
  826. res = r.json()
  827. if "detail" in res:
  828. detail = res["detail"]
  829. except:
  830. pass
  831. raise HTTPException(
  832. status_code=(r.status_code if r is not None else status.HTTP_404_NOT_FOUND),
  833. detail=detail,
  834. )
class DeletePipelineForm(BaseModel):
    """Request body for DELETE /api/pipelines/delete."""

    # Identifier of the pipeline to remove from the backend.
    id: str
    # Index into OPENAI_API_BASE_URLS selecting the target pipelines backend.
    urlIdx: int
  838. @app.delete("/api/pipelines/delete")
  839. async def delete_pipeline(form_data: DeletePipelineForm, user=Depends(get_admin_user)):
  840. r = None
  841. try:
  842. urlIdx = form_data.urlIdx
  843. url = openai_app.state.config.OPENAI_API_BASE_URLS[urlIdx]
  844. key = openai_app.state.config.OPENAI_API_KEYS[urlIdx]
  845. headers = {"Authorization": f"Bearer {key}"}
  846. r = requests.delete(
  847. f"{url}/pipelines/delete", headers=headers, json={"id": form_data.id}
  848. )
  849. r.raise_for_status()
  850. data = r.json()
  851. return {**data}
  852. except Exception as e:
  853. # Handle connection error here
  854. print(f"Connection error: {e}")
  855. detail = "Pipeline not found"
  856. if r is not None:
  857. try:
  858. res = r.json()
  859. if "detail" in res:
  860. detail = res["detail"]
  861. except:
  862. pass
  863. raise HTTPException(
  864. status_code=(r.status_code if r is not None else status.HTTP_404_NOT_FOUND),
  865. detail=detail,
  866. )
  867. @app.get("/api/pipelines")
  868. async def get_pipelines(urlIdx: Optional[int] = None, user=Depends(get_admin_user)):
  869. r = None
  870. try:
  871. urlIdx
  872. url = openai_app.state.config.OPENAI_API_BASE_URLS[urlIdx]
  873. key = openai_app.state.config.OPENAI_API_KEYS[urlIdx]
  874. headers = {"Authorization": f"Bearer {key}"}
  875. r = requests.get(f"{url}/pipelines", headers=headers)
  876. r.raise_for_status()
  877. data = r.json()
  878. return {**data}
  879. except Exception as e:
  880. # Handle connection error here
  881. print(f"Connection error: {e}")
  882. detail = "Pipeline not found"
  883. if r is not None:
  884. try:
  885. res = r.json()
  886. if "detail" in res:
  887. detail = res["detail"]
  888. except:
  889. pass
  890. raise HTTPException(
  891. status_code=(r.status_code if r is not None else status.HTTP_404_NOT_FOUND),
  892. detail=detail,
  893. )
  894. @app.get("/api/pipelines/{pipeline_id}/valves")
  895. async def get_pipeline_valves(
  896. urlIdx: Optional[int], pipeline_id: str, user=Depends(get_admin_user)
  897. ):
  898. models = await get_all_models()
  899. r = None
  900. try:
  901. url = openai_app.state.config.OPENAI_API_BASE_URLS[urlIdx]
  902. key = openai_app.state.config.OPENAI_API_KEYS[urlIdx]
  903. headers = {"Authorization": f"Bearer {key}"}
  904. r = requests.get(f"{url}/{pipeline_id}/valves", headers=headers)
  905. r.raise_for_status()
  906. data = r.json()
  907. return {**data}
  908. except Exception as e:
  909. # Handle connection error here
  910. print(f"Connection error: {e}")
  911. detail = "Pipeline not found"
  912. if r is not None:
  913. try:
  914. res = r.json()
  915. if "detail" in res:
  916. detail = res["detail"]
  917. except:
  918. pass
  919. raise HTTPException(
  920. status_code=(r.status_code if r is not None else status.HTTP_404_NOT_FOUND),
  921. detail=detail,
  922. )
  923. @app.get("/api/pipelines/{pipeline_id}/valves/spec")
  924. async def get_pipeline_valves_spec(
  925. urlIdx: Optional[int], pipeline_id: str, user=Depends(get_admin_user)
  926. ):
  927. models = await get_all_models()
  928. r = None
  929. try:
  930. url = openai_app.state.config.OPENAI_API_BASE_URLS[urlIdx]
  931. key = openai_app.state.config.OPENAI_API_KEYS[urlIdx]
  932. headers = {"Authorization": f"Bearer {key}"}
  933. r = requests.get(f"{url}/{pipeline_id}/valves/spec", headers=headers)
  934. r.raise_for_status()
  935. data = r.json()
  936. return {**data}
  937. except Exception as e:
  938. # Handle connection error here
  939. print(f"Connection error: {e}")
  940. detail = "Pipeline not found"
  941. if r is not None:
  942. try:
  943. res = r.json()
  944. if "detail" in res:
  945. detail = res["detail"]
  946. except:
  947. pass
  948. raise HTTPException(
  949. status_code=(r.status_code if r is not None else status.HTTP_404_NOT_FOUND),
  950. detail=detail,
  951. )
  952. @app.post("/api/pipelines/{pipeline_id}/valves/update")
  953. async def update_pipeline_valves(
  954. urlIdx: Optional[int],
  955. pipeline_id: str,
  956. form_data: dict,
  957. user=Depends(get_admin_user),
  958. ):
  959. models = await get_all_models()
  960. r = None
  961. try:
  962. url = openai_app.state.config.OPENAI_API_BASE_URLS[urlIdx]
  963. key = openai_app.state.config.OPENAI_API_KEYS[urlIdx]
  964. headers = {"Authorization": f"Bearer {key}"}
  965. r = requests.post(
  966. f"{url}/{pipeline_id}/valves/update",
  967. headers=headers,
  968. json={**form_data},
  969. )
  970. r.raise_for_status()
  971. data = r.json()
  972. return {**data}
  973. except Exception as e:
  974. # Handle connection error here
  975. print(f"Connection error: {e}")
  976. detail = "Pipeline not found"
  977. if r is not None:
  978. try:
  979. res = r.json()
  980. if "detail" in res:
  981. detail = res["detail"]
  982. except:
  983. pass
  984. raise HTTPException(
  985. status_code=(r.status_code if r is not None else status.HTTP_404_NOT_FOUND),
  986. detail=detail,
  987. )
  988. @app.get("/api/config")
  989. async def get_app_config():
  990. # Checking and Handling the Absence of 'ui' in CONFIG_DATA
  991. default_locale = "en-US"
  992. if "ui" in CONFIG_DATA:
  993. default_locale = CONFIG_DATA["ui"].get("default_locale", "en-US")
  994. # The Rest of the Function Now Uses the Variables Defined Above
  995. return {
  996. "status": True,
  997. "name": WEBUI_NAME,
  998. "version": VERSION,
  999. "default_locale": default_locale,
  1000. "default_models": webui_app.state.config.DEFAULT_MODELS,
  1001. "default_prompt_suggestions": webui_app.state.config.DEFAULT_PROMPT_SUGGESTIONS,
  1002. "features": {
  1003. "auth": WEBUI_AUTH,
  1004. "auth_trusted_header": bool(webui_app.state.AUTH_TRUSTED_EMAIL_HEADER),
  1005. "enable_signup": webui_app.state.config.ENABLE_SIGNUP,
  1006. "enable_web_search": rag_app.state.config.ENABLE_RAG_WEB_SEARCH,
  1007. "enable_image_generation": images_app.state.config.ENABLED,
  1008. "enable_community_sharing": webui_app.state.config.ENABLE_COMMUNITY_SHARING,
  1009. "enable_admin_export": ENABLE_ADMIN_EXPORT,
  1010. },
  1011. "audio": {
  1012. "tts": {
  1013. "engine": audio_app.state.config.TTS_ENGINE,
  1014. "voice": audio_app.state.config.TTS_VOICE,
  1015. },
  1016. "stt": {
  1017. "engine": audio_app.state.config.STT_ENGINE,
  1018. },
  1019. },
  1020. }
  1021. @app.get("/api/config/model/filter")
  1022. async def get_model_filter_config(user=Depends(get_admin_user)):
  1023. return {
  1024. "enabled": app.state.config.ENABLE_MODEL_FILTER,
  1025. "models": app.state.config.MODEL_FILTER_LIST,
  1026. }
class ModelFilterConfigForm(BaseModel):
    """Request body for POST /api/config/model/filter."""

    # Whether the model filter is active.
    enabled: bool
    # Model ids the filter applies to.
    models: List[str]
  1030. @app.post("/api/config/model/filter")
  1031. async def update_model_filter_config(
  1032. form_data: ModelFilterConfigForm, user=Depends(get_admin_user)
  1033. ):
  1034. app.state.config.ENABLE_MODEL_FILTER = form_data.enabled
  1035. app.state.config.MODEL_FILTER_LIST = form_data.models
  1036. return {
  1037. "enabled": app.state.config.ENABLE_MODEL_FILTER,
  1038. "models": app.state.config.MODEL_FILTER_LIST,
  1039. }
  1040. @app.get("/api/webhook")
  1041. async def get_webhook_url(user=Depends(get_admin_user)):
  1042. return {
  1043. "url": app.state.config.WEBHOOK_URL,
  1044. }
class UrlForm(BaseModel):
    """Request body carrying a single URL (used by POST /api/webhook)."""

    # The webhook URL to store.
    url: str
  1047. @app.post("/api/webhook")
  1048. async def update_webhook_url(form_data: UrlForm, user=Depends(get_admin_user)):
  1049. app.state.config.WEBHOOK_URL = form_data.url
  1050. webui_app.state.WEBHOOK_URL = app.state.config.WEBHOOK_URL
  1051. return {"url": app.state.config.WEBHOOK_URL}
  1052. @app.get("/api/version")
  1053. async def get_app_config():
  1054. return {
  1055. "version": VERSION,
  1056. }
  1057. @app.get("/api/changelog")
  1058. async def get_app_changelog():
  1059. return {key: CHANGELOG[key] for idx, key in enumerate(CHANGELOG) if idx < 5}
  1060. @app.get("/api/version/updates")
  1061. async def get_app_latest_release_version():
  1062. try:
  1063. async with aiohttp.ClientSession(trust_env=True) as session:
  1064. async with session.get(
  1065. "https://api.github.com/repos/open-webui/open-webui/releases/latest"
  1066. ) as response:
  1067. response.raise_for_status()
  1068. data = await response.json()
  1069. latest_version = data["tag_name"]
  1070. return {"current": VERSION, "latest": latest_version[1:]}
  1071. except aiohttp.ClientError as e:
  1072. raise HTTPException(
  1073. status_code=status.HTTP_503_SERVICE_UNAVAILABLE,
  1074. detail=ERROR_MESSAGES.RATE_LIMIT_EXCEEDED,
  1075. )
  1076. @app.get("/manifest.json")
  1077. async def get_manifest_json():
  1078. return {
  1079. "name": WEBUI_NAME,
  1080. "short_name": WEBUI_NAME,
  1081. "start_url": "/",
  1082. "display": "standalone",
  1083. "background_color": "#343541",
  1084. "theme_color": "#343541",
  1085. "orientation": "portrait-primary",
  1086. "icons": [{"src": "/static/logo.png", "type": "image/png", "sizes": "500x500"}],
  1087. }
@app.get("/opensearch.xml")
async def get_opensearch_xml():
    # OpenSearch description document so browsers can register this UI as a
    # search provider. The literal "{searchTerms}" placeholder must appear
    # verbatim in the output for the browser to substitute — hence the nested
    # quoted string inside the f-string expression on the <Url> line.
    xml_content = rf"""
<OpenSearchDescription xmlns="http://a9.com/-/spec/opensearch/1.1/" xmlns:moz="http://www.mozilla.org/2006/browser/search/">
<ShortName>{WEBUI_NAME}</ShortName>
<Description>Search {WEBUI_NAME}</Description>
<InputEncoding>UTF-8</InputEncoding>
<Image width="16" height="16" type="image/x-icon">{WEBUI_URL}/favicon.png</Image>
<Url type="text/html" method="get" template="{WEBUI_URL}/?q={"{searchTerms}"}"/>
<moz:SearchForm>{WEBUI_URL}</moz:SearchForm>
</OpenSearchDescription>
"""
    return Response(content=xml_content, media_type="application/xml")
  1101. @app.get("/health")
  1102. async def healthcheck():
  1103. return {"status": True}
# Serve static assets and the on-disk cache directly from the app.
app.mount("/static", StaticFiles(directory=STATIC_DIR), name="static")
app.mount("/cache", StaticFiles(directory=CACHE_DIR), name="cache")

if os.path.exists(FRONTEND_BUILD_DIR):
    # Force the correct MIME type for .js files (presumably to guard against
    # misconfigured system mimetype registries — TODO confirm).
    mimetypes.add_type("text/javascript", ".js")

    # Mounted last at "/" so the API routes registered above take precedence.
    app.mount(
        "/",
        SPAStaticFiles(directory=FRONTEND_BUILD_DIR, html=True),
        name="spa-static-files",
    )
else:
    # No built frontend present: run headless, API endpoints only.
    log.warning(
        f"Frontend build directory not found at '{FRONTEND_BUILD_DIR}'. Serving API only."
    )