main.py 43 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
712781279128012811282128312841285128612871288128912901291129212931294129512961297129812991300130113021303130413051306130713081309131013111312131313141315131613171318131913201321132213231324132513261327132813291330133113321333133413351336133713381339134013411342134313441345134613471348134913501351135213531354135513561357135813591360136113621363136413651366136713681369137013711372137313741375137613771378137913801381138213831384138513861387138813891390
  1. from contextlib import asynccontextmanager
  2. from bs4 import BeautifulSoup
  3. import json
  4. import markdown
  5. import time
  6. import os
  7. import sys
  8. import logging
  9. import aiohttp
  10. import requests
  11. import mimetypes
  12. import shutil
  13. import os
  14. import asyncio
  15. from fastapi import FastAPI, Request, Depends, status, UploadFile, File, Form
  16. from fastapi.staticfiles import StaticFiles
  17. from fastapi.responses import JSONResponse
  18. from fastapi import HTTPException
  19. from fastapi.middleware.wsgi import WSGIMiddleware
  20. from fastapi.middleware.cors import CORSMiddleware
  21. from starlette.exceptions import HTTPException as StarletteHTTPException
  22. from starlette.middleware.base import BaseHTTPMiddleware
  23. from starlette.responses import StreamingResponse, Response
  24. from apps.socket.main import app as socket_app
  25. from apps.ollama.main import (
  26. app as ollama_app,
  27. OpenAIChatCompletionForm,
  28. get_all_models as get_ollama_models,
  29. generate_openai_chat_completion as generate_ollama_chat_completion,
  30. )
  31. from apps.openai.main import (
  32. app as openai_app,
  33. get_all_models as get_openai_models,
  34. generate_chat_completion as generate_openai_chat_completion,
  35. )
  36. from apps.audio.main import app as audio_app
  37. from apps.images.main import app as images_app
  38. from apps.rag.main import app as rag_app
  39. from apps.webui.main import app as webui_app
  40. from pydantic import BaseModel
  41. from typing import List, Optional
  42. from apps.webui.models.models import Models, ModelModel
  43. from apps.webui.models.tools import Tools
  44. from apps.webui.utils import load_toolkit_module_by_id
  45. from utils.utils import (
  46. get_admin_user,
  47. get_verified_user,
  48. get_current_user,
  49. get_http_authorization_cred,
  50. )
  51. from utils.task import (
  52. title_generation_template,
  53. search_query_generation_template,
  54. tools_function_calling_generation_template,
  55. )
  56. from utils.misc import get_last_user_message, add_or_update_system_message
  57. from apps.rag.utils import rag_messages, rag_template
  58. from config import (
  59. CONFIG_DATA,
  60. WEBUI_NAME,
  61. WEBUI_URL,
  62. WEBUI_AUTH,
  63. ENV,
  64. VERSION,
  65. CHANGELOG,
  66. FRONTEND_BUILD_DIR,
  67. CACHE_DIR,
  68. STATIC_DIR,
  69. ENABLE_OPENAI_API,
  70. ENABLE_OLLAMA_API,
  71. ENABLE_MODEL_FILTER,
  72. MODEL_FILTER_LIST,
  73. GLOBAL_LOG_LEVEL,
  74. SRC_LOG_LEVELS,
  75. WEBHOOK_URL,
  76. ENABLE_ADMIN_EXPORT,
  77. WEBUI_BUILD_HASH,
  78. TASK_MODEL,
  79. TASK_MODEL_EXTERNAL,
  80. TITLE_GENERATION_PROMPT_TEMPLATE,
  81. SEARCH_QUERY_GENERATION_PROMPT_TEMPLATE,
  82. SEARCH_QUERY_PROMPT_LENGTH_THRESHOLD,
  83. TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE,
  84. AppConfig,
  85. )
  86. from constants import ERROR_MESSAGES
# Route all root logging to stdout at the globally-configured level, then
# scope this module's logger to the "MAIN" component log level.
logging.basicConfig(stream=sys.stdout, level=GLOBAL_LOG_LEVEL)
log = logging.getLogger(__name__)
log.setLevel(SRC_LOG_LEVELS["MAIN"])
  90. class SPAStaticFiles(StaticFiles):
  91. async def get_response(self, path: str, scope):
  92. try:
  93. return await super().get_response(path, scope)
  94. except (HTTPException, StarletteHTTPException) as ex:
  95. if ex.status_code == 404:
  96. return await super().get_response("index.html", scope)
  97. else:
  98. raise ex
# Startup banner printed once at import time: logo, version, commit hash
# (suppressed for dev builds), and the project URL.
print(
    rf"""
___ __ __ _ _ _ ___
/ _ \ _ __ ___ _ __ \ \ / /__| |__ | | | |_ _|
| | | | '_ \ / _ \ '_ \ \ \ /\ / / _ \ '_ \| | | || |
| |_| | |_) | __/ | | | \ V V / __/ |_) | |_| || |
\___/| .__/ \___|_| |_| \_/\_/ \___|_.__/ \___/|___|
|_|
v{VERSION} - building the best open-source AI user interface.
{f"Commit: {WEBUI_BUILD_HASH}" if WEBUI_BUILD_HASH != "dev-build" else ""}
https://github.com/open-webui/open-webui
"""
)
@asyncontextmanager
async def lifespan(app: FastAPI):
    # No startup/shutdown work yet; placeholder for future lifecycle hooks.
    yield
# API docs are exposed only in the dev environment; ReDoc is disabled.
app = FastAPI(
    docs_url="/docs" if ENV == "dev" else None, redoc_url=None, lifespan=lifespan
)

# Mutable runtime configuration, seeded from the static config module.
app.state.config = AppConfig()

app.state.config.ENABLE_OPENAI_API = ENABLE_OPENAI_API
app.state.config.ENABLE_OLLAMA_API = ENABLE_OLLAMA_API

app.state.config.ENABLE_MODEL_FILTER = ENABLE_MODEL_FILTER
app.state.config.MODEL_FILTER_LIST = MODEL_FILTER_LIST

app.state.config.WEBHOOK_URL = WEBHOOK_URL

# Task models: dedicated (usually small/fast) models used for auxiliary
# jobs such as title generation, search-query generation and tool calling.
app.state.config.TASK_MODEL = TASK_MODEL
app.state.config.TASK_MODEL_EXTERNAL = TASK_MODEL_EXTERNAL
app.state.config.TITLE_GENERATION_PROMPT_TEMPLATE = TITLE_GENERATION_PROMPT_TEMPLATE
app.state.config.SEARCH_QUERY_GENERATION_PROMPT_TEMPLATE = (
    SEARCH_QUERY_GENERATION_PROMPT_TEMPLATE
)
app.state.config.SEARCH_QUERY_PROMPT_LENGTH_THRESHOLD = (
    SEARCH_QUERY_PROMPT_LENGTH_THRESHOLD
)
app.state.config.TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE = (
    TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE
)

# Model registry, keyed by model id; populated lazily by get_all_models().
app.state.MODELS = {}

# CORS origins; "*" allows all (see CORSMiddleware registration below).
origins = ["*"]
  138. async def get_function_call_response(prompt, tool_id, template, task_model_id, user):
  139. tool = Tools.get_tool_by_id(tool_id)
  140. tools_specs = json.dumps(tool.specs, indent=2)
  141. content = tools_function_calling_generation_template(template, tools_specs)
  142. payload = {
  143. "model": task_model_id,
  144. "messages": [
  145. {"role": "system", "content": content},
  146. {"role": "user", "content": f"Query: {prompt}"},
  147. ],
  148. "stream": False,
  149. }
  150. payload = filter_pipeline(payload, user)
  151. model = app.state.MODELS[task_model_id]
  152. response = None
  153. try:
  154. if model["owned_by"] == "ollama":
  155. response = await generate_ollama_chat_completion(
  156. OpenAIChatCompletionForm(**payload), user=user
  157. )
  158. else:
  159. response = await generate_openai_chat_completion(payload, user=user)
  160. content = None
  161. async for chunk in response.body_iterator:
  162. data = json.loads(chunk.decode("utf-8"))
  163. content = data["choices"][0]["message"]["content"]
  164. # Cleanup any remaining background tasks if necessary
  165. if response.background is not None:
  166. await response.background()
  167. # Parse the function response
  168. if content is not None:
  169. result = json.loads(content)
  170. print(result)
  171. # Call the function
  172. if "name" in result:
  173. if tool_id in webui_app.state.TOOLS:
  174. toolkit_module = webui_app.state.TOOLS[tool_id]
  175. else:
  176. toolkit_module = load_toolkit_module_by_id(tool_id)
  177. webui_app.state.TOOLS[tool_id] = toolkit_module
  178. function = getattr(toolkit_module, result["name"])
  179. function_result = None
  180. try:
  181. function_result = function(**result["parameters"])
  182. except Exception as e:
  183. print(e)
  184. # Add the function result to the system prompt
  185. if function_result:
  186. return function_result
  187. except Exception as e:
  188. print(f"Error: {e}")
  189. return None
  190. class ChatCompletionMiddleware(BaseHTTPMiddleware):
  191. async def dispatch(self, request: Request, call_next):
  192. return_citations = False
  193. if request.method == "POST" and (
  194. "/ollama/api/chat" in request.url.path
  195. or "/chat/completions" in request.url.path
  196. ):
  197. log.debug(f"request.url.path: {request.url.path}")
  198. # Read the original request body
  199. body = await request.body()
  200. # Decode body to string
  201. body_str = body.decode("utf-8")
  202. # Parse string to JSON
  203. data = json.loads(body_str) if body_str else {}
  204. # Remove the citations from the body
  205. return_citations = data.get("citations", False)
  206. if "citations" in data:
  207. del data["citations"]
  208. # Set the task model
  209. task_model_id = data["model"]
  210. if task_model_id not in app.state.MODELS:
  211. raise HTTPException(
  212. status_code=status.HTTP_404_NOT_FOUND,
  213. detail="Model not found",
  214. )
  215. # Check if the user has a custom task model
  216. # If the user has a custom task model, use that model
  217. if app.state.MODELS[task_model_id]["owned_by"] == "ollama":
  218. if (
  219. app.state.config.TASK_MODEL
  220. and app.state.config.TASK_MODEL in app.state.MODELS
  221. ):
  222. task_model_id = app.state.config.TASK_MODEL
  223. else:
  224. if (
  225. app.state.config.TASK_MODEL_EXTERNAL
  226. and app.state.config.TASK_MODEL_EXTERNAL in app.state.MODELS
  227. ):
  228. task_model_id = app.state.config.TASK_MODEL_EXTERNAL
  229. if "tool_ids" in data:
  230. user = get_current_user(
  231. get_http_authorization_cred(request.headers.get("Authorization"))
  232. )
  233. prompt = get_last_user_message(data["messages"])
  234. context = ""
  235. for tool_id in data["tool_ids"]:
  236. response = await get_function_call_response(
  237. prompt=prompt,
  238. tool_id=tool_id,
  239. template=app.state.config.TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE,
  240. task_model_id=task_model_id,
  241. user=user,
  242. )
  243. print(response)
  244. if response:
  245. context = ("\n" if context != "" else "") + response
  246. if context != "":
  247. system_prompt = rag_template(
  248. rag_app.state.config.RAG_TEMPLATE, context, prompt
  249. )
  250. print(system_prompt)
  251. data["messages"] = add_or_update_system_message(
  252. f"\n{system_prompt}", data["messages"]
  253. )
  254. del data["tool_ids"]
  255. # If docs field is present, generate RAG completions
  256. if "docs" in data:
  257. data = {**data}
  258. data["messages"], citations = rag_messages(
  259. docs=data["docs"],
  260. messages=data["messages"],
  261. template=rag_app.state.config.RAG_TEMPLATE,
  262. embedding_function=rag_app.state.EMBEDDING_FUNCTION,
  263. k=rag_app.state.config.TOP_K,
  264. reranking_function=rag_app.state.sentence_transformer_rf,
  265. r=rag_app.state.config.RELEVANCE_THRESHOLD,
  266. hybrid_search=rag_app.state.config.ENABLE_RAG_HYBRID_SEARCH,
  267. )
  268. del data["docs"]
  269. log.debug(
  270. f"data['messages']: {data['messages']}, citations: {citations}"
  271. )
  272. modified_body_bytes = json.dumps(data).encode("utf-8")
  273. # Replace the request body with the modified one
  274. request._body = modified_body_bytes
  275. # Set custom header to ensure content-length matches new body length
  276. request.headers.__dict__["_list"] = [
  277. (b"content-length", str(len(modified_body_bytes)).encode("utf-8")),
  278. *[
  279. (k, v)
  280. for k, v in request.headers.raw
  281. if k.lower() != b"content-length"
  282. ],
  283. ]
  284. response = await call_next(request)
  285. if return_citations:
  286. # Inject the citations into the response
  287. if isinstance(response, StreamingResponse):
  288. # If it's a streaming response, inject it as SSE event or NDJSON line
  289. content_type = response.headers.get("Content-Type")
  290. if "text/event-stream" in content_type:
  291. return StreamingResponse(
  292. self.openai_stream_wrapper(response.body_iterator, citations),
  293. )
  294. if "application/x-ndjson" in content_type:
  295. return StreamingResponse(
  296. self.ollama_stream_wrapper(response.body_iterator, citations),
  297. )
  298. return response
  299. async def _receive(self, body: bytes):
  300. return {"type": "http.request", "body": body, "more_body": False}
  301. async def openai_stream_wrapper(self, original_generator, citations):
  302. yield f"data: {json.dumps({'citations': citations})}\n\n"
  303. async for data in original_generator:
  304. yield data
  305. async def ollama_stream_wrapper(self, original_generator, citations):
  306. yield f"{json.dumps({'citations': citations})}\n"
  307. async for data in original_generator:
  308. yield data
# Register the chat-completion body rewriter (runs for every request).
app.add_middleware(ChatCompletionMiddleware)
  310. def filter_pipeline(payload, user):
  311. user = {"id": user.id, "name": user.name, "role": user.role}
  312. model_id = payload["model"]
  313. filters = [
  314. model
  315. for model in app.state.MODELS.values()
  316. if "pipeline" in model
  317. and "type" in model["pipeline"]
  318. and model["pipeline"]["type"] == "filter"
  319. and (
  320. model["pipeline"]["pipelines"] == ["*"]
  321. or any(
  322. model_id == target_model_id
  323. for target_model_id in model["pipeline"]["pipelines"]
  324. )
  325. )
  326. ]
  327. sorted_filters = sorted(filters, key=lambda x: x["pipeline"]["priority"])
  328. model = app.state.MODELS[model_id]
  329. if "pipeline" in model:
  330. sorted_filters.append(model)
  331. for filter in sorted_filters:
  332. r = None
  333. try:
  334. urlIdx = filter["urlIdx"]
  335. url = openai_app.state.config.OPENAI_API_BASE_URLS[urlIdx]
  336. key = openai_app.state.config.OPENAI_API_KEYS[urlIdx]
  337. if key != "":
  338. headers = {"Authorization": f"Bearer {key}"}
  339. r = requests.post(
  340. f"{url}/{filter['id']}/filter/inlet",
  341. headers=headers,
  342. json={
  343. "user": user,
  344. "body": payload,
  345. },
  346. )
  347. r.raise_for_status()
  348. payload = r.json()
  349. except Exception as e:
  350. # Handle connection error here
  351. print(f"Connection error: {e}")
  352. if r is not None:
  353. try:
  354. res = r.json()
  355. if "detail" in res:
  356. return JSONResponse(
  357. status_code=r.status_code,
  358. content=res,
  359. )
  360. except:
  361. pass
  362. else:
  363. pass
  364. if "pipeline" not in app.state.MODELS[model_id]:
  365. if "chat_id" in payload:
  366. del payload["chat_id"]
  367. if "title" in payload:
  368. del payload["title"]
  369. return payload
  370. class PipelineMiddleware(BaseHTTPMiddleware):
  371. async def dispatch(self, request: Request, call_next):
  372. if request.method == "POST" and (
  373. "/ollama/api/chat" in request.url.path
  374. or "/chat/completions" in request.url.path
  375. ):
  376. log.debug(f"request.url.path: {request.url.path}")
  377. # Read the original request body
  378. body = await request.body()
  379. # Decode body to string
  380. body_str = body.decode("utf-8")
  381. # Parse string to JSON
  382. data = json.loads(body_str) if body_str else {}
  383. user = get_current_user(
  384. get_http_authorization_cred(request.headers.get("Authorization"))
  385. )
  386. data = filter_pipeline(data, user)
  387. modified_body_bytes = json.dumps(data).encode("utf-8")
  388. # Replace the request body with the modified one
  389. request._body = modified_body_bytes
  390. # Set custom header to ensure content-length matches new body length
  391. request.headers.__dict__["_list"] = [
  392. (b"content-length", str(len(modified_body_bytes)).encode("utf-8")),
  393. *[
  394. (k, v)
  395. for k, v in request.headers.raw
  396. if k.lower() != b"content-length"
  397. ],
  398. ]
  399. response = await call_next(request)
  400. return response
  401. async def _receive(self, body: bytes):
  402. return {"type": "http.request", "body": body, "more_body": False}
# Register the pipeline inlet filter middleware.
app.add_middleware(PipelineMiddleware)

# CORS: origins is ["*"] (see above), so all origins are allowed with
# credentials, any method, and any header.
app.add_middleware(
    CORSMiddleware,
    allow_origins=origins,
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
  411. @app.middleware("http")
  412. async def check_url(request: Request, call_next):
  413. if len(app.state.MODELS) == 0:
  414. await get_all_models()
  415. else:
  416. pass
  417. start_time = int(time.time())
  418. response = await call_next(request)
  419. process_time = int(time.time()) - start_time
  420. response.headers["X-Process-Time"] = str(process_time)
  421. return response
  422. @app.middleware("http")
  423. async def update_embedding_function(request: Request, call_next):
  424. response = await call_next(request)
  425. if "/embedding/update" in request.url.path:
  426. webui_app.state.EMBEDDING_FUNCTION = rag_app.state.EMBEDDING_FUNCTION
  427. return response
# Mount the sub-applications under their URL prefixes.
app.mount("/ws", socket_app)
app.mount("/ollama", ollama_app)
app.mount("/openai", openai_app)

app.mount("/images/api/v1", images_app)
app.mount("/audio/api/v1", audio_app)
app.mount("/rag/api/v1", rag_app)

app.mount("/api/v1", webui_app)

# Initial sync of the webui embedding function with the RAG app's (kept
# in sync afterwards by the update_embedding_function middleware).
webui_app.state.EMBEDDING_FUNCTION = rag_app.state.EMBEDDING_FUNCTION
  436. async def get_all_models():
  437. openai_models = []
  438. ollama_models = []
  439. if app.state.config.ENABLE_OPENAI_API:
  440. openai_models = await get_openai_models()
  441. openai_models = openai_models["data"]
  442. if app.state.config.ENABLE_OLLAMA_API:
  443. ollama_models = await get_ollama_models()
  444. ollama_models = [
  445. {
  446. "id": model["model"],
  447. "name": model["name"],
  448. "object": "model",
  449. "created": int(time.time()),
  450. "owned_by": "ollama",
  451. "ollama": model,
  452. }
  453. for model in ollama_models["models"]
  454. ]
  455. models = openai_models + ollama_models
  456. custom_models = Models.get_all_models()
  457. for custom_model in custom_models:
  458. if custom_model.base_model_id == None:
  459. for model in models:
  460. if (
  461. custom_model.id == model["id"]
  462. or custom_model.id == model["id"].split(":")[0]
  463. ):
  464. model["name"] = custom_model.name
  465. model["info"] = custom_model.model_dump()
  466. else:
  467. owned_by = "openai"
  468. for model in models:
  469. if (
  470. custom_model.base_model_id == model["id"]
  471. or custom_model.base_model_id == model["id"].split(":")[0]
  472. ):
  473. owned_by = model["owned_by"]
  474. break
  475. models.append(
  476. {
  477. "id": custom_model.id,
  478. "name": custom_model.name,
  479. "object": "model",
  480. "created": custom_model.created_at,
  481. "owned_by": owned_by,
  482. "info": custom_model.model_dump(),
  483. "preset": True,
  484. }
  485. )
  486. app.state.MODELS = {model["id"]: model for model in models}
  487. webui_app.state.MODELS = app.state.MODELS
  488. return models
  489. @app.get("/api/models")
  490. async def get_models(user=Depends(get_verified_user)):
  491. models = await get_all_models()
  492. # Filter out filter pipelines
  493. models = [
  494. model
  495. for model in models
  496. if "pipeline" not in model or model["pipeline"].get("type", None) != "filter"
  497. ]
  498. if app.state.config.ENABLE_MODEL_FILTER:
  499. if user.role == "user":
  500. models = list(
  501. filter(
  502. lambda model: model["id"] in app.state.config.MODEL_FILTER_LIST,
  503. models,
  504. )
  505. )
  506. return {"data": models}
  507. return {"data": models}
  508. @app.get("/api/task/config")
  509. async def get_task_config(user=Depends(get_verified_user)):
  510. return {
  511. "TASK_MODEL": app.state.config.TASK_MODEL,
  512. "TASK_MODEL_EXTERNAL": app.state.config.TASK_MODEL_EXTERNAL,
  513. "TITLE_GENERATION_PROMPT_TEMPLATE": app.state.config.TITLE_GENERATION_PROMPT_TEMPLATE,
  514. "SEARCH_QUERY_GENERATION_PROMPT_TEMPLATE": app.state.config.SEARCH_QUERY_GENERATION_PROMPT_TEMPLATE,
  515. "SEARCH_QUERY_PROMPT_LENGTH_THRESHOLD": app.state.config.SEARCH_QUERY_PROMPT_LENGTH_THRESHOLD,
  516. "TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE": app.state.config.TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE,
  517. }
class TaskConfigForm(BaseModel):
    # Request body for POST /api/task/config/update.
    # NOTE(review): under pydantic v2, Optional[...] without a default is
    # still a *required* field — confirm whether these two should carry
    # an explicit `= None` default.
    TASK_MODEL: Optional[str]
    TASK_MODEL_EXTERNAL: Optional[str]
    TITLE_GENERATION_PROMPT_TEMPLATE: str
    SEARCH_QUERY_GENERATION_PROMPT_TEMPLATE: str
    SEARCH_QUERY_PROMPT_LENGTH_THRESHOLD: int
    TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE: str
  525. @app.post("/api/task/config/update")
  526. async def update_task_config(form_data: TaskConfigForm, user=Depends(get_admin_user)):
  527. app.state.config.TASK_MODEL = form_data.TASK_MODEL
  528. app.state.config.TASK_MODEL_EXTERNAL = form_data.TASK_MODEL_EXTERNAL
  529. app.state.config.TITLE_GENERATION_PROMPT_TEMPLATE = (
  530. form_data.TITLE_GENERATION_PROMPT_TEMPLATE
  531. )
  532. app.state.config.SEARCH_QUERY_GENERATION_PROMPT_TEMPLATE = (
  533. form_data.SEARCH_QUERY_GENERATION_PROMPT_TEMPLATE
  534. )
  535. app.state.config.SEARCH_QUERY_PROMPT_LENGTH_THRESHOLD = (
  536. form_data.SEARCH_QUERY_PROMPT_LENGTH_THRESHOLD
  537. )
  538. app.state.config.TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE = (
  539. form_data.TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE
  540. )
  541. return {
  542. "TASK_MODEL": app.state.config.TASK_MODEL,
  543. "TASK_MODEL_EXTERNAL": app.state.config.TASK_MODEL_EXTERNAL,
  544. "TITLE_GENERATION_PROMPT_TEMPLATE": app.state.config.TITLE_GENERATION_PROMPT_TEMPLATE,
  545. "SEARCH_QUERY_GENERATION_PROMPT_TEMPLATE": app.state.config.SEARCH_QUERY_GENERATION_PROMPT_TEMPLATE,
  546. "SEARCH_QUERY_PROMPT_LENGTH_THRESHOLD": app.state.config.SEARCH_QUERY_PROMPT_LENGTH_THRESHOLD,
  547. "TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE": app.state.config.TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE,
  548. }
  549. @app.post("/api/task/title/completions")
  550. async def generate_title(form_data: dict, user=Depends(get_verified_user)):
  551. print("generate_title")
  552. model_id = form_data["model"]
  553. if model_id not in app.state.MODELS:
  554. raise HTTPException(
  555. status_code=status.HTTP_404_NOT_FOUND,
  556. detail="Model not found",
  557. )
  558. # Check if the user has a custom task model
  559. # If the user has a custom task model, use that model
  560. if app.state.MODELS[model_id]["owned_by"] == "ollama":
  561. if app.state.config.TASK_MODEL:
  562. task_model_id = app.state.config.TASK_MODEL
  563. if task_model_id in app.state.MODELS:
  564. model_id = task_model_id
  565. else:
  566. if app.state.config.TASK_MODEL_EXTERNAL:
  567. task_model_id = app.state.config.TASK_MODEL_EXTERNAL
  568. if task_model_id in app.state.MODELS:
  569. model_id = task_model_id
  570. print(model_id)
  571. model = app.state.MODELS[model_id]
  572. template = app.state.config.TITLE_GENERATION_PROMPT_TEMPLATE
  573. content = title_generation_template(
  574. template, form_data["prompt"], user.model_dump()
  575. )
  576. payload = {
  577. "model": model_id,
  578. "messages": [{"role": "user", "content": content}],
  579. "stream": False,
  580. "max_tokens": 50,
  581. "chat_id": form_data.get("chat_id", None),
  582. "title": True,
  583. }
  584. print(payload)
  585. payload = filter_pipeline(payload, user)
  586. if model["owned_by"] == "ollama":
  587. return await generate_ollama_chat_completion(
  588. OpenAIChatCompletionForm(**payload), user=user
  589. )
  590. else:
  591. return await generate_openai_chat_completion(payload, user=user)
  592. @app.post("/api/task/query/completions")
  593. async def generate_search_query(form_data: dict, user=Depends(get_verified_user)):
  594. print("generate_search_query")
  595. if len(form_data["prompt"]) < app.state.config.SEARCH_QUERY_PROMPT_LENGTH_THRESHOLD:
  596. raise HTTPException(
  597. status_code=status.HTTP_400_BAD_REQUEST,
  598. detail=f"Skip search query generation for short prompts (< {app.state.config.SEARCH_QUERY_PROMPT_LENGTH_THRESHOLD} characters)",
  599. )
  600. model_id = form_data["model"]
  601. if model_id not in app.state.MODELS:
  602. raise HTTPException(
  603. status_code=status.HTTP_404_NOT_FOUND,
  604. detail="Model not found",
  605. )
  606. # Check if the user has a custom task model
  607. # If the user has a custom task model, use that model
  608. if app.state.MODELS[model_id]["owned_by"] == "ollama":
  609. if app.state.config.TASK_MODEL:
  610. task_model_id = app.state.config.TASK_MODEL
  611. if task_model_id in app.state.MODELS:
  612. model_id = task_model_id
  613. else:
  614. if app.state.config.TASK_MODEL_EXTERNAL:
  615. task_model_id = app.state.config.TASK_MODEL_EXTERNAL
  616. if task_model_id in app.state.MODELS:
  617. model_id = task_model_id
  618. print(model_id)
  619. model = app.state.MODELS[model_id]
  620. template = app.state.config.SEARCH_QUERY_GENERATION_PROMPT_TEMPLATE
  621. content = search_query_generation_template(
  622. template, form_data["prompt"], user.model_dump()
  623. )
  624. payload = {
  625. "model": model_id,
  626. "messages": [{"role": "user", "content": content}],
  627. "stream": False,
  628. "max_tokens": 30,
  629. }
  630. print(payload)
  631. payload = filter_pipeline(payload, user)
  632. if model["owned_by"] == "ollama":
  633. return await generate_ollama_chat_completion(
  634. OpenAIChatCompletionForm(**payload), user=user
  635. )
  636. else:
  637. return await generate_openai_chat_completion(payload, user=user)
  638. @app.post("/api/task/tools/completions")
  639. async def get_tools_function_calling(form_data: dict, user=Depends(get_verified_user)):
  640. print("get_tools_function_calling")
  641. model_id = form_data["model"]
  642. if model_id not in app.state.MODELS:
  643. raise HTTPException(
  644. status_code=status.HTTP_404_NOT_FOUND,
  645. detail="Model not found",
  646. )
  647. # Check if the user has a custom task model
  648. # If the user has a custom task model, use that model
  649. if app.state.MODELS[model_id]["owned_by"] == "ollama":
  650. if app.state.config.TASK_MODEL:
  651. task_model_id = app.state.config.TASK_MODEL
  652. if task_model_id in app.state.MODELS:
  653. model_id = task_model_id
  654. else:
  655. if app.state.config.TASK_MODEL_EXTERNAL:
  656. task_model_id = app.state.config.TASK_MODEL_EXTERNAL
  657. if task_model_id in app.state.MODELS:
  658. model_id = task_model_id
  659. print(model_id)
  660. template = app.state.config.TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE
  661. return await get_function_call_response(
  662. form_data["prompt"], form_data["tool_id"], template, model_id, user
  663. )
  664. @app.post("/api/chat/completions")
  665. async def generate_chat_completions(form_data: dict, user=Depends(get_verified_user)):
  666. model_id = form_data["model"]
  667. if model_id not in app.state.MODELS:
  668. raise HTTPException(
  669. status_code=status.HTTP_404_NOT_FOUND,
  670. detail="Model not found",
  671. )
  672. model = app.state.MODELS[model_id]
  673. print(model)
  674. if model["owned_by"] == "ollama":
  675. return await generate_ollama_chat_completion(
  676. OpenAIChatCompletionForm(**form_data), user=user
  677. )
  678. else:
  679. return await generate_openai_chat_completion(form_data, user=user)
  680. @app.post("/api/chat/completed")
  681. async def chat_completed(form_data: dict, user=Depends(get_verified_user)):
  682. data = form_data
  683. model_id = data["model"]
  684. filters = [
  685. model
  686. for model in app.state.MODELS.values()
  687. if "pipeline" in model
  688. and "type" in model["pipeline"]
  689. and model["pipeline"]["type"] == "filter"
  690. and (
  691. model["pipeline"]["pipelines"] == ["*"]
  692. or any(
  693. model_id == target_model_id
  694. for target_model_id in model["pipeline"]["pipelines"]
  695. )
  696. )
  697. ]
  698. sorted_filters = sorted(filters, key=lambda x: x["pipeline"]["priority"])
  699. print(model_id)
  700. if model_id in app.state.MODELS:
  701. model = app.state.MODELS[model_id]
  702. if "pipeline" in model:
  703. sorted_filters = [model] + sorted_filters
  704. for filter in sorted_filters:
  705. r = None
  706. try:
  707. urlIdx = filter["urlIdx"]
  708. url = openai_app.state.config.OPENAI_API_BASE_URLS[urlIdx]
  709. key = openai_app.state.config.OPENAI_API_KEYS[urlIdx]
  710. if key != "":
  711. headers = {"Authorization": f"Bearer {key}"}
  712. r = requests.post(
  713. f"{url}/{filter['id']}/filter/outlet",
  714. headers=headers,
  715. json={
  716. "user": {"id": user.id, "name": user.name, "role": user.role},
  717. "body": data,
  718. },
  719. )
  720. r.raise_for_status()
  721. data = r.json()
  722. except Exception as e:
  723. # Handle connection error here
  724. print(f"Connection error: {e}")
  725. if r is not None:
  726. try:
  727. res = r.json()
  728. if "detail" in res:
  729. return JSONResponse(
  730. status_code=r.status_code,
  731. content=res,
  732. )
  733. except:
  734. pass
  735. else:
  736. pass
  737. return data
  738. @app.get("/api/pipelines/list")
  739. async def get_pipelines_list(user=Depends(get_admin_user)):
  740. responses = await get_openai_models(raw=True)
  741. print(responses)
  742. urlIdxs = [
  743. idx
  744. for idx, response in enumerate(responses)
  745. if response != None and "pipelines" in response
  746. ]
  747. return {
  748. "data": [
  749. {
  750. "url": openai_app.state.config.OPENAI_API_BASE_URLS[urlIdx],
  751. "idx": urlIdx,
  752. }
  753. for urlIdx in urlIdxs
  754. ]
  755. }
  756. @app.post("/api/pipelines/upload")
  757. async def upload_pipeline(
  758. urlIdx: int = Form(...), file: UploadFile = File(...), user=Depends(get_admin_user)
  759. ):
  760. print("upload_pipeline", urlIdx, file.filename)
  761. # Check if the uploaded file is a python file
  762. if not file.filename.endswith(".py"):
  763. raise HTTPException(
  764. status_code=status.HTTP_400_BAD_REQUEST,
  765. detail="Only Python (.py) files are allowed.",
  766. )
  767. upload_folder = f"{CACHE_DIR}/pipelines"
  768. os.makedirs(upload_folder, exist_ok=True)
  769. file_path = os.path.join(upload_folder, file.filename)
  770. try:
  771. # Save the uploaded file
  772. with open(file_path, "wb") as buffer:
  773. shutil.copyfileobj(file.file, buffer)
  774. url = openai_app.state.config.OPENAI_API_BASE_URLS[urlIdx]
  775. key = openai_app.state.config.OPENAI_API_KEYS[urlIdx]
  776. headers = {"Authorization": f"Bearer {key}"}
  777. with open(file_path, "rb") as f:
  778. files = {"file": f}
  779. r = requests.post(f"{url}/pipelines/upload", headers=headers, files=files)
  780. r.raise_for_status()
  781. data = r.json()
  782. return {**data}
  783. except Exception as e:
  784. # Handle connection error here
  785. print(f"Connection error: {e}")
  786. detail = "Pipeline not found"
  787. if r is not None:
  788. try:
  789. res = r.json()
  790. if "detail" in res:
  791. detail = res["detail"]
  792. except:
  793. pass
  794. raise HTTPException(
  795. status_code=(r.status_code if r is not None else status.HTTP_404_NOT_FOUND),
  796. detail=detail,
  797. )
  798. finally:
  799. # Ensure the file is deleted after the upload is completed or on failure
  800. if os.path.exists(file_path):
  801. os.remove(file_path)
class AddPipelineForm(BaseModel):
    """Request body for POST /api/pipelines/add."""

    # URL of the pipeline to install on the pipelines server.
    url: str
    # Index into OPENAI_API_BASE_URLS selecting the target pipelines server.
    urlIdx: int
  805. @app.post("/api/pipelines/add")
  806. async def add_pipeline(form_data: AddPipelineForm, user=Depends(get_admin_user)):
  807. r = None
  808. try:
  809. urlIdx = form_data.urlIdx
  810. url = openai_app.state.config.OPENAI_API_BASE_URLS[urlIdx]
  811. key = openai_app.state.config.OPENAI_API_KEYS[urlIdx]
  812. headers = {"Authorization": f"Bearer {key}"}
  813. r = requests.post(
  814. f"{url}/pipelines/add", headers=headers, json={"url": form_data.url}
  815. )
  816. r.raise_for_status()
  817. data = r.json()
  818. return {**data}
  819. except Exception as e:
  820. # Handle connection error here
  821. print(f"Connection error: {e}")
  822. detail = "Pipeline not found"
  823. if r is not None:
  824. try:
  825. res = r.json()
  826. if "detail" in res:
  827. detail = res["detail"]
  828. except:
  829. pass
  830. raise HTTPException(
  831. status_code=(r.status_code if r is not None else status.HTTP_404_NOT_FOUND),
  832. detail=detail,
  833. )
class DeletePipelineForm(BaseModel):
    """Request body for DELETE /api/pipelines/delete."""

    # Identifier of the pipeline to remove.
    id: str
    # Index into OPENAI_API_BASE_URLS selecting the target pipelines server.
    urlIdx: int
  837. @app.delete("/api/pipelines/delete")
  838. async def delete_pipeline(form_data: DeletePipelineForm, user=Depends(get_admin_user)):
  839. r = None
  840. try:
  841. urlIdx = form_data.urlIdx
  842. url = openai_app.state.config.OPENAI_API_BASE_URLS[urlIdx]
  843. key = openai_app.state.config.OPENAI_API_KEYS[urlIdx]
  844. headers = {"Authorization": f"Bearer {key}"}
  845. r = requests.delete(
  846. f"{url}/pipelines/delete", headers=headers, json={"id": form_data.id}
  847. )
  848. r.raise_for_status()
  849. data = r.json()
  850. return {**data}
  851. except Exception as e:
  852. # Handle connection error here
  853. print(f"Connection error: {e}")
  854. detail = "Pipeline not found"
  855. if r is not None:
  856. try:
  857. res = r.json()
  858. if "detail" in res:
  859. detail = res["detail"]
  860. except:
  861. pass
  862. raise HTTPException(
  863. status_code=(r.status_code if r is not None else status.HTTP_404_NOT_FOUND),
  864. detail=detail,
  865. )
  866. @app.get("/api/pipelines")
  867. async def get_pipelines(urlIdx: Optional[int] = None, user=Depends(get_admin_user)):
  868. r = None
  869. try:
  870. urlIdx
  871. url = openai_app.state.config.OPENAI_API_BASE_URLS[urlIdx]
  872. key = openai_app.state.config.OPENAI_API_KEYS[urlIdx]
  873. headers = {"Authorization": f"Bearer {key}"}
  874. r = requests.get(f"{url}/pipelines", headers=headers)
  875. r.raise_for_status()
  876. data = r.json()
  877. return {**data}
  878. except Exception as e:
  879. # Handle connection error here
  880. print(f"Connection error: {e}")
  881. detail = "Pipeline not found"
  882. if r is not None:
  883. try:
  884. res = r.json()
  885. if "detail" in res:
  886. detail = res["detail"]
  887. except:
  888. pass
  889. raise HTTPException(
  890. status_code=(r.status_code if r is not None else status.HTTP_404_NOT_FOUND),
  891. detail=detail,
  892. )
  893. @app.get("/api/pipelines/{pipeline_id}/valves")
  894. async def get_pipeline_valves(
  895. urlIdx: Optional[int], pipeline_id: str, user=Depends(get_admin_user)
  896. ):
  897. models = await get_all_models()
  898. r = None
  899. try:
  900. url = openai_app.state.config.OPENAI_API_BASE_URLS[urlIdx]
  901. key = openai_app.state.config.OPENAI_API_KEYS[urlIdx]
  902. headers = {"Authorization": f"Bearer {key}"}
  903. r = requests.get(f"{url}/{pipeline_id}/valves", headers=headers)
  904. r.raise_for_status()
  905. data = r.json()
  906. return {**data}
  907. except Exception as e:
  908. # Handle connection error here
  909. print(f"Connection error: {e}")
  910. detail = "Pipeline not found"
  911. if r is not None:
  912. try:
  913. res = r.json()
  914. if "detail" in res:
  915. detail = res["detail"]
  916. except:
  917. pass
  918. raise HTTPException(
  919. status_code=(r.status_code if r is not None else status.HTTP_404_NOT_FOUND),
  920. detail=detail,
  921. )
  922. @app.get("/api/pipelines/{pipeline_id}/valves/spec")
  923. async def get_pipeline_valves_spec(
  924. urlIdx: Optional[int], pipeline_id: str, user=Depends(get_admin_user)
  925. ):
  926. models = await get_all_models()
  927. r = None
  928. try:
  929. url = openai_app.state.config.OPENAI_API_BASE_URLS[urlIdx]
  930. key = openai_app.state.config.OPENAI_API_KEYS[urlIdx]
  931. headers = {"Authorization": f"Bearer {key}"}
  932. r = requests.get(f"{url}/{pipeline_id}/valves/spec", headers=headers)
  933. r.raise_for_status()
  934. data = r.json()
  935. return {**data}
  936. except Exception as e:
  937. # Handle connection error here
  938. print(f"Connection error: {e}")
  939. detail = "Pipeline not found"
  940. if r is not None:
  941. try:
  942. res = r.json()
  943. if "detail" in res:
  944. detail = res["detail"]
  945. except:
  946. pass
  947. raise HTTPException(
  948. status_code=(r.status_code if r is not None else status.HTTP_404_NOT_FOUND),
  949. detail=detail,
  950. )
  951. @app.post("/api/pipelines/{pipeline_id}/valves/update")
  952. async def update_pipeline_valves(
  953. urlIdx: Optional[int],
  954. pipeline_id: str,
  955. form_data: dict,
  956. user=Depends(get_admin_user),
  957. ):
  958. models = await get_all_models()
  959. r = None
  960. try:
  961. url = openai_app.state.config.OPENAI_API_BASE_URLS[urlIdx]
  962. key = openai_app.state.config.OPENAI_API_KEYS[urlIdx]
  963. headers = {"Authorization": f"Bearer {key}"}
  964. r = requests.post(
  965. f"{url}/{pipeline_id}/valves/update",
  966. headers=headers,
  967. json={**form_data},
  968. )
  969. r.raise_for_status()
  970. data = r.json()
  971. return {**data}
  972. except Exception as e:
  973. # Handle connection error here
  974. print(f"Connection error: {e}")
  975. detail = "Pipeline not found"
  976. if r is not None:
  977. try:
  978. res = r.json()
  979. if "detail" in res:
  980. detail = res["detail"]
  981. except:
  982. pass
  983. raise HTTPException(
  984. status_code=(r.status_code if r is not None else status.HTTP_404_NOT_FOUND),
  985. detail=detail,
  986. )
  987. @app.get("/api/config")
  988. async def get_app_config():
  989. # Checking and Handling the Absence of 'ui' in CONFIG_DATA
  990. default_locale = "en-US"
  991. if "ui" in CONFIG_DATA:
  992. default_locale = CONFIG_DATA["ui"].get("default_locale", "en-US")
  993. # The Rest of the Function Now Uses the Variables Defined Above
  994. return {
  995. "status": True,
  996. "name": WEBUI_NAME,
  997. "version": VERSION,
  998. "default_locale": default_locale,
  999. "default_models": webui_app.state.config.DEFAULT_MODELS,
  1000. "default_prompt_suggestions": webui_app.state.config.DEFAULT_PROMPT_SUGGESTIONS,
  1001. "features": {
  1002. "auth": WEBUI_AUTH,
  1003. "auth_trusted_header": bool(webui_app.state.AUTH_TRUSTED_EMAIL_HEADER),
  1004. "enable_signup": webui_app.state.config.ENABLE_SIGNUP,
  1005. "enable_web_search": rag_app.state.config.ENABLE_RAG_WEB_SEARCH,
  1006. "enable_image_generation": images_app.state.config.ENABLED,
  1007. "enable_community_sharing": webui_app.state.config.ENABLE_COMMUNITY_SHARING,
  1008. "enable_admin_export": ENABLE_ADMIN_EXPORT,
  1009. },
  1010. "audio": {
  1011. "tts": {
  1012. "engine": audio_app.state.config.TTS_ENGINE,
  1013. "voice": audio_app.state.config.TTS_VOICE,
  1014. },
  1015. "stt": {
  1016. "engine": audio_app.state.config.STT_ENGINE,
  1017. },
  1018. },
  1019. }
  1020. @app.get("/api/config/model/filter")
  1021. async def get_model_filter_config(user=Depends(get_admin_user)):
  1022. return {
  1023. "enabled": app.state.config.ENABLE_MODEL_FILTER,
  1024. "models": app.state.config.MODEL_FILTER_LIST,
  1025. }
class ModelFilterConfigForm(BaseModel):
    """Request body for POST /api/config/model/filter."""

    # Whether model filtering is active.
    enabled: bool
    # Model ids the filter allows.
    models: List[str]
  1029. @app.post("/api/config/model/filter")
  1030. async def update_model_filter_config(
  1031. form_data: ModelFilterConfigForm, user=Depends(get_admin_user)
  1032. ):
  1033. app.state.config.ENABLE_MODEL_FILTER = form_data.enabled
  1034. app.state.config.MODEL_FILTER_LIST = form_data.models
  1035. return {
  1036. "enabled": app.state.config.ENABLE_MODEL_FILTER,
  1037. "models": app.state.config.MODEL_FILTER_LIST,
  1038. }
  1039. @app.get("/api/webhook")
  1040. async def get_webhook_url(user=Depends(get_admin_user)):
  1041. return {
  1042. "url": app.state.config.WEBHOOK_URL,
  1043. }
class UrlForm(BaseModel):
    """Request body for POST /api/webhook."""

    # New webhook URL to store.
    url: str
  1046. @app.post("/api/webhook")
  1047. async def update_webhook_url(form_data: UrlForm, user=Depends(get_admin_user)):
  1048. app.state.config.WEBHOOK_URL = form_data.url
  1049. webui_app.state.WEBHOOK_URL = app.state.config.WEBHOOK_URL
  1050. return {"url": app.state.config.WEBHOOK_URL}
  1051. @app.get("/api/version")
  1052. async def get_app_config():
  1053. return {
  1054. "version": VERSION,
  1055. }
  1056. @app.get("/api/changelog")
  1057. async def get_app_changelog():
  1058. return {key: CHANGELOG[key] for idx, key in enumerate(CHANGELOG) if idx < 5}
  1059. @app.get("/api/version/updates")
  1060. async def get_app_latest_release_version():
  1061. try:
  1062. async with aiohttp.ClientSession(trust_env=True) as session:
  1063. async with session.get(
  1064. "https://api.github.com/repos/open-webui/open-webui/releases/latest"
  1065. ) as response:
  1066. response.raise_for_status()
  1067. data = await response.json()
  1068. latest_version = data["tag_name"]
  1069. return {"current": VERSION, "latest": latest_version[1:]}
  1070. except aiohttp.ClientError as e:
  1071. raise HTTPException(
  1072. status_code=status.HTTP_503_SERVICE_UNAVAILABLE,
  1073. detail=ERROR_MESSAGES.RATE_LIMIT_EXCEEDED,
  1074. )
  1075. @app.get("/manifest.json")
  1076. async def get_manifest_json():
  1077. return {
  1078. "name": WEBUI_NAME,
  1079. "short_name": WEBUI_NAME,
  1080. "start_url": "/",
  1081. "display": "standalone",
  1082. "background_color": "#343541",
  1083. "theme_color": "#343541",
  1084. "orientation": "portrait-primary",
  1085. "icons": [{"src": "/static/logo.png", "type": "image/png", "sizes": "500x500"}],
  1086. }
@app.get("/opensearch.xml")
async def get_opensearch_xml():
    """Serve an OpenSearch description document so browsers can register
    this instance as a search engine."""
    # NOTE: the {"{searchTerms}"} expression emits the literal placeholder
    # "{searchTerms}" that the browser substitutes with the user's query.
    xml_content = rf"""
<OpenSearchDescription xmlns="http://a9.com/-/spec/opensearch/1.1/" xmlns:moz="http://www.mozilla.org/2006/browser/search/">
<ShortName>{WEBUI_NAME}</ShortName>
<Description>Search {WEBUI_NAME}</Description>
<InputEncoding>UTF-8</InputEncoding>
<Image width="16" height="16" type="image/x-icon">{WEBUI_URL}/favicon.png</Image>
<Url type="text/html" method="get" template="{WEBUI_URL}/?q={"{searchTerms}"}"/>
<moz:SearchForm>{WEBUI_URL}</moz:SearchForm>
</OpenSearchDescription>
"""
    return Response(content=xml_content, media_type="application/xml")
  1100. @app.get("/health")
  1101. async def healthcheck():
  1102. return {"status": True}
# Static assets bundled with the application.
app.mount("/static", StaticFiles(directory=STATIC_DIR), name="static")
# Files written to the cache directory at runtime (served read-only).
app.mount("/cache", StaticFiles(directory=CACHE_DIR), name="cache")

# Serve the compiled frontend from "/" when a build exists; otherwise run
# in API-only mode. This mount must come last so it does not shadow the
# API routes registered above.
if os.path.exists(FRONTEND_BUILD_DIR):
    # Ensure .js files are served as text/javascript on platforms whose
    # MIME registry maps them incorrectly.
    mimetypes.add_type("text/javascript", ".js")
    app.mount(
        "/",
        SPAStaticFiles(directory=FRONTEND_BUILD_DIR, html=True),
        name="spa-static-files",
    )
else:
    log.warning(
        f"Frontend build directory not found at '{FRONTEND_BUILD_DIR}'. Serving API only."
    )