# main.py

from contextlib import asynccontextmanager
from bs4 import BeautifulSoup
import json
import markdown
import time
import os
import sys
import logging
import aiohttp
import requests
import mimetypes
import shutil
import inspect
import asyncio

from fastapi import FastAPI, Request, Depends, status, UploadFile, File, Form
from fastapi.staticfiles import StaticFiles
from fastapi.responses import JSONResponse
from fastapi import HTTPException
from fastapi.middleware.wsgi import WSGIMiddleware
from fastapi.middleware.cors import CORSMiddleware
from starlette.exceptions import HTTPException as StarletteHTTPException
from starlette.middleware.base import BaseHTTPMiddleware
from starlette.responses import StreamingResponse, Response

from apps.socket.main import app as socket_app
from apps.ollama.main import (
    app as ollama_app,
    OpenAIChatCompletionForm,
    get_all_models as get_ollama_models,
    generate_openai_chat_completion as generate_ollama_chat_completion,
)
from apps.openai.main import (
    app as openai_app,
    get_all_models as get_openai_models,
    generate_chat_completion as generate_openai_chat_completion,
)
from apps.audio.main import app as audio_app
from apps.images.main import app as images_app
from apps.rag.main import app as rag_app
from apps.webui.main import app as webui_app

from pydantic import BaseModel
from typing import List, Optional

from apps.webui.models.models import Models, ModelModel
from apps.webui.models.tools import Tools
from apps.webui.utils import load_toolkit_module_by_id

from utils.utils import (
    get_admin_user,
    get_verified_user,
    get_current_user,
    get_http_authorization_cred,
)
from utils.task import (
    title_generation_template,
    search_query_generation_template,
    tools_function_calling_generation_template,
)
from utils.misc import get_last_user_message, add_or_update_system_message

from apps.rag.utils import get_rag_context, rag_template

from config import (
    CONFIG_DATA,
    WEBUI_NAME,
    WEBUI_URL,
    WEBUI_AUTH,
    ENV,
    VERSION,
    CHANGELOG,
    FRONTEND_BUILD_DIR,
    CACHE_DIR,
    STATIC_DIR,
    ENABLE_OPENAI_API,
    ENABLE_OLLAMA_API,
    ENABLE_MODEL_FILTER,
    MODEL_FILTER_LIST,
    GLOBAL_LOG_LEVEL,
    SRC_LOG_LEVELS,
    WEBHOOK_URL,
    ENABLE_ADMIN_EXPORT,
    WEBUI_BUILD_HASH,
    TASK_MODEL,
    TASK_MODEL_EXTERNAL,
    TITLE_GENERATION_PROMPT_TEMPLATE,
    SEARCH_QUERY_GENERATION_PROMPT_TEMPLATE,
    SEARCH_QUERY_PROMPT_LENGTH_THRESHOLD,
    TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE,
    AppConfig,
)
from constants import ERROR_MESSAGES

logging.basicConfig(stream=sys.stdout, level=GLOBAL_LOG_LEVEL)
log = logging.getLogger(__name__)
log.setLevel(SRC_LOG_LEVELS["MAIN"])
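

# Static-file handler for the compiled frontend: any 404 from the static
# mount falls back to index.html so the SPA's client-side router can
# handle the path.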
class SPAStaticFiles(StaticFiles):
    async def get_response(self, path: str, scope):
        try:
            return await super().get_response(path, scope)
        except (HTTPException, StarletteHTTPException) as ex:
            if ex.status_code == 404:
                return await super().get_response("index.html", scope)
            else:
                raise ex


print(
    rf"""
  ___                    __        __   _     _   _ ___
 / _ \ _ __   ___ _ __   \ \      / /__| |__ | | | |_ _|
| | | | '_ \ / _ \ '_ \   \ \ /\ / / _ \ '_ \| | | || |
| |_| | |_) |  __/ | | |   \ V  V /  __/ |_) | |_| || |
 \___/| .__/ \___|_| |_|    \_/\_/ \___|_.__/ \___/|___|
      |_|

v{VERSION} - building the best open-source AI user interface.
{f"Commit: {WEBUI_BUILD_HASH}" if WEBUI_BUILD_HASH != "dev-build" else ""}
https://github.com/open-webui/open-webui
"""
)
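

# Lifespan hook: no startup or shutdown work is needed yet, but FastAPI's
# lifespan protocol still expects an async context manager.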
@asynccontextmanager
async def lifespan(app: FastAPI):
    yield


app = FastAPI(
    docs_url="/docs" if ENV == "dev" else None, redoc_url=None, lifespan=lifespan
)

app.state.config = AppConfig()

app.state.config.ENABLE_OPENAI_API = ENABLE_OPENAI_API
app.state.config.ENABLE_OLLAMA_API = ENABLE_OLLAMA_API

app.state.config.ENABLE_MODEL_FILTER = ENABLE_MODEL_FILTER
app.state.config.MODEL_FILTER_LIST = MODEL_FILTER_LIST

app.state.config.WEBHOOK_URL = WEBHOOK_URL

app.state.config.TASK_MODEL = TASK_MODEL
app.state.config.TASK_MODEL_EXTERNAL = TASK_MODEL_EXTERNAL
app.state.config.TITLE_GENERATION_PROMPT_TEMPLATE = TITLE_GENERATION_PROMPT_TEMPLATE
app.state.config.SEARCH_QUERY_GENERATION_PROMPT_TEMPLATE = (
    SEARCH_QUERY_GENERATION_PROMPT_TEMPLATE
)
app.state.config.SEARCH_QUERY_PROMPT_LENGTH_THRESHOLD = (
    SEARCH_QUERY_PROMPT_LENGTH_THRESHOLD
)
app.state.config.TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE = (
    TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE
)

app.state.MODELS = {}

origins = ["*"]
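

# Ask a task model to pick a tool function for the current conversation:
# the tool's JSON specs are injected into the system prompt, the model is
# expected to reply with {"name": ..., "parameters": ...}, and the matching
# function from the toolkit module is then invoked (with a "__user__"
# argument when the function declares one).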
async def get_function_call_response(messages, tool_id, template, task_model_id, user):
    tool = Tools.get_tool_by_id(tool_id)
    tools_specs = json.dumps(tool.specs, indent=2)
    content = tools_function_calling_generation_template(template, tools_specs)

    user_message = get_last_user_message(messages)
    prompt = (
        "History:\n"
        + "\n".join(
            [
                f"{message['role'].upper()}: \"\"\"{message['content']}\"\"\""
                for message in messages[::-1][:4]
            ]
        )
        + f"\nQuery: {user_message}"
    )

    print(prompt)

    payload = {
        "model": task_model_id,
        "messages": [
            {"role": "system", "content": content},
            {"role": "user", "content": f"Query: {prompt}"},
        ],
        "stream": False,
    }

    payload = filter_pipeline(payload, user)
    model = app.state.MODELS[task_model_id]

    response = None
    try:
        if model["owned_by"] == "ollama":
            response = await generate_ollama_chat_completion(
                OpenAIChatCompletionForm(**payload), user=user
            )
        else:
            response = await generate_openai_chat_completion(payload, user=user)

        content = None
        if hasattr(response, "body_iterator"):
            async for chunk in response.body_iterator:
                data = json.loads(chunk.decode("utf-8"))
                content = data["choices"][0]["message"]["content"]

            # Cleanup any remaining background tasks if necessary
            if response.background is not None:
                await response.background()
        else:
            content = response["choices"][0]["message"]["content"]

        # Parse the function response
        if content is not None:
            print(f"content: {content}")
            result = json.loads(content)
            print(result)

            # Call the function
            if "name" in result:
                if tool_id in webui_app.state.TOOLS:
                    toolkit_module = webui_app.state.TOOLS[tool_id]
                else:
                    toolkit_module = load_toolkit_module_by_id(tool_id)
                    webui_app.state.TOOLS[tool_id] = toolkit_module

                function = getattr(toolkit_module, result["name"])
                function_result = None
                try:
                    # Get the signature of the function
                    sig = inspect.signature(function)
                    # Check if '__user__' is a parameter of the function
                    if "__user__" in sig.parameters:
                        # Call the function with the '__user__' parameter included
                        function_result = function(
                            **{
                                **result["parameters"],
                                "__user__": {
                                    "id": user.id,
                                    "email": user.email,
                                    "name": user.name,
                                    "role": user.role,
                                },
                            }
                        )
                    else:
                        # Call the function without modifying the parameters
                        function_result = function(**result["parameters"])
                except Exception as e:
                    print(e)

                # Add the function result to the system prompt
                if function_result:
                    return function_result
    except Exception as e:
        print(f"Error: {e}")

    return None
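

# Pre-process chat-completion requests: resolve a task model, run any
# requested tools, gather RAG context from attached docs, fold the result
# into the system message, and optionally stream citations back to the
# client ahead of the model output.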
class ChatCompletionMiddleware(BaseHTTPMiddleware):
    async def dispatch(self, request: Request, call_next):
        return_citations = False
        citations = []  # populated by the RAG step below; defaults to empty

        if request.method == "POST" and (
            "/ollama/api/chat" in request.url.path
            or "/chat/completions" in request.url.path
        ):
            log.debug(f"request.url.path: {request.url.path}")

            # Read the original request body
            body = await request.body()
            # Decode body to string
            body_str = body.decode("utf-8")
            # Parse string to JSON
            data = json.loads(body_str) if body_str else {}

            user = get_current_user(
                get_http_authorization_cred(request.headers.get("Authorization"))
            )

            # Remove the citations from the body
            return_citations = data.get("citations", False)
            if "citations" in data:
                del data["citations"]

            # Set the task model
            task_model_id = data["model"]
            if task_model_id not in app.state.MODELS:
                raise HTTPException(
                    status_code=status.HTTP_404_NOT_FOUND,
                    detail="Model not found",
                )

            # Check if the user has a custom task model
            # If the user has a custom task model, use that model
            if app.state.MODELS[task_model_id]["owned_by"] == "ollama":
                if (
                    app.state.config.TASK_MODEL
                    and app.state.config.TASK_MODEL in app.state.MODELS
                ):
                    task_model_id = app.state.config.TASK_MODEL
            else:
                if (
                    app.state.config.TASK_MODEL_EXTERNAL
                    and app.state.config.TASK_MODEL_EXTERNAL in app.state.MODELS
                ):
                    task_model_id = app.state.config.TASK_MODEL_EXTERNAL

            prompt = get_last_user_message(data["messages"])
            context = ""

            # If tool_ids field is present, call the functions
            if "tool_ids" in data:
                print(data["tool_ids"])
                for tool_id in data["tool_ids"]:
                    print(tool_id)
                    response = await get_function_call_response(
                        messages=data["messages"],
                        tool_id=tool_id,
                        template=app.state.config.TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE,
                        task_model_id=task_model_id,
                        user=user,
                    )

                    if response:
                        context += ("\n" if context != "" else "") + response
                del data["tool_ids"]

                print(f"tool_context: {context}")

            # If docs field is present, generate RAG completions
            if "docs" in data:
                data = {**data}
                rag_context, citations = get_rag_context(
                    docs=data["docs"],
                    messages=data["messages"],
                    embedding_function=rag_app.state.EMBEDDING_FUNCTION,
                    k=rag_app.state.config.TOP_K,
                    reranking_function=rag_app.state.sentence_transformer_rf,
                    r=rag_app.state.config.RELEVANCE_THRESHOLD,
                    hybrid_search=rag_app.state.config.ENABLE_RAG_HYBRID_SEARCH,
                )

                if rag_context:
                    context += ("\n" if context != "" else "") + rag_context

                del data["docs"]

                log.debug(f"rag_context: {rag_context}, citations: {citations}")

            if context != "":
                system_prompt = rag_template(
                    rag_app.state.config.RAG_TEMPLATE, context, prompt
                )

                print(system_prompt)

                data["messages"] = add_or_update_system_message(
                    f"\n{system_prompt}", data["messages"]
                )

            modified_body_bytes = json.dumps(data).encode("utf-8")

            # Replace the request body with the modified one
            request._body = modified_body_bytes

            # Set custom header to ensure content-length matches new body length
            request.headers.__dict__["_list"] = [
                (b"content-length", str(len(modified_body_bytes)).encode("utf-8")),
                *[
                    (k, v)
                    for k, v in request.headers.raw
                    if k.lower() != b"content-length"
                ],
            ]

        response = await call_next(request)

        if return_citations:
            # Inject the citations into the response
            if isinstance(response, StreamingResponse):
                # If it's a streaming response, inject it as an SSE event or NDJSON line
                content_type = response.headers.get("Content-Type", "")
                if "text/event-stream" in content_type:
                    return StreamingResponse(
                        self.openai_stream_wrapper(response.body_iterator, citations),
                    )
                if "application/x-ndjson" in content_type:
                    return StreamingResponse(
                        self.ollama_stream_wrapper(response.body_iterator, citations),
                    )

        return response

    async def _receive(self, body: bytes):
        return {"type": "http.request", "body": body, "more_body": False}

    async def openai_stream_wrapper(self, original_generator, citations):
        yield f"data: {json.dumps({'citations': citations})}\n\n"
        async for data in original_generator:
            yield data

    async def ollama_stream_wrapper(self, original_generator, citations):
        yield f"{json.dumps({'citations': citations})}\n"
        async for data in original_generator:
            yield data


app.add_middleware(ChatCompletionMiddleware)
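

# Run a request payload through every configured "filter"-type pipeline
# (sorted by priority), POSTing it to each pipeline's /filter/inlet endpoint
# and carrying the possibly-rewritten body forward; the target model's own
# pipeline, if any, runs last.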
def filter_pipeline(payload, user):
    user = {"id": user.id, "name": user.name, "role": user.role}
    model_id = payload["model"]
    filters = [
        model
        for model in app.state.MODELS.values()
        if "pipeline" in model
        and "type" in model["pipeline"]
        and model["pipeline"]["type"] == "filter"
        and (
            model["pipeline"]["pipelines"] == ["*"]
            or any(
                model_id == target_model_id
                for target_model_id in model["pipeline"]["pipelines"]
            )
        )
    ]
    sorted_filters = sorted(filters, key=lambda x: x["pipeline"]["priority"])

    model = app.state.MODELS[model_id]

    if "pipeline" in model:
        sorted_filters.append(model)

    for filter in sorted_filters:
        r = None
        try:
            urlIdx = filter["urlIdx"]

            url = openai_app.state.config.OPENAI_API_BASE_URLS[urlIdx]
            key = openai_app.state.config.OPENAI_API_KEYS[urlIdx]

            if key != "":
                headers = {"Authorization": f"Bearer {key}"}
                r = requests.post(
                    f"{url}/{filter['id']}/filter/inlet",
                    headers=headers,
                    json={
                        "user": user,
                        "body": payload,
                    },
                )

                r.raise_for_status()
                payload = r.json()
        except Exception as e:
            # Handle connection error here
            print(f"Connection error: {e}")

            if r is not None:
                try:
                    res = r.json()
                    if "detail" in res:
                        return JSONResponse(
                            status_code=r.status_code,
                            content=res,
                        )
                except Exception:
                    pass

    if "pipeline" not in app.state.MODELS[model_id]:
        if "chat_id" in payload:
            del payload["chat_id"]

        if "title" in payload:
            del payload["title"]

    return payload
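

# Middleware counterpart of filter_pipeline: applies the inlet filters to
# every chat-completion request body before it reaches the mounted apps.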
class PipelineMiddleware(BaseHTTPMiddleware):
    async def dispatch(self, request: Request, call_next):
        if request.method == "POST" and (
            "/ollama/api/chat" in request.url.path
            or "/chat/completions" in request.url.path
        ):
            log.debug(f"request.url.path: {request.url.path}")

            # Read the original request body
            body = await request.body()
            # Decode body to string
            body_str = body.decode("utf-8")
            # Parse string to JSON
            data = json.loads(body_str) if body_str else {}

            user = get_current_user(
                get_http_authorization_cred(request.headers.get("Authorization"))
            )

            data = filter_pipeline(data, user)

            modified_body_bytes = json.dumps(data).encode("utf-8")

            # Replace the request body with the modified one
            request._body = modified_body_bytes

            # Set custom header to ensure content-length matches new body length
            request.headers.__dict__["_list"] = [
                (b"content-length", str(len(modified_body_bytes)).encode("utf-8")),
                *[
                    (k, v)
                    for k, v in request.headers.raw
                    if k.lower() != b"content-length"
                ],
            ]

        response = await call_next(request)
        return response

    async def _receive(self, body: bytes):
        return {"type": "http.request", "body": body, "more_body": False}


app.add_middleware(PipelineMiddleware)


app.add_middleware(
    CORSMiddleware,
    allow_origins=origins,
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
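

# Lazily populate the model cache on the first request, and report how long
# each request took via an X-Process-Time header (whole-second granularity).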
@app.middleware("http")
async def check_url(request: Request, call_next):
    if len(app.state.MODELS) == 0:
        await get_all_models()

    start_time = int(time.time())
    response = await call_next(request)
    process_time = int(time.time()) - start_time
    response.headers["X-Process-Time"] = str(process_time)

    return response


@app.middleware("http")
async def update_embedding_function(request: Request, call_next):
    response = await call_next(request)
    if "/embedding/update" in request.url.path:
        webui_app.state.EMBEDDING_FUNCTION = rag_app.state.EMBEDDING_FUNCTION
    return response


app.mount("/ws", socket_app)

app.mount("/ollama", ollama_app)
app.mount("/openai", openai_app)

app.mount("/images/api/v1", images_app)
app.mount("/audio/api/v1", audio_app)
app.mount("/rag/api/v1", rag_app)

app.mount("/api/v1", webui_app)

webui_app.state.EMBEDDING_FUNCTION = rag_app.state.EMBEDDING_FUNCTION
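

# Build the unified model list: OpenAI models, Ollama models (normalized to
# the OpenAI shape), and custom models from the database, which either
# overlay an existing base model or are appended as presets.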
async def get_all_models():
    openai_models = []
    ollama_models = []

    if app.state.config.ENABLE_OPENAI_API:
        openai_models = await get_openai_models()
        openai_models = openai_models["data"]

    if app.state.config.ENABLE_OLLAMA_API:
        ollama_models = await get_ollama_models()
        ollama_models = [
            {
                "id": model["model"],
                "name": model["name"],
                "object": "model",
                "created": int(time.time()),
                "owned_by": "ollama",
                "ollama": model,
            }
            for model in ollama_models["models"]
        ]

    models = openai_models + ollama_models

    custom_models = Models.get_all_models()
    for custom_model in custom_models:
        if custom_model.base_model_id is None:
            for model in models:
                if (
                    custom_model.id == model["id"]
                    or custom_model.id == model["id"].split(":")[0]
                ):
                    model["name"] = custom_model.name
                    model["info"] = custom_model.model_dump()
        else:
            owned_by = "openai"
            for model in models:
                if (
                    custom_model.base_model_id == model["id"]
                    or custom_model.base_model_id == model["id"].split(":")[0]
                ):
                    owned_by = model["owned_by"]
                    break

            models.append(
                {
                    "id": custom_model.id,
                    "name": custom_model.name,
                    "object": "model",
                    "created": custom_model.created_at,
                    "owned_by": owned_by,
                    "info": custom_model.model_dump(),
                    "preset": True,
                }
            )

    app.state.MODELS = {model["id"]: model for model in models}
    webui_app.state.MODELS = app.state.MODELS

    return models
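

# Example request (hypothetical host and token):
#   curl http://localhost:8080/api/models -H "Authorization: Bearer $TOKEN"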
@app.get("/api/models")
async def get_models(user=Depends(get_verified_user)):
    models = await get_all_models()

    # Filter out filter pipelines
    models = [
        model
        for model in models
        if "pipeline" not in model or model["pipeline"].get("type", None) != "filter"
    ]

    if app.state.config.ENABLE_MODEL_FILTER:
        if user.role == "user":
            models = list(
                filter(
                    lambda model: model["id"] in app.state.config.MODEL_FILTER_LIST,
                    models,
                )
            )
            return {"data": models}

    return {"data": models}


@app.get("/api/task/config")
async def get_task_config(user=Depends(get_verified_user)):
    return {
        "TASK_MODEL": app.state.config.TASK_MODEL,
        "TASK_MODEL_EXTERNAL": app.state.config.TASK_MODEL_EXTERNAL,
        "TITLE_GENERATION_PROMPT_TEMPLATE": app.state.config.TITLE_GENERATION_PROMPT_TEMPLATE,
        "SEARCH_QUERY_GENERATION_PROMPT_TEMPLATE": app.state.config.SEARCH_QUERY_GENERATION_PROMPT_TEMPLATE,
        "SEARCH_QUERY_PROMPT_LENGTH_THRESHOLD": app.state.config.SEARCH_QUERY_PROMPT_LENGTH_THRESHOLD,
        "TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE": app.state.config.TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE,
    }


class TaskConfigForm(BaseModel):
    TASK_MODEL: Optional[str]
    TASK_MODEL_EXTERNAL: Optional[str]
    TITLE_GENERATION_PROMPT_TEMPLATE: str
    SEARCH_QUERY_GENERATION_PROMPT_TEMPLATE: str
    SEARCH_QUERY_PROMPT_LENGTH_THRESHOLD: int
    TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE: str


@app.post("/api/task/config/update")
async def update_task_config(form_data: TaskConfigForm, user=Depends(get_admin_user)):
    app.state.config.TASK_MODEL = form_data.TASK_MODEL
    app.state.config.TASK_MODEL_EXTERNAL = form_data.TASK_MODEL_EXTERNAL
    app.state.config.TITLE_GENERATION_PROMPT_TEMPLATE = (
        form_data.TITLE_GENERATION_PROMPT_TEMPLATE
    )
    app.state.config.SEARCH_QUERY_GENERATION_PROMPT_TEMPLATE = (
        form_data.SEARCH_QUERY_GENERATION_PROMPT_TEMPLATE
    )
    app.state.config.SEARCH_QUERY_PROMPT_LENGTH_THRESHOLD = (
        form_data.SEARCH_QUERY_PROMPT_LENGTH_THRESHOLD
    )
    app.state.config.TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE = (
        form_data.TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE
    )

    return {
        "TASK_MODEL": app.state.config.TASK_MODEL,
        "TASK_MODEL_EXTERNAL": app.state.config.TASK_MODEL_EXTERNAL,
        "TITLE_GENERATION_PROMPT_TEMPLATE": app.state.config.TITLE_GENERATION_PROMPT_TEMPLATE,
        "SEARCH_QUERY_GENERATION_PROMPT_TEMPLATE": app.state.config.SEARCH_QUERY_GENERATION_PROMPT_TEMPLATE,
        "SEARCH_QUERY_PROMPT_LENGTH_THRESHOLD": app.state.config.SEARCH_QUERY_PROMPT_LENGTH_THRESHOLD,
        "TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE": app.state.config.TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE,
    }
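

# The task endpoints below (title, search query, tool calling) share the same
# model-selection rule: for an Ollama-owned model, prefer TASK_MODEL;
# otherwise prefer TASK_MODEL_EXTERNAL, falling back to the requested model
# when the preferred one is not available.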
@app.post("/api/task/title/completions")
async def generate_title(form_data: dict, user=Depends(get_verified_user)):
    print("generate_title")

    model_id = form_data["model"]
    if model_id not in app.state.MODELS:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail="Model not found",
        )

    # Check if the user has a custom task model
    # If the user has a custom task model, use that model
    if app.state.MODELS[model_id]["owned_by"] == "ollama":
        if app.state.config.TASK_MODEL:
            task_model_id = app.state.config.TASK_MODEL
            if task_model_id in app.state.MODELS:
                model_id = task_model_id
    else:
        if app.state.config.TASK_MODEL_EXTERNAL:
            task_model_id = app.state.config.TASK_MODEL_EXTERNAL
            if task_model_id in app.state.MODELS:
                model_id = task_model_id

    print(model_id)
    model = app.state.MODELS[model_id]

    template = app.state.config.TITLE_GENERATION_PROMPT_TEMPLATE

    content = title_generation_template(
        template, form_data["prompt"], user.model_dump()
    )

    payload = {
        "model": model_id,
        "messages": [{"role": "user", "content": content}],
        "stream": False,
        "max_tokens": 50,
        "chat_id": form_data.get("chat_id", None),
        "title": True,
    }

    print(payload)
    payload = filter_pipeline(payload, user)

    if model["owned_by"] == "ollama":
        return await generate_ollama_chat_completion(
            OpenAIChatCompletionForm(**payload), user=user
        )
    else:
        return await generate_openai_chat_completion(payload, user=user)


@app.post("/api/task/query/completions")
async def generate_search_query(form_data: dict, user=Depends(get_verified_user)):
    print("generate_search_query")

    if len(form_data["prompt"]) < app.state.config.SEARCH_QUERY_PROMPT_LENGTH_THRESHOLD:
        raise HTTPException(
            status_code=status.HTTP_400_BAD_REQUEST,
            detail=f"Skip search query generation for short prompts (< {app.state.config.SEARCH_QUERY_PROMPT_LENGTH_THRESHOLD} characters)",
        )

    model_id = form_data["model"]
    if model_id not in app.state.MODELS:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail="Model not found",
        )

    # Check if the user has a custom task model
    # If the user has a custom task model, use that model
    if app.state.MODELS[model_id]["owned_by"] == "ollama":
        if app.state.config.TASK_MODEL:
            task_model_id = app.state.config.TASK_MODEL
            if task_model_id in app.state.MODELS:
                model_id = task_model_id
    else:
        if app.state.config.TASK_MODEL_EXTERNAL:
            task_model_id = app.state.config.TASK_MODEL_EXTERNAL
            if task_model_id in app.state.MODELS:
                model_id = task_model_id

    print(model_id)
    model = app.state.MODELS[model_id]

    template = app.state.config.SEARCH_QUERY_GENERATION_PROMPT_TEMPLATE

    content = search_query_generation_template(
        template, form_data["prompt"], user.model_dump()
    )

    payload = {
        "model": model_id,
        "messages": [{"role": "user", "content": content}],
        "stream": False,
        "max_tokens": 30,
    }

    print(payload)
    payload = filter_pipeline(payload, user)

    if model["owned_by"] == "ollama":
        return await generate_ollama_chat_completion(
            OpenAIChatCompletionForm(**payload), user=user
        )
    else:
        return await generate_openai_chat_completion(payload, user=user)


@app.post("/api/task/tools/completions")
async def get_tools_function_calling(form_data: dict, user=Depends(get_verified_user)):
    print("get_tools_function_calling")

    model_id = form_data["model"]
    if model_id not in app.state.MODELS:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail="Model not found",
        )

    # Check if the user has a custom task model
    # If the user has a custom task model, use that model
    if app.state.MODELS[model_id]["owned_by"] == "ollama":
        if app.state.config.TASK_MODEL:
            task_model_id = app.state.config.TASK_MODEL
            if task_model_id in app.state.MODELS:
                model_id = task_model_id
    else:
        if app.state.config.TASK_MODEL_EXTERNAL:
            task_model_id = app.state.config.TASK_MODEL_EXTERNAL
            if task_model_id in app.state.MODELS:
                model_id = task_model_id

    print(model_id)

    template = app.state.config.TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE

    return await get_function_call_response(
        form_data["messages"], form_data["tool_id"], template, model_id, user
    )
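

# Example request (hypothetical host, token, and model id):
#   curl -X POST http://localhost:8080/api/chat/completions \
#     -H "Authorization: Bearer $TOKEN" -H "Content-Type: application/json" \
#     -d '{"model": "llama3", "messages": [{"role": "user", "content": "Hi"}]}'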
@app.post("/api/chat/completions")
async def generate_chat_completions(form_data: dict, user=Depends(get_verified_user)):
    model_id = form_data["model"]
    if model_id not in app.state.MODELS:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail="Model not found",
        )

    model = app.state.MODELS[model_id]
    print(model)

    if model["owned_by"] == "ollama":
        return await generate_ollama_chat_completion(
            OpenAIChatCompletionForm(**form_data), user=user
        )
    else:
        return await generate_openai_chat_completion(form_data, user=user)


@app.post("/api/chat/completed")
async def chat_completed(form_data: dict, user=Depends(get_verified_user)):
    data = form_data
    model_id = data["model"]

    filters = [
        model
        for model in app.state.MODELS.values()
        if "pipeline" in model
        and "type" in model["pipeline"]
        and model["pipeline"]["type"] == "filter"
        and (
            model["pipeline"]["pipelines"] == ["*"]
            or any(
                model_id == target_model_id
                for target_model_id in model["pipeline"]["pipelines"]
            )
        )
    ]
    sorted_filters = sorted(filters, key=lambda x: x["pipeline"]["priority"])

    print(model_id)
    if model_id in app.state.MODELS:
        model = app.state.MODELS[model_id]
        if "pipeline" in model:
            sorted_filters = [model] + sorted_filters

    for filter in sorted_filters:
        r = None
        try:
            urlIdx = filter["urlIdx"]

            url = openai_app.state.config.OPENAI_API_BASE_URLS[urlIdx]
            key = openai_app.state.config.OPENAI_API_KEYS[urlIdx]

            if key != "":
                headers = {"Authorization": f"Bearer {key}"}
                r = requests.post(
                    f"{url}/{filter['id']}/filter/outlet",
                    headers=headers,
                    json={
                        "user": {"id": user.id, "name": user.name, "role": user.role},
                        "body": data,
                    },
                )

                r.raise_for_status()
                data = r.json()
        except Exception as e:
            # Handle connection error here
            print(f"Connection error: {e}")

            if r is not None:
                try:
                    res = r.json()
                    if "detail" in res:
                        return JSONResponse(
                            status_code=r.status_code,
                            content=res,
                        )
                except Exception:
                    pass

    return data


@app.get("/api/pipelines/list")
async def get_pipelines_list(user=Depends(get_admin_user)):
    responses = await get_openai_models(raw=True)
    print(responses)

    urlIdxs = [
        idx
        for idx, response in enumerate(responses)
        if response is not None and "pipelines" in response
    ]

    return {
        "data": [
            {
                "url": openai_app.state.config.OPENAI_API_BASE_URLS[urlIdx],
                "idx": urlIdx,
            }
            for urlIdx in urlIdxs
        ]
    }
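

# The admin endpoints below manage a remote Pipelines server: they proxy
# uploads, additions, deletions, and valve reads/updates to the
# OpenAI-compatible base URL selected by urlIdx.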
@app.post("/api/pipelines/upload")
async def upload_pipeline(
    urlIdx: int = Form(...), file: UploadFile = File(...), user=Depends(get_admin_user)
):
    print("upload_pipeline", urlIdx, file.filename)
    # Check if the uploaded file is a python file
    if not file.filename.endswith(".py"):
        raise HTTPException(
            status_code=status.HTTP_400_BAD_REQUEST,
            detail="Only Python (.py) files are allowed.",
        )

    upload_folder = f"{CACHE_DIR}/pipelines"
    os.makedirs(upload_folder, exist_ok=True)
    file_path = os.path.join(upload_folder, file.filename)

    r = None  # defined up front so the except block can safely inspect it
    try:
        # Save the uploaded file
        with open(file_path, "wb") as buffer:
            shutil.copyfileobj(file.file, buffer)

        url = openai_app.state.config.OPENAI_API_BASE_URLS[urlIdx]
        key = openai_app.state.config.OPENAI_API_KEYS[urlIdx]

        headers = {"Authorization": f"Bearer {key}"}

        with open(file_path, "rb") as f:
            files = {"file": f}
            r = requests.post(f"{url}/pipelines/upload", headers=headers, files=files)

        r.raise_for_status()
        data = r.json()

        return {**data}
    except Exception as e:
        # Handle connection error here
        print(f"Connection error: {e}")

        detail = "Pipeline not found"
        if r is not None:
            try:
                res = r.json()
                if "detail" in res:
                    detail = res["detail"]
            except Exception:
                pass

        raise HTTPException(
            status_code=(r.status_code if r is not None else status.HTTP_404_NOT_FOUND),
            detail=detail,
        )
    finally:
        # Ensure the file is deleted after the upload is completed or on failure
        if os.path.exists(file_path):
            os.remove(file_path)


class AddPipelineForm(BaseModel):
    url: str
    urlIdx: int


@app.post("/api/pipelines/add")
async def add_pipeline(form_data: AddPipelineForm, user=Depends(get_admin_user)):
    r = None
    try:
        urlIdx = form_data.urlIdx

        url = openai_app.state.config.OPENAI_API_BASE_URLS[urlIdx]
        key = openai_app.state.config.OPENAI_API_KEYS[urlIdx]

        headers = {"Authorization": f"Bearer {key}"}
        r = requests.post(
            f"{url}/pipelines/add", headers=headers, json={"url": form_data.url}
        )

        r.raise_for_status()
        data = r.json()

        return {**data}
    except Exception as e:
        # Handle connection error here
        print(f"Connection error: {e}")

        detail = "Pipeline not found"
        if r is not None:
            try:
                res = r.json()
                if "detail" in res:
                    detail = res["detail"]
            except Exception:
                pass

        raise HTTPException(
            status_code=(r.status_code if r is not None else status.HTTP_404_NOT_FOUND),
            detail=detail,
        )


class DeletePipelineForm(BaseModel):
    id: str
    urlIdx: int


@app.delete("/api/pipelines/delete")
async def delete_pipeline(form_data: DeletePipelineForm, user=Depends(get_admin_user)):
    r = None
    try:
        urlIdx = form_data.urlIdx

        url = openai_app.state.config.OPENAI_API_BASE_URLS[urlIdx]
        key = openai_app.state.config.OPENAI_API_KEYS[urlIdx]

        headers = {"Authorization": f"Bearer {key}"}
        r = requests.delete(
            f"{url}/pipelines/delete", headers=headers, json={"id": form_data.id}
        )

        r.raise_for_status()
        data = r.json()

        return {**data}
    except Exception as e:
        # Handle connection error here
        print(f"Connection error: {e}")

        detail = "Pipeline not found"
        if r is not None:
            try:
                res = r.json()
                if "detail" in res:
                    detail = res["detail"]
            except Exception:
                pass

        raise HTTPException(
            status_code=(r.status_code if r is not None else status.HTTP_404_NOT_FOUND),
            detail=detail,
        )


@app.get("/api/pipelines")
async def get_pipelines(urlIdx: Optional[int] = None, user=Depends(get_admin_user)):
    r = None
    try:
        url = openai_app.state.config.OPENAI_API_BASE_URLS[urlIdx]
        key = openai_app.state.config.OPENAI_API_KEYS[urlIdx]

        headers = {"Authorization": f"Bearer {key}"}
        r = requests.get(f"{url}/pipelines", headers=headers)

        r.raise_for_status()
        data = r.json()

        return {**data}
    except Exception as e:
        # Handle connection error here
        print(f"Connection error: {e}")

        detail = "Pipeline not found"
        if r is not None:
            try:
                res = r.json()
                if "detail" in res:
                    detail = res["detail"]
            except Exception:
                pass

        raise HTTPException(
            status_code=(r.status_code if r is not None else status.HTTP_404_NOT_FOUND),
            detail=detail,
        )


@app.get("/api/pipelines/{pipeline_id}/valves")
async def get_pipeline_valves(
    urlIdx: Optional[int], pipeline_id: str, user=Depends(get_admin_user)
):
    # Refresh the model cache so the urlIdx lookup below is current
    models = await get_all_models()

    r = None
    try:
        url = openai_app.state.config.OPENAI_API_BASE_URLS[urlIdx]
        key = openai_app.state.config.OPENAI_API_KEYS[urlIdx]

        headers = {"Authorization": f"Bearer {key}"}
        r = requests.get(f"{url}/{pipeline_id}/valves", headers=headers)

        r.raise_for_status()
        data = r.json()

        return {**data}
    except Exception as e:
        # Handle connection error here
        print(f"Connection error: {e}")

        detail = "Pipeline not found"
        if r is not None:
            try:
                res = r.json()
                if "detail" in res:
                    detail = res["detail"]
            except Exception:
                pass

        raise HTTPException(
            status_code=(r.status_code if r is not None else status.HTTP_404_NOT_FOUND),
            detail=detail,
        )


@app.get("/api/pipelines/{pipeline_id}/valves/spec")
async def get_pipeline_valves_spec(
    urlIdx: Optional[int], pipeline_id: str, user=Depends(get_admin_user)
):
    models = await get_all_models()

    r = None
    try:
        url = openai_app.state.config.OPENAI_API_BASE_URLS[urlIdx]
        key = openai_app.state.config.OPENAI_API_KEYS[urlIdx]

        headers = {"Authorization": f"Bearer {key}"}
        r = requests.get(f"{url}/{pipeline_id}/valves/spec", headers=headers)

        r.raise_for_status()
        data = r.json()

        return {**data}
    except Exception as e:
        # Handle connection error here
        print(f"Connection error: {e}")

        detail = "Pipeline not found"
        if r is not None:
            try:
                res = r.json()
                if "detail" in res:
                    detail = res["detail"]
            except Exception:
                pass

        raise HTTPException(
            status_code=(r.status_code if r is not None else status.HTTP_404_NOT_FOUND),
            detail=detail,
        )


@app.post("/api/pipelines/{pipeline_id}/valves/update")
async def update_pipeline_valves(
    urlIdx: Optional[int],
    pipeline_id: str,
    form_data: dict,
    user=Depends(get_admin_user),
):
    models = await get_all_models()

    r = None
    try:
        url = openai_app.state.config.OPENAI_API_BASE_URLS[urlIdx]
        key = openai_app.state.config.OPENAI_API_KEYS[urlIdx]

        headers = {"Authorization": f"Bearer {key}"}
        r = requests.post(
            f"{url}/{pipeline_id}/valves/update",
            headers=headers,
            json={**form_data},
        )

        r.raise_for_status()
        data = r.json()

        return {**data}
    except Exception as e:
        # Handle connection error here
        print(f"Connection error: {e}")

        detail = "Pipeline not found"
        if r is not None:
            try:
                res = r.json()
                if "detail" in res:
                    detail = res["detail"]
            except Exception:
                pass

        raise HTTPException(
            status_code=(r.status_code if r is not None else status.HTTP_404_NOT_FOUND),
            detail=detail,
        )


@app.get("/api/config")
async def get_app_config():
    # Fall back to the default locale when CONFIG_DATA has no 'ui' section
    default_locale = "en-US"
    if "ui" in CONFIG_DATA:
        default_locale = CONFIG_DATA["ui"].get("default_locale", "en-US")

    return {
        "status": True,
        "name": WEBUI_NAME,
        "version": VERSION,
        "default_locale": default_locale,
        "default_models": webui_app.state.config.DEFAULT_MODELS,
        "default_prompt_suggestions": webui_app.state.config.DEFAULT_PROMPT_SUGGESTIONS,
        "features": {
            "auth": WEBUI_AUTH,
            "auth_trusted_header": bool(webui_app.state.AUTH_TRUSTED_EMAIL_HEADER),
            "enable_signup": webui_app.state.config.ENABLE_SIGNUP,
            "enable_web_search": rag_app.state.config.ENABLE_RAG_WEB_SEARCH,
            "enable_image_generation": images_app.state.config.ENABLED,
            "enable_community_sharing": webui_app.state.config.ENABLE_COMMUNITY_SHARING,
            "enable_admin_export": ENABLE_ADMIN_EXPORT,
        },
        "audio": {
            "tts": {
                "engine": audio_app.state.config.TTS_ENGINE,
                "voice": audio_app.state.config.TTS_VOICE,
            },
            "stt": {
                "engine": audio_app.state.config.STT_ENGINE,
            },
        },
    }


@app.get("/api/config/model/filter")
async def get_model_filter_config(user=Depends(get_admin_user)):
    return {
        "enabled": app.state.config.ENABLE_MODEL_FILTER,
        "models": app.state.config.MODEL_FILTER_LIST,
    }


class ModelFilterConfigForm(BaseModel):
    enabled: bool
    models: List[str]


@app.post("/api/config/model/filter")
async def update_model_filter_config(
    form_data: ModelFilterConfigForm, user=Depends(get_admin_user)
):
    app.state.config.ENABLE_MODEL_FILTER = form_data.enabled
    app.state.config.MODEL_FILTER_LIST = form_data.models

    return {
        "enabled": app.state.config.ENABLE_MODEL_FILTER,
        "models": app.state.config.MODEL_FILTER_LIST,
    }


@app.get("/api/webhook")
async def get_webhook_url(user=Depends(get_admin_user)):
    return {
        "url": app.state.config.WEBHOOK_URL,
    }


class UrlForm(BaseModel):
    url: str


@app.post("/api/webhook")
async def update_webhook_url(form_data: UrlForm, user=Depends(get_admin_user)):
    app.state.config.WEBHOOK_URL = form_data.url
    webui_app.state.WEBHOOK_URL = app.state.config.WEBHOOK_URL

    return {"url": app.state.config.WEBHOOK_URL}


@app.get("/api/version")
async def get_app_version():
    return {
        "version": VERSION,
    }


@app.get("/api/changelog")
async def get_app_changelog():
    return {key: CHANGELOG[key] for idx, key in enumerate(CHANGELOG) if idx < 5}


@app.get("/api/version/updates")
async def get_app_latest_release_version():
    try:
        async with aiohttp.ClientSession(trust_env=True) as session:
            async with session.get(
                "https://api.github.com/repos/open-webui/open-webui/releases/latest"
            ) as response:
                response.raise_for_status()
                data = await response.json()
                latest_version = data["tag_name"]

                # Release tags are prefixed with "v"; strip it
                return {"current": VERSION, "latest": latest_version[1:]}
    except aiohttp.ClientError as e:
        raise HTTPException(
            status_code=status.HTTP_503_SERVICE_UNAVAILABLE,
            detail=ERROR_MESSAGES.RATE_LIMIT_EXCEEDED,
        )


@app.get("/manifest.json")
async def get_manifest_json():
    return {
        "name": WEBUI_NAME,
        "short_name": WEBUI_NAME,
        "start_url": "/",
        "display": "standalone",
        "background_color": "#343541",
        "theme_color": "#343541",
        "orientation": "portrait-primary",
        "icons": [{"src": "/static/logo.png", "type": "image/png", "sizes": "500x500"}],
    }


@app.get("/opensearch.xml")
async def get_opensearch_xml():
    # {{searchTerms}} renders as the literal OpenSearch placeholder {searchTerms}
    xml_content = rf"""
    <OpenSearchDescription xmlns="http://a9.com/-/spec/opensearch/1.1/" xmlns:moz="http://www.mozilla.org/2006/browser/search/">
    <ShortName>{WEBUI_NAME}</ShortName>
    <Description>Search {WEBUI_NAME}</Description>
    <InputEncoding>UTF-8</InputEncoding>
    <Image width="16" height="16" type="image/x-icon">{WEBUI_URL}/favicon.png</Image>
    <Url type="text/html" method="get" template="{WEBUI_URL}/?q={{searchTerms}}"/>
    <moz:SearchForm>{WEBUI_URL}</moz:SearchForm>
    </OpenSearchDescription>
    """
    return Response(content=xml_content, media_type="application/xml")


@app.get("/health")
async def healthcheck():
    return {"status": True}
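

# Static mounts: the SPA catch-all at "/" must be mounted last, after every
# API route and sub-app, or it would shadow them.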
app.mount("/static", StaticFiles(directory=STATIC_DIR), name="static")
app.mount("/cache", StaticFiles(directory=CACHE_DIR), name="cache")

if os.path.exists(FRONTEND_BUILD_DIR):
    mimetypes.add_type("text/javascript", ".js")
    app.mount(
        "/",
        SPAStaticFiles(directory=FRONTEND_BUILD_DIR, html=True),
        name="spa-static-files",
    )
else:
    log.warning(
        f"Frontend build directory not found at '{FRONTEND_BUILD_DIR}'. Serving API only."
    )