from contextlib import asynccontextmanager
from bs4 import BeautifulSoup
import json
import markdown
import time
import os
import sys
import logging
import aiohttp
import requests
import mimetypes
import shutil
import uuid
import inspect
import asyncio

from fastapi import FastAPI, Request, Depends, status, UploadFile, File, Form
from fastapi.staticfiles import StaticFiles
from fastapi.responses import JSONResponse
from fastapi import HTTPException
from fastapi.middleware.wsgi import WSGIMiddleware
from fastapi.middleware.cors import CORSMiddleware
from starlette.exceptions import HTTPException as StarletteHTTPException
from starlette.middleware.base import BaseHTTPMiddleware
from starlette.responses import StreamingResponse, Response

from apps.socket.main import app as socket_app
from apps.ollama.main import (
    app as ollama_app,
    OpenAIChatCompletionForm,
    get_all_models as get_ollama_models,
    generate_openai_chat_completion as generate_ollama_chat_completion,
)
from apps.openai.main import (
    app as openai_app,
    get_all_models as get_openai_models,
    generate_chat_completion as generate_openai_chat_completion,
)
from apps.audio.main import app as audio_app
from apps.images.main import app as images_app
from apps.rag.main import app as rag_app
from apps.webui.main import app as webui_app

from pydantic import BaseModel
from typing import List, Optional

from apps.webui.models.models import Models, ModelModel
from apps.webui.models.tools import Tools
from apps.webui.utils import load_toolkit_module_by_id

from utils.utils import (
    get_admin_user,
    get_verified_user,
    get_current_user,
    get_http_authorization_cred,
)
from utils.task import (
    title_generation_template,
    search_query_generation_template,
    tools_function_calling_generation_template,
)
from utils.misc import get_last_user_message, add_or_update_system_message

from apps.rag.utils import get_rag_context, rag_template

from config import (
    CONFIG_DATA,
    WEBUI_NAME,
    WEBUI_URL,
    WEBUI_AUTH,
    ENV,
    VERSION,
    CHANGELOG,
    FRONTEND_BUILD_DIR,
    UPLOAD_DIR,
    CACHE_DIR,
    STATIC_DIR,
    ENABLE_OPENAI_API,
    ENABLE_OLLAMA_API,
    ENABLE_MODEL_FILTER,
    MODEL_FILTER_LIST,
    GLOBAL_LOG_LEVEL,
    SRC_LOG_LEVELS,
    WEBHOOK_URL,
    ENABLE_ADMIN_EXPORT,
    WEBUI_BUILD_HASH,
    TASK_MODEL,
    TASK_MODEL_EXTERNAL,
    TITLE_GENERATION_PROMPT_TEMPLATE,
    SEARCH_QUERY_GENERATION_PROMPT_TEMPLATE,
    SEARCH_QUERY_PROMPT_LENGTH_THRESHOLD,
    TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE,
    AppConfig,
)
from constants import ERROR_MESSAGES

logging.basicConfig(stream=sys.stdout, level=GLOBAL_LOG_LEVEL)
log = logging.getLogger(__name__)
log.setLevel(SRC_LOG_LEVELS["MAIN"])

class SPAStaticFiles(StaticFiles):
    """Static file app that falls back to index.html for client-side routes."""

    async def get_response(self, path: str, scope):
        try:
            return await super().get_response(path, scope)
        except (HTTPException, StarletteHTTPException) as ex:
            if ex.status_code == 404:
                return await super().get_response("index.html", scope)
            else:
                raise ex

print(
    rf"""
  ___                    __        __   _     _   _ ___
 / _ \ _ __   ___ _ __   \ \      / /__| |__ | | | |_ _|
| | | | '_ \ / _ \ '_ \   \ \ /\ / / _ \ '_ \| | | || |
| |_| | |_) |  __/ | | |   \ V  V /  __/ |_) | |_| || |
 \___/| .__/ \___|_| |_|    \_/\_/ \___|_.__/ \___/|___|
      |_|

v{VERSION} - building the best open-source AI user interface.
{f"Commit: {WEBUI_BUILD_HASH}" if WEBUI_BUILD_HASH != "dev-build" else ""}
https://github.com/open-webui/open-webui
"""
)

@asynccontextmanager
async def lifespan(app: FastAPI):
    yield


app = FastAPI(
    docs_url="/docs" if ENV == "dev" else None, redoc_url=None, lifespan=lifespan
)

app.state.config = AppConfig()

app.state.config.ENABLE_OPENAI_API = ENABLE_OPENAI_API
app.state.config.ENABLE_OLLAMA_API = ENABLE_OLLAMA_API

app.state.config.ENABLE_MODEL_FILTER = ENABLE_MODEL_FILTER
app.state.config.MODEL_FILTER_LIST = MODEL_FILTER_LIST

app.state.config.WEBHOOK_URL = WEBHOOK_URL

app.state.config.TASK_MODEL = TASK_MODEL
app.state.config.TASK_MODEL_EXTERNAL = TASK_MODEL_EXTERNAL
app.state.config.TITLE_GENERATION_PROMPT_TEMPLATE = TITLE_GENERATION_PROMPT_TEMPLATE
app.state.config.SEARCH_QUERY_GENERATION_PROMPT_TEMPLATE = (
    SEARCH_QUERY_GENERATION_PROMPT_TEMPLATE
)
app.state.config.SEARCH_QUERY_PROMPT_LENGTH_THRESHOLD = (
    SEARCH_QUERY_PROMPT_LENGTH_THRESHOLD
)
app.state.config.TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE = (
    TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE
)

app.state.MODELS = {}

origins = ["*"]

async def get_function_call_response(
    messages, files, tool_id, template, task_model_id, user
):
    tool = Tools.get_tool_by_id(tool_id)
    tools_specs = json.dumps(tool.specs, indent=2)
    content = tools_function_calling_generation_template(template, tools_specs)

    user_message = get_last_user_message(messages)
    prompt = (
        "History:\n"
        + "\n".join(
            [
                f"{message['role'].upper()}: \"\"\"{message['content']}\"\"\""
                for message in messages[::-1][:4]
            ]
        )
        + f"\nQuery: {user_message}"
    )

    print(prompt)

    payload = {
        "model": task_model_id,
        "messages": [
            {"role": "system", "content": content},
            {"role": "user", "content": f"Query: {prompt}"},
        ],
        "stream": False,
    }

    try:
        payload = filter_pipeline(payload, user)
    except Exception as e:
        raise e

    model = app.state.MODELS[task_model_id]

    response = None
    try:
        if model["owned_by"] == "ollama":
            response = await generate_ollama_chat_completion(payload, user=user)
        else:
            response = await generate_openai_chat_completion(payload, user=user)

        content = None
        if hasattr(response, "body_iterator"):
            async for chunk in response.body_iterator:
                data = json.loads(chunk.decode("utf-8"))
                content = data["choices"][0]["message"]["content"]

            # Clean up any remaining background tasks if necessary
            if response.background is not None:
                await response.background()
        else:
            content = response["choices"][0]["message"]["content"]

        # Parse the function response
        if content is not None:
            print(f"content: {content}")
            result = json.loads(content)
            print(result)

            # Call the function
            if "name" in result:
                if tool_id in webui_app.state.TOOLS:
                    toolkit_module = webui_app.state.TOOLS[tool_id]
                else:
                    toolkit_module = load_toolkit_module_by_id(tool_id)
                    webui_app.state.TOOLS[tool_id] = toolkit_module

                file_handler = False
                # Check whether the toolkit module declares a file_handler attribute
                if hasattr(toolkit_module, "file_handler"):
                    file_handler = True
                    print("file_handler: ", file_handler)

                function = getattr(toolkit_module, result["name"])
                function_result = None
                try:
                    # Inspect the function signature to decide which special
                    # parameters to inject
                    sig = inspect.signature(function)
                    params = result["parameters"]
                    if "__user__" in sig.parameters:
                        # Call the function with the '__user__' parameter included
                        params = {
                            **params,
                            "__user__": {
                                "id": user.id,
                                "email": user.email,
                                "name": user.name,
                                "role": user.role,
                            },
                        }
                    if "__messages__" in sig.parameters:
                        # Call the function with the '__messages__' parameter included
                        params = {
                            **params,
                            "__messages__": messages,
                        }
                    if "__files__" in sig.parameters:
                        # Call the function with the '__files__' parameter included
                        params = {
                            **params,
                            "__files__": files,
                        }
                    if "__model__" in sig.parameters:
                        # Call the function with the '__model__' parameter included
                        params = {
                            **params,
                            "__model__": model,
                        }
                    if "__id__" in sig.parameters:
                        # Call the function with the '__id__' parameter included
                        params = {
                            **params,
                            "__id__": tool_id,
                        }
                    function_result = function(**params)
                except Exception as e:
                    print(e)

                # Add the function result to the system prompt
                if function_result is not None:
                    return function_result, file_handler
    except Exception as e:
        print(f"Error: {e}")

    return None, False

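# Illustrative tool-call JSON the task model is expected to emit for the
# helper above (hypothetical tool name and arguments; the real schema is
# derived from tool.specs):
#   {"name": "get_current_weather", "parameters": {"city": "Berlin"}}
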
class ChatCompletionMiddleware(BaseHTTPMiddleware):
    async def dispatch(self, request: Request, call_next):
        return_citations = False

        if request.method == "POST" and (
            "/ollama/api/chat" in request.url.path
            or "/chat/completions" in request.url.path
        ):
            log.debug(f"request.url.path: {request.url.path}")

            # Read the original request body
            body = await request.body()
            # Decode body to string
            body_str = body.decode("utf-8")
            # Parse string to JSON
            data = json.loads(body_str) if body_str else {}

            user = get_current_user(
                request,
                get_http_authorization_cred(request.headers.get("Authorization")),
            )

            # Remove the citations from the body
            return_citations = data.get("citations", False)
            if "citations" in data:
                del data["citations"]

            # Set the task model
            task_model_id = data["model"]
            if task_model_id not in app.state.MODELS:
                raise HTTPException(
                    status_code=status.HTTP_404_NOT_FOUND,
                    detail="Model not found",
                )

            # Check if the user has a custom task model
            # If the user has a custom task model, use that model
            if app.state.MODELS[task_model_id]["owned_by"] == "ollama":
                if (
                    app.state.config.TASK_MODEL
                    and app.state.config.TASK_MODEL in app.state.MODELS
                ):
                    task_model_id = app.state.config.TASK_MODEL
            else:
                if (
                    app.state.config.TASK_MODEL_EXTERNAL
                    and app.state.config.TASK_MODEL_EXTERNAL in app.state.MODELS
                ):
                    task_model_id = app.state.config.TASK_MODEL_EXTERNAL

            prompt = get_last_user_message(data["messages"])
            context = ""
            citations = []  # keep defined even when no files are attached

            # If tool_ids field is present, call the functions
            skip_files = False
            if "tool_ids" in data:
                print(data["tool_ids"])
                for tool_id in data["tool_ids"]:
                    print(tool_id)
                    try:
                        response, file_handler = await get_function_call_response(
                            messages=data["messages"],
                            files=data.get("files", []),
                            tool_id=tool_id,
                            template=app.state.config.TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE,
                            task_model_id=task_model_id,
                            user=user,
                        )

                        print(file_handler)
                        if isinstance(response, str):
                            context += ("\n" if context != "" else "") + response

                        if file_handler:
                            skip_files = True
                    except Exception as e:
                        print(f"Error: {e}")

                del data["tool_ids"]
                print(f"tool_context: {context}")

            # If the files field is present, generate RAG completions
            # If skip_files is True, skip the RAG completions
            if "files" in data:
                if not skip_files:
                    data = {**data}
                    rag_context, citations = get_rag_context(
                        files=data["files"],
                        messages=data["messages"],
                        embedding_function=rag_app.state.EMBEDDING_FUNCTION,
                        k=rag_app.state.config.TOP_K,
                        reranking_function=rag_app.state.sentence_transformer_rf,
                        r=rag_app.state.config.RELEVANCE_THRESHOLD,
                        hybrid_search=rag_app.state.config.ENABLE_RAG_HYBRID_SEARCH,
                    )
                    if rag_context:
                        context += ("\n" if context != "" else "") + rag_context

                    log.debug(f"rag_context: {rag_context}, citations: {citations}")
                else:
                    return_citations = False

                del data["files"]

            if context != "":
                system_prompt = rag_template(
                    rag_app.state.config.RAG_TEMPLATE, context, prompt
                )
                print(system_prompt)
                data["messages"] = add_or_update_system_message(
                    f"\n{system_prompt}", data["messages"]
                )

            modified_body_bytes = json.dumps(data).encode("utf-8")

            # Replace the request body with the modified one
            request._body = modified_body_bytes

            # Set custom header to ensure content-length matches new body length
            request.headers.__dict__["_list"] = [
                (b"content-length", str(len(modified_body_bytes)).encode("utf-8")),
                *[
                    (k, v)
                    for k, v in request.headers.raw
                    if k.lower() != b"content-length"
                ],
            ]

        response = await call_next(request)

        if return_citations:
            # Inject the citations into the response
            if isinstance(response, StreamingResponse):
                # If it's a streaming response, inject it as an SSE event or NDJSON line
                content_type = response.headers.get("Content-Type", "")
                if "text/event-stream" in content_type:
                    return StreamingResponse(
                        self.openai_stream_wrapper(response.body_iterator, citations),
                    )
                if "application/x-ndjson" in content_type:
                    return StreamingResponse(
                        self.ollama_stream_wrapper(response.body_iterator, citations),
                    )

        return response

    async def _receive(self, body: bytes):
        return {"type": "http.request", "body": body, "more_body": False}

    async def openai_stream_wrapper(self, original_generator, citations):
        yield f"data: {json.dumps({'citations': citations})}\n\n"
        async for data in original_generator:
            yield data

    async def ollama_stream_wrapper(self, original_generator, citations):
        yield f"{json.dumps({'citations': citations})}\n"
        async for data in original_generator:
            yield data


app.add_middleware(ChatCompletionMiddleware)

def filter_pipeline(payload, user):
    user = {"id": user.id, "name": user.name, "role": user.role}
    model_id = payload["model"]
    filters = [
        model
        for model in app.state.MODELS.values()
        if "pipeline" in model
        and "type" in model["pipeline"]
        and model["pipeline"]["type"] == "filter"
        and (
            model["pipeline"]["pipelines"] == ["*"]
            or any(
                model_id == target_model_id
                for target_model_id in model["pipeline"]["pipelines"]
            )
        )
    ]
    sorted_filters = sorted(filters, key=lambda x: x["pipeline"]["priority"])

    model = app.state.MODELS[model_id]

    if "pipeline" in model:
        sorted_filters.append(model)

    for filter in sorted_filters:
        r = None
        try:
            urlIdx = filter["urlIdx"]

            url = openai_app.state.config.OPENAI_API_BASE_URLS[urlIdx]
            key = openai_app.state.config.OPENAI_API_KEYS[urlIdx]

            if key != "":
                headers = {"Authorization": f"Bearer {key}"}
                r = requests.post(
                    f"{url}/{filter['id']}/filter/inlet",
                    headers=headers,
                    json={
                        "user": user,
                        "body": payload,
                    },
                )

                r.raise_for_status()
                payload = r.json()
        except Exception as e:
            # Handle connection errors here
            print(f"Connection error: {e}")

            if r is not None:
                res = None
                try:
                    res = r.json()
                except Exception:
                    pass

                if res is not None and "detail" in res:
                    raise Exception(r.status_code, res["detail"])

    if "pipeline" not in app.state.MODELS[model_id]:
        if "chat_id" in payload:
            del payload["chat_id"]

        if "title" in payload:
            del payload["title"]

        if "task" in payload:
            del payload["task"]

    return payload

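# Illustrative shape of a filter entry consulted by filter_pipeline (values
# are hypothetical; real entries are provided by a connected pipelines
# server via the model list):
#   {
#       "id": "rate_limit_filter",
#       "urlIdx": 0,
#       "pipeline": {"type": "filter", "pipelines": ["*"], "priority": 0},
#   }
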
class PipelineMiddleware(BaseHTTPMiddleware):
    async def dispatch(self, request: Request, call_next):
        if request.method == "POST" and (
            "/ollama/api/chat" in request.url.path
            or "/chat/completions" in request.url.path
        ):
            log.debug(f"request.url.path: {request.url.path}")

            # Read the original request body
            body = await request.body()
            # Decode body to string
            body_str = body.decode("utf-8")
            # Parse string to JSON
            data = json.loads(body_str) if body_str else {}

            user = get_current_user(
                request,
                get_http_authorization_cred(request.headers.get("Authorization")),
            )

            try:
                data = filter_pipeline(data, user)
            except Exception as e:
                return JSONResponse(
                    status_code=e.args[0],
                    content={"detail": e.args[1]},
                )

            modified_body_bytes = json.dumps(data).encode("utf-8")

            # Replace the request body with the modified one
            request._body = modified_body_bytes

            # Set custom header to ensure content-length matches new body length
            request.headers.__dict__["_list"] = [
                (b"content-length", str(len(modified_body_bytes)).encode("utf-8")),
                *[
                    (k, v)
                    for k, v in request.headers.raw
                    if k.lower() != b"content-length"
                ],
            ]

        response = await call_next(request)
        return response

    async def _receive(self, body: bytes):
        return {"type": "http.request", "body": body, "more_body": False}


app.add_middleware(PipelineMiddleware)

app.add_middleware(
    CORSMiddleware,
    allow_origins=origins,
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)


@app.middleware("http")
async def check_url(request: Request, call_next):
    if len(app.state.MODELS) == 0:
        await get_all_models()

    start_time = int(time.time())
    response = await call_next(request)
    process_time = int(time.time()) - start_time
    response.headers["X-Process-Time"] = str(process_time)

    return response


@app.middleware("http")
async def update_embedding_function(request: Request, call_next):
    response = await call_next(request)
    if "/embedding/update" in request.url.path:
        webui_app.state.EMBEDDING_FUNCTION = rag_app.state.EMBEDDING_FUNCTION
    return response


app.mount("/ws", socket_app)
app.mount("/ollama", ollama_app)
app.mount("/openai", openai_app)

app.mount("/images/api/v1", images_app)
app.mount("/audio/api/v1", audio_app)
app.mount("/rag/api/v1", rag_app)

app.mount("/api/v1", webui_app)

webui_app.state.EMBEDDING_FUNCTION = rag_app.state.EMBEDDING_FUNCTION

async def get_all_models():
    openai_models = []
    ollama_models = []

    if app.state.config.ENABLE_OPENAI_API:
        openai_models = await get_openai_models()
        openai_models = openai_models["data"]

    if app.state.config.ENABLE_OLLAMA_API:
        ollama_models = await get_ollama_models()
        ollama_models = [
            {
                "id": model["model"],
                "name": model["name"],
                "object": "model",
                "created": int(time.time()),
                "owned_by": "ollama",
                "ollama": model,
            }
            for model in ollama_models["models"]
        ]

    models = openai_models + ollama_models

    custom_models = Models.get_all_models()
    for custom_model in custom_models:
        if custom_model.base_model_id is None:
            for model in models:
                if (
                    custom_model.id == model["id"]
                    or custom_model.id == model["id"].split(":")[0]
                ):
                    model["name"] = custom_model.name
                    model["info"] = custom_model.model_dump()
        else:
            owned_by = "openai"
            for model in models:
                if (
                    custom_model.base_model_id == model["id"]
                    or custom_model.base_model_id == model["id"].split(":")[0]
                ):
                    owned_by = model["owned_by"]
                    break

            models.append(
                {
                    "id": custom_model.id,
                    "name": custom_model.name,
                    "object": "model",
                    "created": custom_model.created_at,
                    "owned_by": owned_by,
                    "info": custom_model.model_dump(),
                    "preset": True,
                }
            )

    app.state.MODELS = {model["id"]: model for model in models}
    webui_app.state.MODELS = app.state.MODELS

    return models

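# Illustrative entry in app.state.MODELS after get_all_models() (hypothetical
# values; actual fields come from the Ollama/OpenAI listings and any custom
# model presets):
#   "llama3:latest": {"id": "llama3:latest", "name": "llama3",
#                     "object": "model", "owned_by": "ollama", "ollama": {...}}
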
@app.get("/api/models")
async def get_models(user=Depends(get_verified_user)):
    models = await get_all_models()

    # Filter out filter pipelines
    models = [
        model
        for model in models
        if "pipeline" not in model or model["pipeline"].get("type", None) != "filter"
    ]

    if app.state.config.ENABLE_MODEL_FILTER and user.role == "user":
        models = list(
            filter(
                lambda model: model["id"] in app.state.config.MODEL_FILTER_LIST,
                models,
            )
        )

    return {"data": models}

@app.get("/api/task/config")
async def get_task_config(user=Depends(get_verified_user)):
    return {
        "TASK_MODEL": app.state.config.TASK_MODEL,
        "TASK_MODEL_EXTERNAL": app.state.config.TASK_MODEL_EXTERNAL,
        "TITLE_GENERATION_PROMPT_TEMPLATE": app.state.config.TITLE_GENERATION_PROMPT_TEMPLATE,
        "SEARCH_QUERY_GENERATION_PROMPT_TEMPLATE": app.state.config.SEARCH_QUERY_GENERATION_PROMPT_TEMPLATE,
        "SEARCH_QUERY_PROMPT_LENGTH_THRESHOLD": app.state.config.SEARCH_QUERY_PROMPT_LENGTH_THRESHOLD,
        "TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE": app.state.config.TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE,
    }


class TaskConfigForm(BaseModel):
    TASK_MODEL: Optional[str]
    TASK_MODEL_EXTERNAL: Optional[str]
    TITLE_GENERATION_PROMPT_TEMPLATE: str
    SEARCH_QUERY_GENERATION_PROMPT_TEMPLATE: str
    SEARCH_QUERY_PROMPT_LENGTH_THRESHOLD: int
    TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE: str

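# Example body for POST /api/task/config/update (illustrative values only;
# the field names come from TaskConfigForm above):
#   {
#       "TASK_MODEL": "llama3:latest",
#       "TASK_MODEL_EXTERNAL": "gpt-4o-mini",
#       "TITLE_GENERATION_PROMPT_TEMPLATE": "...",
#       "SEARCH_QUERY_GENERATION_PROMPT_TEMPLATE": "...",
#       "SEARCH_QUERY_PROMPT_LENGTH_THRESHOLD": 100,
#       "TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE": "..."
#   }
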
@app.post("/api/task/config/update")
async def update_task_config(form_data: TaskConfigForm, user=Depends(get_admin_user)):
    app.state.config.TASK_MODEL = form_data.TASK_MODEL
    app.state.config.TASK_MODEL_EXTERNAL = form_data.TASK_MODEL_EXTERNAL
    app.state.config.TITLE_GENERATION_PROMPT_TEMPLATE = (
        form_data.TITLE_GENERATION_PROMPT_TEMPLATE
    )
    app.state.config.SEARCH_QUERY_GENERATION_PROMPT_TEMPLATE = (
        form_data.SEARCH_QUERY_GENERATION_PROMPT_TEMPLATE
    )
    app.state.config.SEARCH_QUERY_PROMPT_LENGTH_THRESHOLD = (
        form_data.SEARCH_QUERY_PROMPT_LENGTH_THRESHOLD
    )
    app.state.config.TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE = (
        form_data.TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE
    )

    return {
        "TASK_MODEL": app.state.config.TASK_MODEL,
        "TASK_MODEL_EXTERNAL": app.state.config.TASK_MODEL_EXTERNAL,
        "TITLE_GENERATION_PROMPT_TEMPLATE": app.state.config.TITLE_GENERATION_PROMPT_TEMPLATE,
        "SEARCH_QUERY_GENERATION_PROMPT_TEMPLATE": app.state.config.SEARCH_QUERY_GENERATION_PROMPT_TEMPLATE,
        "SEARCH_QUERY_PROMPT_LENGTH_THRESHOLD": app.state.config.SEARCH_QUERY_PROMPT_LENGTH_THRESHOLD,
        "TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE": app.state.config.TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE,
    }

@app.post("/api/task/title/completions")
async def generate_title(form_data: dict, user=Depends(get_verified_user)):
    print("generate_title")

    model_id = form_data["model"]
    if model_id not in app.state.MODELS:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail="Model not found",
        )

    # Check if the user has a custom task model
    # If the user has a custom task model, use that model
    if app.state.MODELS[model_id]["owned_by"] == "ollama":
        if app.state.config.TASK_MODEL:
            task_model_id = app.state.config.TASK_MODEL
            if task_model_id in app.state.MODELS:
                model_id = task_model_id
    else:
        if app.state.config.TASK_MODEL_EXTERNAL:
            task_model_id = app.state.config.TASK_MODEL_EXTERNAL
            if task_model_id in app.state.MODELS:
                model_id = task_model_id

    print(model_id)
    model = app.state.MODELS[model_id]

    template = app.state.config.TITLE_GENERATION_PROMPT_TEMPLATE

    content = title_generation_template(
        template,
        form_data["prompt"],
        {
            "name": user.name,
            "location": user.info.get("location") if user.info else None,
        },
    )

    payload = {
        "model": model_id,
        "messages": [{"role": "user", "content": content}],
        "stream": False,
        "max_tokens": 50,
        "chat_id": form_data.get("chat_id", None),
        "title": True,
    }

    log.debug(payload)

    try:
        payload = filter_pipeline(payload, user)
    except Exception as e:
        return JSONResponse(
            status_code=e.args[0],
            content={"detail": e.args[1]},
        )

    if model["owned_by"] == "ollama":
        return await generate_ollama_chat_completion(payload, user=user)
    else:
        return await generate_openai_chat_completion(payload, user=user)

@app.post("/api/task/query/completions")
async def generate_search_query(form_data: dict, user=Depends(get_verified_user)):
    print("generate_search_query")

    if len(form_data["prompt"]) < app.state.config.SEARCH_QUERY_PROMPT_LENGTH_THRESHOLD:
        raise HTTPException(
            status_code=status.HTTP_400_BAD_REQUEST,
            detail=f"Skip search query generation for short prompts (< {app.state.config.SEARCH_QUERY_PROMPT_LENGTH_THRESHOLD} characters)",
        )

    model_id = form_data["model"]
    if model_id not in app.state.MODELS:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail="Model not found",
        )

    # Check if the user has a custom task model
    # If the user has a custom task model, use that model
    if app.state.MODELS[model_id]["owned_by"] == "ollama":
        if app.state.config.TASK_MODEL:
            task_model_id = app.state.config.TASK_MODEL
            if task_model_id in app.state.MODELS:
                model_id = task_model_id
    else:
        if app.state.config.TASK_MODEL_EXTERNAL:
            task_model_id = app.state.config.TASK_MODEL_EXTERNAL
            if task_model_id in app.state.MODELS:
                model_id = task_model_id

    print(model_id)
    model = app.state.MODELS[model_id]

    template = app.state.config.SEARCH_QUERY_GENERATION_PROMPT_TEMPLATE

    content = search_query_generation_template(
        template, form_data["prompt"], {"name": user.name}
    )

    payload = {
        "model": model_id,
        "messages": [{"role": "user", "content": content}],
        "stream": False,
        "max_tokens": 30,
        "task": True,
    }

    print(payload)

    try:
        payload = filter_pipeline(payload, user)
    except Exception as e:
        return JSONResponse(
            status_code=e.args[0],
            content={"detail": e.args[1]},
        )

    if model["owned_by"] == "ollama":
        return await generate_ollama_chat_completion(payload, user=user)
    else:
        return await generate_openai_chat_completion(payload, user=user)

@app.post("/api/task/emoji/completions")
async def generate_emoji(form_data: dict, user=Depends(get_verified_user)):
    print("generate_emoji")

    model_id = form_data["model"]
    if model_id not in app.state.MODELS:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail="Model not found",
        )

    # Check if the user has a custom task model
    # If the user has a custom task model, use that model
    if app.state.MODELS[model_id]["owned_by"] == "ollama":
        if app.state.config.TASK_MODEL:
            task_model_id = app.state.config.TASK_MODEL
            if task_model_id in app.state.MODELS:
                model_id = task_model_id
    else:
        if app.state.config.TASK_MODEL_EXTERNAL:
            task_model_id = app.state.config.TASK_MODEL_EXTERNAL
            if task_model_id in app.state.MODELS:
                model_id = task_model_id

    print(model_id)
    model = app.state.MODELS[model_id]

    template = '''
Your task is to reflect the speaker's likely facial expression through a fitting emoji. Interpret emotions from the message and reflect their facial expression using fitting, diverse emojis (e.g., 😊, 😢, 😡, 😱).
Message: """{{prompt}}"""
'''

    content = title_generation_template(
        template,
        form_data["prompt"],
        {
            "name": user.name,
            "location": user.info.get("location") if user.info else None,
        },
    )

    payload = {
        "model": model_id,
        "messages": [{"role": "user", "content": content}],
        "stream": False,
        "max_tokens": 4,
        "chat_id": form_data.get("chat_id", None),
        "task": True,
    }

    log.debug(payload)

    try:
        payload = filter_pipeline(payload, user)
    except Exception as e:
        return JSONResponse(
            status_code=e.args[0],
            content={"detail": e.args[1]},
        )

    if model["owned_by"] == "ollama":
        return await generate_ollama_chat_completion(payload, user=user)
    else:
        return await generate_openai_chat_completion(payload, user=user)

@app.post("/api/task/tools/completions")
async def get_tools_function_calling(form_data: dict, user=Depends(get_verified_user)):
    print("get_tools_function_calling")

    model_id = form_data["model"]
    if model_id not in app.state.MODELS:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail="Model not found",
        )

    # Check if the user has a custom task model
    # If the user has a custom task model, use that model
    if app.state.MODELS[model_id]["owned_by"] == "ollama":
        if app.state.config.TASK_MODEL:
            task_model_id = app.state.config.TASK_MODEL
            if task_model_id in app.state.MODELS:
                model_id = task_model_id
    else:
        if app.state.config.TASK_MODEL_EXTERNAL:
            task_model_id = app.state.config.TASK_MODEL_EXTERNAL
            if task_model_id in app.state.MODELS:
                model_id = task_model_id

    print(model_id)

    template = app.state.config.TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE

    try:
        context, file_handler = await get_function_call_response(
            form_data["messages"],
            form_data.get("files", []),
            form_data["tool_id"],
            template,
            model_id,
            user,
        )
        return context
    except Exception as e:
        return JSONResponse(
            status_code=e.args[0],
            content={"detail": e.args[1]},
        )

@app.post("/api/chat/completions")
async def generate_chat_completions(form_data: dict, user=Depends(get_verified_user)):
    model_id = form_data["model"]
    if model_id not in app.state.MODELS:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail="Model not found",
        )

    model = app.state.MODELS[model_id]
    print(model)

    if model["owned_by"] == "ollama":
        return await generate_ollama_chat_completion(form_data, user=user)
    else:
        return await generate_openai_chat_completion(form_data, user=user)

@app.post("/api/chat/completed")
async def chat_completed(form_data: dict, user=Depends(get_verified_user)):
    data = form_data
    model_id = data["model"]

    filters = [
        model
        for model in app.state.MODELS.values()
        if "pipeline" in model
        and "type" in model["pipeline"]
        and model["pipeline"]["type"] == "filter"
        and (
            model["pipeline"]["pipelines"] == ["*"]
            or any(
                model_id == target_model_id
                for target_model_id in model["pipeline"]["pipelines"]
            )
        )
    ]
    sorted_filters = sorted(filters, key=lambda x: x["pipeline"]["priority"])

    print(model_id)
    if model_id in app.state.MODELS:
        model = app.state.MODELS[model_id]
        if "pipeline" in model:
            sorted_filters = [model] + sorted_filters

    for filter in sorted_filters:
        r = None
        try:
            urlIdx = filter["urlIdx"]

            url = openai_app.state.config.OPENAI_API_BASE_URLS[urlIdx]
            key = openai_app.state.config.OPENAI_API_KEYS[urlIdx]

            if key != "":
                headers = {"Authorization": f"Bearer {key}"}
                r = requests.post(
                    f"{url}/{filter['id']}/filter/outlet",
                    headers=headers,
                    json={
                        "user": {"id": user.id, "name": user.name, "role": user.role},
                        "body": data,
                    },
                )

                r.raise_for_status()
                data = r.json()
        except Exception as e:
            # Handle connection errors here
            print(f"Connection error: {e}")

            if r is not None:
                try:
                    res = r.json()
                    if "detail" in res:
                        return JSONResponse(
                            status_code=r.status_code,
                            content=res,
                        )
                except Exception:
                    pass

    return data

@app.get("/api/pipelines/list")
async def get_pipelines_list(user=Depends(get_admin_user)):
    responses = await get_openai_models(raw=True)
    print(responses)

    urlIdxs = [
        idx
        for idx, response in enumerate(responses)
        if response is not None and "pipelines" in response
    ]

    return {
        "data": [
            {
                "url": openai_app.state.config.OPENAI_API_BASE_URLS[urlIdx],
                "idx": urlIdx,
            }
            for urlIdx in urlIdxs
        ]
    }

@app.post("/api/pipelines/upload")
async def upload_pipeline(
    urlIdx: int = Form(...), file: UploadFile = File(...), user=Depends(get_admin_user)
):
    print("upload_pipeline", urlIdx, file.filename)
    # Check if the uploaded file is a Python file
    if not file.filename.endswith(".py"):
        raise HTTPException(
            status_code=status.HTTP_400_BAD_REQUEST,
            detail="Only Python (.py) files are allowed.",
        )

    upload_folder = f"{CACHE_DIR}/pipelines"
    os.makedirs(upload_folder, exist_ok=True)
    file_path = os.path.join(upload_folder, file.filename)

    r = None  # keep r defined for the error handler below
    try:
        # Save the uploaded file
        with open(file_path, "wb") as buffer:
            shutil.copyfileobj(file.file, buffer)

        url = openai_app.state.config.OPENAI_API_BASE_URLS[urlIdx]
        key = openai_app.state.config.OPENAI_API_KEYS[urlIdx]

        headers = {"Authorization": f"Bearer {key}"}

        with open(file_path, "rb") as f:
            files = {"file": f}
            r = requests.post(f"{url}/pipelines/upload", headers=headers, files=files)

        r.raise_for_status()
        data = r.json()

        return {**data}
    except Exception as e:
        # Handle connection errors here
        print(f"Connection error: {e}")

        detail = "Pipeline not found"
        if r is not None:
            try:
                res = r.json()
                if "detail" in res:
                    detail = res["detail"]
            except Exception:
                pass

        raise HTTPException(
            status_code=(r.status_code if r is not None else status.HTTP_404_NOT_FOUND),
            detail=detail,
        )
    finally:
        # Ensure the file is deleted whether the upload succeeded or failed
        if os.path.exists(file_path):
            os.remove(file_path)

class AddPipelineForm(BaseModel):
    url: str
    urlIdx: int


@app.post("/api/pipelines/add")
async def add_pipeline(form_data: AddPipelineForm, user=Depends(get_admin_user)):
    r = None
    try:
        urlIdx = form_data.urlIdx

        url = openai_app.state.config.OPENAI_API_BASE_URLS[urlIdx]
        key = openai_app.state.config.OPENAI_API_KEYS[urlIdx]

        headers = {"Authorization": f"Bearer {key}"}
        r = requests.post(
            f"{url}/pipelines/add", headers=headers, json={"url": form_data.url}
        )

        r.raise_for_status()
        data = r.json()

        return {**data}
    except Exception as e:
        # Handle connection errors here
        print(f"Connection error: {e}")

        detail = "Pipeline not found"
        if r is not None:
            try:
                res = r.json()
                if "detail" in res:
                    detail = res["detail"]
            except Exception:
                pass

        raise HTTPException(
            status_code=(r.status_code if r is not None else status.HTTP_404_NOT_FOUND),
            detail=detail,
        )

class DeletePipelineForm(BaseModel):
    id: str
    urlIdx: int


@app.delete("/api/pipelines/delete")
async def delete_pipeline(form_data: DeletePipelineForm, user=Depends(get_admin_user)):
    r = None
    try:
        urlIdx = form_data.urlIdx

        url = openai_app.state.config.OPENAI_API_BASE_URLS[urlIdx]
        key = openai_app.state.config.OPENAI_API_KEYS[urlIdx]

        headers = {"Authorization": f"Bearer {key}"}
        r = requests.delete(
            f"{url}/pipelines/delete", headers=headers, json={"id": form_data.id}
        )

        r.raise_for_status()
        data = r.json()

        return {**data}
    except Exception as e:
        # Handle connection errors here
        print(f"Connection error: {e}")

        detail = "Pipeline not found"
        if r is not None:
            try:
                res = r.json()
                if "detail" in res:
                    detail = res["detail"]
            except Exception:
                pass

        raise HTTPException(
            status_code=(r.status_code if r is not None else status.HTTP_404_NOT_FOUND),
            detail=detail,
        )

@app.get("/api/pipelines")
async def get_pipelines(urlIdx: Optional[int] = None, user=Depends(get_admin_user)):
    r = None
    try:
        url = openai_app.state.config.OPENAI_API_BASE_URLS[urlIdx]
        key = openai_app.state.config.OPENAI_API_KEYS[urlIdx]

        headers = {"Authorization": f"Bearer {key}"}
        r = requests.get(f"{url}/pipelines", headers=headers)

        r.raise_for_status()
        data = r.json()

        return {**data}
    except Exception as e:
        # Handle connection errors here
        print(f"Connection error: {e}")

        detail = "Pipeline not found"
        if r is not None:
            try:
                res = r.json()
                if "detail" in res:
                    detail = res["detail"]
            except Exception:
                pass

        raise HTTPException(
            status_code=(r.status_code if r is not None else status.HTTP_404_NOT_FOUND),
            detail=detail,
        )

@app.get("/api/pipelines/{pipeline_id}/valves")
async def get_pipeline_valves(
    urlIdx: Optional[int], pipeline_id: str, user=Depends(get_admin_user)
):
    models = await get_all_models()
    r = None
    try:
        url = openai_app.state.config.OPENAI_API_BASE_URLS[urlIdx]
        key = openai_app.state.config.OPENAI_API_KEYS[urlIdx]

        headers = {"Authorization": f"Bearer {key}"}
        r = requests.get(f"{url}/{pipeline_id}/valves", headers=headers)

        r.raise_for_status()
        data = r.json()

        return {**data}
    except Exception as e:
        # Handle connection errors here
        print(f"Connection error: {e}")

        detail = "Pipeline not found"
        if r is not None:
            try:
                res = r.json()
                if "detail" in res:
                    detail = res["detail"]
            except Exception:
                pass

        raise HTTPException(
            status_code=(r.status_code if r is not None else status.HTTP_404_NOT_FOUND),
            detail=detail,
        )

@app.get("/api/pipelines/{pipeline_id}/valves/spec")
async def get_pipeline_valves_spec(
    urlIdx: Optional[int], pipeline_id: str, user=Depends(get_admin_user)
):
    models = await get_all_models()
    r = None
    try:
        url = openai_app.state.config.OPENAI_API_BASE_URLS[urlIdx]
        key = openai_app.state.config.OPENAI_API_KEYS[urlIdx]

        headers = {"Authorization": f"Bearer {key}"}
        r = requests.get(f"{url}/{pipeline_id}/valves/spec", headers=headers)

        r.raise_for_status()
        data = r.json()

        return {**data}
    except Exception as e:
        # Handle connection errors here
        print(f"Connection error: {e}")

        detail = "Pipeline not found"
        if r is not None:
            try:
                res = r.json()
                if "detail" in res:
                    detail = res["detail"]
            except Exception:
                pass

        raise HTTPException(
            status_code=(r.status_code if r is not None else status.HTTP_404_NOT_FOUND),
            detail=detail,
        )

@app.post("/api/pipelines/{pipeline_id}/valves/update")
async def update_pipeline_valves(
    urlIdx: Optional[int],
    pipeline_id: str,
    form_data: dict,
    user=Depends(get_admin_user),
):
    models = await get_all_models()
    r = None
    try:
        url = openai_app.state.config.OPENAI_API_BASE_URLS[urlIdx]
        key = openai_app.state.config.OPENAI_API_KEYS[urlIdx]

        headers = {"Authorization": f"Bearer {key}"}
        r = requests.post(
            f"{url}/{pipeline_id}/valves/update",
            headers=headers,
            json={**form_data},
        )

        r.raise_for_status()
        data = r.json()

        return {**data}
    except Exception as e:
        # Handle connection errors here
        print(f"Connection error: {e}")

        detail = "Pipeline not found"
        if r is not None:
            try:
                res = r.json()
                if "detail" in res:
                    detail = res["detail"]
            except Exception:
                pass

        raise HTTPException(
            status_code=(r.status_code if r is not None else status.HTTP_404_NOT_FOUND),
            detail=detail,
        )

@app.get("/api/config")
async def get_app_config():
    # Handle the absence of the 'ui' section in CONFIG_DATA
    default_locale = "en-US"
    if "ui" in CONFIG_DATA:
        default_locale = CONFIG_DATA["ui"].get("default_locale", "en-US")

    return {
        "status": True,
        "name": WEBUI_NAME,
        "version": VERSION,
        "default_locale": default_locale,
        "default_models": webui_app.state.config.DEFAULT_MODELS,
        "default_prompt_suggestions": webui_app.state.config.DEFAULT_PROMPT_SUGGESTIONS,
        "features": {
            "auth": WEBUI_AUTH,
            "auth_trusted_header": bool(webui_app.state.AUTH_TRUSTED_EMAIL_HEADER),
            "enable_signup": webui_app.state.config.ENABLE_SIGNUP,
            "enable_web_search": rag_app.state.config.ENABLE_RAG_WEB_SEARCH,
            "enable_image_generation": images_app.state.config.ENABLED,
            "enable_community_sharing": webui_app.state.config.ENABLE_COMMUNITY_SHARING,
            "enable_admin_export": ENABLE_ADMIN_EXPORT,
        },
        "audio": {
            "tts": {
                "engine": audio_app.state.config.TTS_ENGINE,
                "voice": audio_app.state.config.TTS_VOICE,
            },
            "stt": {
                "engine": audio_app.state.config.STT_ENGINE,
            },
        },
    }

@app.get("/api/config/model/filter")
async def get_model_filter_config(user=Depends(get_admin_user)):
    return {
        "enabled": app.state.config.ENABLE_MODEL_FILTER,
        "models": app.state.config.MODEL_FILTER_LIST,
    }


class ModelFilterConfigForm(BaseModel):
    enabled: bool
    models: List[str]


@app.post("/api/config/model/filter")
async def update_model_filter_config(
    form_data: ModelFilterConfigForm, user=Depends(get_admin_user)
):
    app.state.config.ENABLE_MODEL_FILTER = form_data.enabled
    app.state.config.MODEL_FILTER_LIST = form_data.models

    return {
        "enabled": app.state.config.ENABLE_MODEL_FILTER,
        "models": app.state.config.MODEL_FILTER_LIST,
    }


@app.get("/api/webhook")
async def get_webhook_url(user=Depends(get_admin_user)):
    return {
        "url": app.state.config.WEBHOOK_URL,
    }


class UrlForm(BaseModel):
    url: str


@app.post("/api/webhook")
async def update_webhook_url(form_data: UrlForm, user=Depends(get_admin_user)):
    app.state.config.WEBHOOK_URL = form_data.url
    webui_app.state.WEBHOOK_URL = app.state.config.WEBHOOK_URL
    return {"url": app.state.config.WEBHOOK_URL}

@app.get("/api/version")
async def get_app_version():
    return {
        "version": VERSION,
    }

@app.get("/api/changelog")
async def get_app_changelog():
    return {key: CHANGELOG[key] for idx, key in enumerate(CHANGELOG) if idx < 5}


@app.get("/api/version/updates")
async def get_app_latest_release_version():
    try:
        async with aiohttp.ClientSession(trust_env=True) as session:
            async with session.get(
                "https://api.github.com/repos/open-webui/open-webui/releases/latest"
            ) as response:
                response.raise_for_status()
                data = await response.json()
                latest_version = data["tag_name"]

                return {"current": VERSION, "latest": latest_version[1:]}
    except aiohttp.ClientError as e:
        raise HTTPException(
            status_code=status.HTTP_503_SERVICE_UNAVAILABLE,
            detail=ERROR_MESSAGES.RATE_LIMIT_EXCEEDED,
        )

@app.get("/manifest.json")
async def get_manifest_json():
    return {
        "name": WEBUI_NAME,
        "short_name": WEBUI_NAME,
        "start_url": "/",
        "display": "standalone",
        "background_color": "#343541",
        "theme_color": "#343541",
        "orientation": "portrait-primary",
        "icons": [{"src": "/static/logo.png", "type": "image/png", "sizes": "500x500"}],
    }


@app.get("/opensearch.xml")
async def get_opensearch_xml():
    xml_content = rf"""
    <OpenSearchDescription xmlns="http://a9.com/-/spec/opensearch/1.1/" xmlns:moz="http://www.mozilla.org/2006/browser/search/">
    <ShortName>{WEBUI_NAME}</ShortName>
    <Description>Search {WEBUI_NAME}</Description>
    <InputEncoding>UTF-8</InputEncoding>
    <Image width="16" height="16" type="image/x-icon">{WEBUI_URL}/favicon.png</Image>
    <Url type="text/html" method="get" template="{WEBUI_URL}/?q={"{searchTerms}"}"/>
    <moz:SearchForm>{WEBUI_URL}</moz:SearchForm>
    </OpenSearchDescription>
    """
    return Response(content=xml_content, media_type="application/xml")

@app.get("/health")
async def healthcheck():
    return {"status": True}


app.mount("/static", StaticFiles(directory=STATIC_DIR), name="static")
app.mount("/cache", StaticFiles(directory=CACHE_DIR), name="cache")

if os.path.exists(FRONTEND_BUILD_DIR):
    mimetypes.add_type("text/javascript", ".js")
    app.mount(
        "/",
        SPAStaticFiles(directory=FRONTEND_BUILD_DIR, html=True),
        name="spa-static-files",
    )
else:
    log.warning(
        f"Frontend build directory not found at '{FRONTEND_BUILD_DIR}'. Serving API only."
    )

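
# Local development entry point: a minimal sketch assuming uvicorn is
# installed. In normal deployments this app is served by an external ASGI
# server instead; the host and port below are illustrative.
if __name__ == "__main__":
    import uvicorn

    uvicorn.run(app, host="0.0.0.0", port=8080)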