from contextlib import asynccontextmanager
from bs4 import BeautifulSoup
import json
import markdown
import time
import os
import sys
import logging
import aiohttp
import requests
import mimetypes
import shutil
import uuid
import inspect
import asyncio

from fastapi import FastAPI, Request, Depends, status, UploadFile, File, Form
from fastapi.staticfiles import StaticFiles
from fastapi.responses import JSONResponse
from fastapi import HTTPException
from fastapi.middleware.wsgi import WSGIMiddleware
from fastapi.middleware.cors import CORSMiddleware

from starlette.exceptions import HTTPException as StarletteHTTPException
from starlette.middleware.base import BaseHTTPMiddleware
from starlette.responses import StreamingResponse, Response

from apps.socket.main import app as socket_app
from apps.ollama.main import (
    app as ollama_app,
    OpenAIChatCompletionForm,
    get_all_models as get_ollama_models,
    generate_openai_chat_completion as generate_ollama_chat_completion,
)
from apps.openai.main import (
    app as openai_app,
    get_all_models as get_openai_models,
    generate_chat_completion as generate_openai_chat_completion,
)
from apps.audio.main import app as audio_app
from apps.images.main import app as images_app
from apps.rag.main import app as rag_app
from apps.webui.main import app as webui_app

from pydantic import BaseModel
from typing import List, Optional

from apps.webui.models.models import Models, ModelModel
from apps.webui.models.tools import Tools
from apps.webui.utils import load_toolkit_module_by_id

from utils.utils import (
    get_admin_user,
    get_verified_user,
    get_current_user,
    get_http_authorization_cred,
)
from utils.task import (
    title_generation_template,
    search_query_generation_template,
    tools_function_calling_generation_template,
)
from utils.misc import get_last_user_message, add_or_update_system_message

from apps.rag.utils import get_rag_context, rag_template

from config import (
    CONFIG_DATA,
    WEBUI_NAME,
    WEBUI_URL,
    WEBUI_AUTH,
    ENV,
    VERSION,
    CHANGELOG,
    FRONTEND_BUILD_DIR,
    UPLOAD_DIR,
    CACHE_DIR,
    STATIC_DIR,
    ENABLE_OPENAI_API,
    ENABLE_OLLAMA_API,
    ENABLE_MODEL_FILTER,
    MODEL_FILTER_LIST,
    GLOBAL_LOG_LEVEL,
    SRC_LOG_LEVELS,
    WEBHOOK_URL,
    ENABLE_ADMIN_EXPORT,
    WEBUI_BUILD_HASH,
    TASK_MODEL,
    TASK_MODEL_EXTERNAL,
    TITLE_GENERATION_PROMPT_TEMPLATE,
    SEARCH_QUERY_GENERATION_PROMPT_TEMPLATE,
    SEARCH_QUERY_PROMPT_LENGTH_THRESHOLD,
    TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE,
    AppConfig,
)
from constants import ERROR_MESSAGES

logging.basicConfig(stream=sys.stdout, level=GLOBAL_LOG_LEVEL)
log = logging.getLogger(__name__)
log.setLevel(SRC_LOG_LEVELS["MAIN"])


class SPAStaticFiles(StaticFiles):
    async def get_response(self, path: str, scope):
        try:
            return await super().get_response(path, scope)
        except (HTTPException, StarletteHTTPException) as ex:
            if ex.status_code == 404:
                return await super().get_response("index.html", scope)
            else:
                raise ex
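
# NOTE: SPAStaticFiles above implements the standard single-page-app fallback:
# any path that would otherwise 404 is answered with index.html so that
# client-side routes still resolve on hard refresh or deep links.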


print(
    rf"""
  ___                    __        __   _     _   _ ___
 / _ \ _ __   ___ _ __   \ \      / /__| |__ | | | |_ _|
| | | | '_ \ / _ \ '_ \   \ \ /\ / / _ \ '_ \| | | || |
| |_| | |_) |  __/ | | |   \ V  V /  __/ |_) | |_| || |
 \___/| .__/ \___|_| |_|    \_/\_/ \___|_.__/ \___/|___|
      |_|

v{VERSION} - building the best open-source AI user interface.
{f"Commit: {WEBUI_BUILD_HASH}" if WEBUI_BUILD_HASH != "dev-build" else ""}
https://github.com/open-webui/open-webui
"""
)


@asynccontextmanager
async def lifespan(app: FastAPI):
    yield


app = FastAPI(
    docs_url="/docs" if ENV == "dev" else None, redoc_url=None, lifespan=lifespan
)

app.state.config = AppConfig()

app.state.config.ENABLE_OPENAI_API = ENABLE_OPENAI_API
app.state.config.ENABLE_OLLAMA_API = ENABLE_OLLAMA_API

app.state.config.ENABLE_MODEL_FILTER = ENABLE_MODEL_FILTER
app.state.config.MODEL_FILTER_LIST = MODEL_FILTER_LIST

app.state.config.WEBHOOK_URL = WEBHOOK_URL

app.state.config.TASK_MODEL = TASK_MODEL
app.state.config.TASK_MODEL_EXTERNAL = TASK_MODEL_EXTERNAL
app.state.config.TITLE_GENERATION_PROMPT_TEMPLATE = TITLE_GENERATION_PROMPT_TEMPLATE
app.state.config.SEARCH_QUERY_GENERATION_PROMPT_TEMPLATE = (
    SEARCH_QUERY_GENERATION_PROMPT_TEMPLATE
)
app.state.config.SEARCH_QUERY_PROMPT_LENGTH_THRESHOLD = (
    SEARCH_QUERY_PROMPT_LENGTH_THRESHOLD
)
app.state.config.TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE = (
    TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE
)

app.state.MODELS = {}

origins = ["*"]
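
# NOTE: origins is passed to CORSMiddleware below. A wildcard origin combined
# with allow_credentials=True is very permissive; deployments that expose this
# API publicly may want to restrict it to known frontend origins.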


async def get_function_call_response(
    messages, files, tool_id, template, task_model_id, user
):
    tool = Tools.get_tool_by_id(tool_id)
    tools_specs = json.dumps(tool.specs, indent=2)
    content = tools_function_calling_generation_template(template, tools_specs)

    user_message = get_last_user_message(messages)
    prompt = (
        "History:\n"
        + "\n".join(
            [
                f"{message['role'].upper()}: \"\"\"{message['content']}\"\"\""
                for message in messages[::-1][:4]
            ]
        )
        + f"\nQuery: {user_message}"
    )

    print(prompt)

    payload = {
        "model": task_model_id,
        "messages": [
            {"role": "system", "content": content},
            {"role": "user", "content": f"Query: {prompt}"},
        ],
        "stream": False,
    }

    try:
        payload = filter_pipeline(payload, user)
    except Exception as e:
        raise e

    model = app.state.MODELS[task_model_id]

    response = None
    try:
        if model["owned_by"] == "ollama":
            response = await generate_ollama_chat_completion(payload, user=user)
        else:
            response = await generate_openai_chat_completion(payload, user=user)

        content = None

        if hasattr(response, "body_iterator"):
            async for chunk in response.body_iterator:
                data = json.loads(chunk.decode("utf-8"))
                content = data["choices"][0]["message"]["content"]

            # Cleanup any remaining background tasks if necessary
            if response.background is not None:
                await response.background()
        else:
            content = response["choices"][0]["message"]["content"]

        # Parse the function response
        if content is not None:
            print(f"content: {content}")
            result = json.loads(content)
            print(result)

            # Call the function
            if "name" in result:
                if tool_id in webui_app.state.TOOLS:
                    toolkit_module = webui_app.state.TOOLS[tool_id]
                else:
                    toolkit_module = load_toolkit_module_by_id(tool_id)
                    webui_app.state.TOOLS[tool_id] = toolkit_module

                file_handler = False
                # check if toolkit_module has file_handler self variable
                if hasattr(toolkit_module, "file_handler"):
                    file_handler = True
                    print("file_handler: ", file_handler)

                function = getattr(toolkit_module, result["name"])
                function_result = None
                try:
                    # Get the signature of the function
                    sig = inspect.signature(function)
                    params = result["parameters"]

                    if "__user__" in sig.parameters:
                        # Call the function with the '__user__' parameter included
                        params = {
                            **params,
                            "__user__": {
                                "id": user.id,
                                "email": user.email,
                                "name": user.name,
                                "role": user.role,
                            },
                        }

                    if "__messages__" in sig.parameters:
                        # Call the function with the '__messages__' parameter included
                        params = {
                            **params,
                            "__messages__": messages,
                        }

                    if "__files__" in sig.parameters:
                        # Call the function with the '__files__' parameter included
                        params = {
                            **params,
                            "__files__": files,
                        }

                    if "__id__" in sig.parameters:
                        # Call the function with the '__id__' parameter included
                        params = {
                            **params,
                            "__id__": tool_id,
                        }

                    function_result = function(**params)
                except Exception as e:
                    print(e)

                # Add the function result to the system prompt
                if function_result is not None:
                    return function_result, file_handler
    except Exception as e:
        print(f"Error: {e}")

    return None, False
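
# NOTE: the helper above drives tool calling in two model round trips: the
# task model is prompted with the tool's JSON specs plus recent chat history,
# its reply is parsed as JSON ({"name": ..., "parameters": ...}), and the
# matching toolkit function is invoked. The reserved parameters __user__,
# __messages__, __files__ and __id__ are injected only when the target
# function declares them in its signature.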


class ChatCompletionMiddleware(BaseHTTPMiddleware):
    async def dispatch(self, request: Request, call_next):
        return_citations = False
        citations = []

        if request.method == "POST" and (
            "/ollama/api/chat" in request.url.path
            or "/chat/completions" in request.url.path
        ):
            log.debug(f"request.url.path: {request.url.path}")

            # Read the original request body
            body = await request.body()
            # Decode body to string
            body_str = body.decode("utf-8")
            # Parse string to JSON
            data = json.loads(body_str) if body_str else {}

            user = get_current_user(
                get_http_authorization_cred(request.headers.get("Authorization"))
            )

            # Remove the citations from the body
            return_citations = data.get("citations", False)
            if "citations" in data:
                del data["citations"]

            # Set the task model
            task_model_id = data["model"]
            if task_model_id not in app.state.MODELS:
                raise HTTPException(
                    status_code=status.HTTP_404_NOT_FOUND,
                    detail="Model not found",
                )

            # Check if the user has a custom task model
            # If the user has a custom task model, use that model
            if app.state.MODELS[task_model_id]["owned_by"] == "ollama":
                if (
                    app.state.config.TASK_MODEL
                    and app.state.config.TASK_MODEL in app.state.MODELS
                ):
                    task_model_id = app.state.config.TASK_MODEL
            else:
                if (
                    app.state.config.TASK_MODEL_EXTERNAL
                    and app.state.config.TASK_MODEL_EXTERNAL in app.state.MODELS
                ):
                    task_model_id = app.state.config.TASK_MODEL_EXTERNAL

            prompt = get_last_user_message(data["messages"])
            context = ""

            # If tool_ids field is present, call the functions
            skip_files = False
            if "tool_ids" in data:
                print(data["tool_ids"])
                for tool_id in data["tool_ids"]:
                    print(tool_id)
                    try:
                        response, file_handler = await get_function_call_response(
                            messages=data["messages"],
                            files=data.get("files", []),
                            tool_id=tool_id,
                            template=app.state.config.TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE,
                            task_model_id=task_model_id,
                            user=user,
                        )

                        print(file_handler)
                        if isinstance(response, str):
                            context += ("\n" if context != "" else "") + response

                        if file_handler:
                            skip_files = True
                    except Exception as e:
                        print(f"Error: {e}")

                del data["tool_ids"]
                print(f"tool_context: {context}")

            # If files field is present, generate RAG completions
            # If skip_files is True, skip the RAG completions
            if "files" in data:
                if not skip_files:
                    data = {**data}
                    rag_context, citations = get_rag_context(
                        files=data["files"],
                        messages=data["messages"],
                        embedding_function=rag_app.state.EMBEDDING_FUNCTION,
                        k=rag_app.state.config.TOP_K,
                        reranking_function=rag_app.state.sentence_transformer_rf,
                        r=rag_app.state.config.RELEVANCE_THRESHOLD,
                        hybrid_search=rag_app.state.config.ENABLE_RAG_HYBRID_SEARCH,
                    )
                    if rag_context:
                        context += ("\n" if context != "" else "") + rag_context

                    log.debug(f"rag_context: {rag_context}, citations: {citations}")
                else:
                    return_citations = False

                del data["files"]

            if context != "":
                system_prompt = rag_template(
                    rag_app.state.config.RAG_TEMPLATE, context, prompt
                )
                print(system_prompt)
                data["messages"] = add_or_update_system_message(
                    f"\n{system_prompt}", data["messages"]
                )

            modified_body_bytes = json.dumps(data).encode("utf-8")
            # Replace the request body with the modified one
            request._body = modified_body_bytes
            # Set custom header to ensure content-length matches new body length
            request.headers.__dict__["_list"] = [
                (b"content-length", str(len(modified_body_bytes)).encode("utf-8")),
                *[
                    (k, v)
                    for k, v in request.headers.raw
                    if k.lower() != b"content-length"
                ],
            ]

        response = await call_next(request)

        if return_citations:
            # Inject the citations into the response
            if isinstance(response, StreamingResponse):
                # If it's a streaming response, inject it as SSE event or NDJSON line
                content_type = response.headers.get("Content-Type", "")
                if "text/event-stream" in content_type:
                    return StreamingResponse(
                        self.openai_stream_wrapper(response.body_iterator, citations),
                    )
                if "application/x-ndjson" in content_type:
                    return StreamingResponse(
                        self.ollama_stream_wrapper(response.body_iterator, citations),
                    )

        return response

    async def _receive(self, body: bytes):
        return {"type": "http.request", "body": body, "more_body": False}

    async def openai_stream_wrapper(self, original_generator, citations):
        yield f"data: {json.dumps({'citations': citations})}\n\n"
        async for data in original_generator:
            yield data

    async def ollama_stream_wrapper(self, original_generator, citations):
        yield f"{json.dumps({'citations': citations})}\n"
        async for data in original_generator:
            yield data
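
# NOTE: ChatCompletionMiddleware (and PipelineMiddleware below) mutate the
# private request._body and patch the raw header list so that Content-Length
# matches the rewritten JSON payload; downstream handlers would otherwise read
# a truncated or padded body.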

app.add_middleware(ChatCompletionMiddleware)


def filter_pipeline(payload, user):
    user = {"id": user.id, "name": user.name, "role": user.role}
    model_id = payload["model"]
    filters = [
        model
        for model in app.state.MODELS.values()
        if "pipeline" in model
        and "type" in model["pipeline"]
        and model["pipeline"]["type"] == "filter"
        and (
            model["pipeline"]["pipelines"] == ["*"]
            or any(
                model_id == target_model_id
                for target_model_id in model["pipeline"]["pipelines"]
            )
        )
    ]
    sorted_filters = sorted(filters, key=lambda x: x["pipeline"]["priority"])

    model = app.state.MODELS[model_id]

    if "pipeline" in model:
        sorted_filters.append(model)

    for filter in sorted_filters:
        r = None
        try:
            urlIdx = filter["urlIdx"]

            url = openai_app.state.config.OPENAI_API_BASE_URLS[urlIdx]
            key = openai_app.state.config.OPENAI_API_KEYS[urlIdx]

            if key != "":
                headers = {"Authorization": f"Bearer {key}"}
                r = requests.post(
                    f"{url}/{filter['id']}/filter/inlet",
                    headers=headers,
                    json={
                        "user": user,
                        "body": payload,
                    },
                )

                r.raise_for_status()
                payload = r.json()
        except Exception as e:
            # Handle connection error here
            print(f"Connection error: {e}")

            if r is not None:
                res = None
                try:
                    res = r.json()
                except Exception:
                    pass
                if res is not None and "detail" in res:
                    raise Exception(r.status_code, res["detail"])

    if "pipeline" not in app.state.MODELS[model_id]:
        if "chat_id" in payload:
            del payload["chat_id"]

        if "title" in payload:
            del payload["title"]

        if "task" in payload:
            del payload["task"]

    return payload
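
# NOTE: filter_pipeline applies "filter"-type pipeline models to an incoming
# payload in ascending priority order, finishing with the target model itself
# when it is a pipeline; each hop POSTs the payload to the pipeline server's
# /filter/inlet endpoint and carries the (possibly rewritten) body forward.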


class PipelineMiddleware(BaseHTTPMiddleware):
    async def dispatch(self, request: Request, call_next):
        if request.method == "POST" and (
            "/ollama/api/chat" in request.url.path
            or "/chat/completions" in request.url.path
        ):
            log.debug(f"request.url.path: {request.url.path}")

            # Read the original request body
            body = await request.body()
            # Decode body to string
            body_str = body.decode("utf-8")
            # Parse string to JSON
            data = json.loads(body_str) if body_str else {}

            user = get_current_user(
                get_http_authorization_cred(request.headers.get("Authorization"))
            )

            try:
                data = filter_pipeline(data, user)
            except Exception as e:
                return JSONResponse(
                    status_code=e.args[0],
                    content={"detail": e.args[1]},
                )

            modified_body_bytes = json.dumps(data).encode("utf-8")
            # Replace the request body with the modified one
            request._body = modified_body_bytes
            # Set custom header to ensure content-length matches new body length
            request.headers.__dict__["_list"] = [
                (b"content-length", str(len(modified_body_bytes)).encode("utf-8")),
                *[
                    (k, v)
                    for k, v in request.headers.raw
                    if k.lower() != b"content-length"
                ],
            ]

        response = await call_next(request)
        return response

    async def _receive(self, body: bytes):
        return {"type": "http.request", "body": body, "more_body": False}


app.add_middleware(PipelineMiddleware)

app.add_middleware(
    CORSMiddleware,
    allow_origins=origins,
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)


@app.middleware("http")
async def check_url(request: Request, call_next):
    if len(app.state.MODELS) == 0:
        await get_all_models()

    start_time = int(time.time())
    response = await call_next(request)
    process_time = int(time.time()) - start_time
    response.headers["X-Process-Time"] = str(process_time)

    return response


@app.middleware("http")
async def update_embedding_function(request: Request, call_next):
    response = await call_next(request)
    if "/embedding/update" in request.url.path:
        webui_app.state.EMBEDDING_FUNCTION = rag_app.state.EMBEDDING_FUNCTION
    return response
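
# NOTE: Starlette executes middlewares in reverse order of registration, so
# the two @app.middleware("http") functions above wrap (and run before) the
# CORS, Pipeline and ChatCompletion middlewares added earlier.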

app.mount("/ws", socket_app)

app.mount("/ollama", ollama_app)
app.mount("/openai", openai_app)

app.mount("/images/api/v1", images_app)
app.mount("/audio/api/v1", audio_app)
app.mount("/rag/api/v1", rag_app)

app.mount("/api/v1", webui_app)

webui_app.state.EMBEDDING_FUNCTION = rag_app.state.EMBEDDING_FUNCTION


async def get_all_models():
    openai_models = []
    ollama_models = []

    if app.state.config.ENABLE_OPENAI_API:
        openai_models = await get_openai_models()
        openai_models = openai_models["data"]

    if app.state.config.ENABLE_OLLAMA_API:
        ollama_models = await get_ollama_models()
        ollama_models = [
            {
                "id": model["model"],
                "name": model["name"],
                "object": "model",
                "created": int(time.time()),
                "owned_by": "ollama",
                "ollama": model,
            }
            for model in ollama_models["models"]
        ]

    models = openai_models + ollama_models

    custom_models = Models.get_all_models()
    for custom_model in custom_models:
        if custom_model.base_model_id is None:
            for model in models:
                if (
                    custom_model.id == model["id"]
                    or custom_model.id == model["id"].split(":")[0]
                ):
                    model["name"] = custom_model.name
                    model["info"] = custom_model.model_dump()
        else:
            owned_by = "openai"

            for model in models:
                if (
                    custom_model.base_model_id == model["id"]
                    or custom_model.base_model_id == model["id"].split(":")[0]
                ):
                    owned_by = model["owned_by"]
                    break

            models.append(
                {
                    "id": custom_model.id,
                    "name": custom_model.name,
                    "object": "model",
                    "created": custom_model.created_at,
                    "owned_by": owned_by,
                    "info": custom_model.model_dump(),
                    "preset": True,
                }
            )

    app.state.MODELS = {model["id"]: model for model in models}
    webui_app.state.MODELS = app.state.MODELS

    return models
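
# NOTE: get_all_models merges OpenAI-compatible, Ollama and database-defined
# custom models into one list and caches it on app.state.MODELS keyed by model
# id. A custom model without a base_model_id overrides the metadata of an
# existing model; one with a base_model_id is appended as a preset.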


@app.get("/api/models")
async def get_models(user=Depends(get_verified_user)):
    models = await get_all_models()

    # Filter out filter pipelines
    models = [
        model
        for model in models
        if "pipeline" not in model or model["pipeline"].get("type", None) != "filter"
    ]

    if app.state.config.ENABLE_MODEL_FILTER:
        if user.role == "user":
            models = list(
                filter(
                    lambda model: model["id"] in app.state.config.MODEL_FILTER_LIST,
                    models,
                )
            )
            return {"data": models}

    return {"data": models}


@app.get("/api/task/config")
async def get_task_config(user=Depends(get_verified_user)):
    return {
        "TASK_MODEL": app.state.config.TASK_MODEL,
        "TASK_MODEL_EXTERNAL": app.state.config.TASK_MODEL_EXTERNAL,
        "TITLE_GENERATION_PROMPT_TEMPLATE": app.state.config.TITLE_GENERATION_PROMPT_TEMPLATE,
        "SEARCH_QUERY_GENERATION_PROMPT_TEMPLATE": app.state.config.SEARCH_QUERY_GENERATION_PROMPT_TEMPLATE,
        "SEARCH_QUERY_PROMPT_LENGTH_THRESHOLD": app.state.config.SEARCH_QUERY_PROMPT_LENGTH_THRESHOLD,
        "TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE": app.state.config.TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE,
    }


class TaskConfigForm(BaseModel):
    TASK_MODEL: Optional[str]
    TASK_MODEL_EXTERNAL: Optional[str]
    TITLE_GENERATION_PROMPT_TEMPLATE: str
    SEARCH_QUERY_GENERATION_PROMPT_TEMPLATE: str
    SEARCH_QUERY_PROMPT_LENGTH_THRESHOLD: int
    TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE: str


@app.post("/api/task/config/update")
async def update_task_config(form_data: TaskConfigForm, user=Depends(get_admin_user)):
    app.state.config.TASK_MODEL = form_data.TASK_MODEL
    app.state.config.TASK_MODEL_EXTERNAL = form_data.TASK_MODEL_EXTERNAL
    app.state.config.TITLE_GENERATION_PROMPT_TEMPLATE = (
        form_data.TITLE_GENERATION_PROMPT_TEMPLATE
    )
    app.state.config.SEARCH_QUERY_GENERATION_PROMPT_TEMPLATE = (
        form_data.SEARCH_QUERY_GENERATION_PROMPT_TEMPLATE
    )
    app.state.config.SEARCH_QUERY_PROMPT_LENGTH_THRESHOLD = (
        form_data.SEARCH_QUERY_PROMPT_LENGTH_THRESHOLD
    )
    app.state.config.TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE = (
        form_data.TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE
    )

    return {
        "TASK_MODEL": app.state.config.TASK_MODEL,
        "TASK_MODEL_EXTERNAL": app.state.config.TASK_MODEL_EXTERNAL,
        "TITLE_GENERATION_PROMPT_TEMPLATE": app.state.config.TITLE_GENERATION_PROMPT_TEMPLATE,
        "SEARCH_QUERY_GENERATION_PROMPT_TEMPLATE": app.state.config.SEARCH_QUERY_GENERATION_PROMPT_TEMPLATE,
        "SEARCH_QUERY_PROMPT_LENGTH_THRESHOLD": app.state.config.SEARCH_QUERY_PROMPT_LENGTH_THRESHOLD,
        "TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE": app.state.config.TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE,
    }


@app.post("/api/task/title/completions")
async def generate_title(form_data: dict, user=Depends(get_verified_user)):
    print("generate_title")

    model_id = form_data["model"]
    if model_id not in app.state.MODELS:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail="Model not found",
        )

    # Check if the user has a custom task model
    # If the user has a custom task model, use that model
    if app.state.MODELS[model_id]["owned_by"] == "ollama":
        if app.state.config.TASK_MODEL:
            task_model_id = app.state.config.TASK_MODEL
            if task_model_id in app.state.MODELS:
                model_id = task_model_id
    else:
        if app.state.config.TASK_MODEL_EXTERNAL:
            task_model_id = app.state.config.TASK_MODEL_EXTERNAL
            if task_model_id in app.state.MODELS:
                model_id = task_model_id

    print(model_id)
    model = app.state.MODELS[model_id]

    template = app.state.config.TITLE_GENERATION_PROMPT_TEMPLATE

    content = title_generation_template(
        template,
        form_data["prompt"],
        {
            "name": user.name,
            "location": user.info.get("location") if user.info else None,
        },
    )

    payload = {
        "model": model_id,
        "messages": [{"role": "user", "content": content}],
        "stream": False,
        "max_tokens": 50,
        "chat_id": form_data.get("chat_id", None),
        "title": True,
    }

    log.debug(payload)

    try:
        payload = filter_pipeline(payload, user)
    except Exception as e:
        return JSONResponse(
            status_code=e.args[0],
            content={"detail": e.args[1]},
        )

    if model["owned_by"] == "ollama":
        return await generate_ollama_chat_completion(payload, user=user)
    else:
        return await generate_openai_chat_completion(payload, user=user)


@app.post("/api/task/query/completions")
async def generate_search_query(form_data: dict, user=Depends(get_verified_user)):
    print("generate_search_query")

    if len(form_data["prompt"]) < app.state.config.SEARCH_QUERY_PROMPT_LENGTH_THRESHOLD:
        raise HTTPException(
            status_code=status.HTTP_400_BAD_REQUEST,
            detail=f"Skip search query generation for short prompts (< {app.state.config.SEARCH_QUERY_PROMPT_LENGTH_THRESHOLD} characters)",
        )

    model_id = form_data["model"]
    if model_id not in app.state.MODELS:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail="Model not found",
        )

    # Check if the user has a custom task model
    # If the user has a custom task model, use that model
    if app.state.MODELS[model_id]["owned_by"] == "ollama":
        if app.state.config.TASK_MODEL:
            task_model_id = app.state.config.TASK_MODEL
            if task_model_id in app.state.MODELS:
                model_id = task_model_id
    else:
        if app.state.config.TASK_MODEL_EXTERNAL:
            task_model_id = app.state.config.TASK_MODEL_EXTERNAL
            if task_model_id in app.state.MODELS:
                model_id = task_model_id

    print(model_id)
    model = app.state.MODELS[model_id]

    template = app.state.config.SEARCH_QUERY_GENERATION_PROMPT_TEMPLATE

    content = search_query_generation_template(
        template, form_data["prompt"], {"name": user.name}
    )

    payload = {
        "model": model_id,
        "messages": [{"role": "user", "content": content}],
        "stream": False,
        "max_tokens": 30,
        "task": True,
    }

    print(payload)

    try:
        payload = filter_pipeline(payload, user)
    except Exception as e:
        return JSONResponse(
            status_code=e.args[0],
            content={"detail": e.args[1]},
        )

    if model["owned_by"] == "ollama":
        return await generate_ollama_chat_completion(payload, user=user)
    else:
        return await generate_openai_chat_completion(payload, user=user)


@app.post("/api/task/emoji/completions")
async def generate_emoji(form_data: dict, user=Depends(get_verified_user)):
    print("generate_emoji")

    model_id = form_data["model"]
    if model_id not in app.state.MODELS:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail="Model not found",
        )

    # Check if the user has a custom task model
    # If the user has a custom task model, use that model
    if app.state.MODELS[model_id]["owned_by"] == "ollama":
        if app.state.config.TASK_MODEL:
            task_model_id = app.state.config.TASK_MODEL
            if task_model_id in app.state.MODELS:
                model_id = task_model_id
    else:
        if app.state.config.TASK_MODEL_EXTERNAL:
            task_model_id = app.state.config.TASK_MODEL_EXTERNAL
            if task_model_id in app.state.MODELS:
                model_id = task_model_id

    print(model_id)
    model = app.state.MODELS[model_id]

    template = '''
Your task is to reflect the speaker's likely facial expression through a fitting emoji. Interpret emotions from the message and reflect their facial expression using fitting, diverse emojis (e.g., 😊, 😢, 😡, 😱).
Message: """{{prompt}}"""
'''

    content = title_generation_template(
        template,
        form_data["prompt"],
        {
            "name": user.name,
            "location": user.info.get("location") if user.info else None,
        },
    )

    payload = {
        "model": model_id,
        "messages": [{"role": "user", "content": content}],
        "stream": False,
        "max_tokens": 4,
        "chat_id": form_data.get("chat_id", None),
        "task": True,
    }

    log.debug(payload)

    try:
        payload = filter_pipeline(payload, user)
    except Exception as e:
        return JSONResponse(
            status_code=e.args[0],
            content={"detail": e.args[1]},
        )

    if model["owned_by"] == "ollama":
        return await generate_ollama_chat_completion(payload, user=user)
    else:
        return await generate_openai_chat_completion(payload, user=user)


@app.post("/api/task/tools/completions")
async def get_tools_function_calling(form_data: dict, user=Depends(get_verified_user)):
    print("get_tools_function_calling")

    model_id = form_data["model"]
    if model_id not in app.state.MODELS:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail="Model not found",
        )

    # Check if the user has a custom task model
    # If the user has a custom task model, use that model
    if app.state.MODELS[model_id]["owned_by"] == "ollama":
        if app.state.config.TASK_MODEL:
            task_model_id = app.state.config.TASK_MODEL
            if task_model_id in app.state.MODELS:
                model_id = task_model_id
    else:
        if app.state.config.TASK_MODEL_EXTERNAL:
            task_model_id = app.state.config.TASK_MODEL_EXTERNAL
            if task_model_id in app.state.MODELS:
                model_id = task_model_id

    print(model_id)
    template = app.state.config.TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE

    try:
        context, file_handler = await get_function_call_response(
            form_data["messages"],
            form_data.get("files", []),
            form_data["tool_id"],
            template,
            model_id,
            user,
        )
        return context
    except Exception as e:
        return JSONResponse(
            status_code=e.args[0],
            content={"detail": e.args[1]},
        )


@app.post("/api/chat/completions")
async def generate_chat_completions(form_data: dict, user=Depends(get_verified_user)):
    model_id = form_data["model"]
    if model_id not in app.state.MODELS:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail="Model not found",
        )

    model = app.state.MODELS[model_id]
    print(model)

    if model["owned_by"] == "ollama":
        return await generate_ollama_chat_completion(form_data, user=user)
    else:
        return await generate_openai_chat_completion(form_data, user=user)


@app.post("/api/chat/completed")
async def chat_completed(form_data: dict, user=Depends(get_verified_user)):
    data = form_data
    model_id = data["model"]

    filters = [
        model
        for model in app.state.MODELS.values()
        if "pipeline" in model
        and "type" in model["pipeline"]
        and model["pipeline"]["type"] == "filter"
        and (
            model["pipeline"]["pipelines"] == ["*"]
            or any(
                model_id == target_model_id
                for target_model_id in model["pipeline"]["pipelines"]
            )
        )
    ]
    sorted_filters = sorted(filters, key=lambda x: x["pipeline"]["priority"])

    print(model_id)

    if model_id in app.state.MODELS:
        model = app.state.MODELS[model_id]
        if "pipeline" in model:
            sorted_filters = [model] + sorted_filters

    for filter in sorted_filters:
        r = None
        try:
            urlIdx = filter["urlIdx"]

            url = openai_app.state.config.OPENAI_API_BASE_URLS[urlIdx]
            key = openai_app.state.config.OPENAI_API_KEYS[urlIdx]

            if key != "":
                headers = {"Authorization": f"Bearer {key}"}
                r = requests.post(
                    f"{url}/{filter['id']}/filter/outlet",
                    headers=headers,
                    json={
                        "user": {"id": user.id, "name": user.name, "role": user.role},
                        "body": data,
                    },
                )

                r.raise_for_status()
                data = r.json()
        except Exception as e:
            # Handle connection error here
            print(f"Connection error: {e}")

            if r is not None:
                try:
                    res = r.json()
                    if "detail" in res:
                        return JSONResponse(
                            status_code=r.status_code,
                            content=res,
                        )
                except Exception:
                    pass

    return data


@app.get("/api/pipelines/list")
async def get_pipelines_list(user=Depends(get_admin_user)):
    responses = await get_openai_models(raw=True)
    print(responses)

    urlIdxs = [
        idx
        for idx, response in enumerate(responses)
        if response is not None and "pipelines" in response
    ]

    return {
        "data": [
            {
                "url": openai_app.state.config.OPENAI_API_BASE_URLS[urlIdx],
                "idx": urlIdx,
            }
            for urlIdx in urlIdxs
        ]
    }


@app.post("/api/pipelines/upload")
async def upload_pipeline(
    urlIdx: int = Form(...), file: UploadFile = File(...), user=Depends(get_admin_user)
):
    print("upload_pipeline", urlIdx, file.filename)
    # Check if the uploaded file is a python file
    if not file.filename.endswith(".py"):
        raise HTTPException(
            status_code=status.HTTP_400_BAD_REQUEST,
            detail="Only Python (.py) files are allowed.",
        )

    upload_folder = f"{CACHE_DIR}/pipelines"
    os.makedirs(upload_folder, exist_ok=True)
    file_path = os.path.join(upload_folder, file.filename)

    r = None
    try:
        # Save the uploaded file
        with open(file_path, "wb") as buffer:
            shutil.copyfileobj(file.file, buffer)

        url = openai_app.state.config.OPENAI_API_BASE_URLS[urlIdx]
        key = openai_app.state.config.OPENAI_API_KEYS[urlIdx]

        headers = {"Authorization": f"Bearer {key}"}

        with open(file_path, "rb") as f:
            files = {"file": f}
            r = requests.post(f"{url}/pipelines/upload", headers=headers, files=files)

        r.raise_for_status()
        data = r.json()

        return {**data}
    except Exception as e:
        # Handle connection error here
        print(f"Connection error: {e}")

        detail = "Pipeline not found"
        if r is not None:
            try:
                res = r.json()
                if "detail" in res:
                    detail = res["detail"]
            except Exception:
                pass

        raise HTTPException(
            status_code=(r.status_code if r is not None else status.HTTP_404_NOT_FOUND),
            detail=detail,
        )
    finally:
        # Ensure the file is deleted after the upload is completed or on failure
        if os.path.exists(file_path):
            os.remove(file_path)
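
# NOTE: upload_pipeline stages the .py file under CACHE_DIR only long enough
# to relay it to the remote pipelines server; the finally block removes the
# local copy whether the relay succeeded or failed.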


class AddPipelineForm(BaseModel):
    url: str
    urlIdx: int


@app.post("/api/pipelines/add")
async def add_pipeline(form_data: AddPipelineForm, user=Depends(get_admin_user)):
    r = None
    try:
        urlIdx = form_data.urlIdx

        url = openai_app.state.config.OPENAI_API_BASE_URLS[urlIdx]
        key = openai_app.state.config.OPENAI_API_KEYS[urlIdx]

        headers = {"Authorization": f"Bearer {key}"}
        r = requests.post(
            f"{url}/pipelines/add", headers=headers, json={"url": form_data.url}
        )

        r.raise_for_status()
        data = r.json()

        return {**data}
    except Exception as e:
        # Handle connection error here
        print(f"Connection error: {e}")

        detail = "Pipeline not found"
        if r is not None:
            try:
                res = r.json()
                if "detail" in res:
                    detail = res["detail"]
            except Exception:
                pass

        raise HTTPException(
            status_code=(r.status_code if r is not None else status.HTTP_404_NOT_FOUND),
            detail=detail,
        )


class DeletePipelineForm(BaseModel):
    id: str
    urlIdx: int


@app.delete("/api/pipelines/delete")
async def delete_pipeline(form_data: DeletePipelineForm, user=Depends(get_admin_user)):
    r = None
    try:
        urlIdx = form_data.urlIdx

        url = openai_app.state.config.OPENAI_API_BASE_URLS[urlIdx]
        key = openai_app.state.config.OPENAI_API_KEYS[urlIdx]

        headers = {"Authorization": f"Bearer {key}"}
        r = requests.delete(
            f"{url}/pipelines/delete", headers=headers, json={"id": form_data.id}
        )

        r.raise_for_status()
        data = r.json()

        return {**data}
    except Exception as e:
        # Handle connection error here
        print(f"Connection error: {e}")

        detail = "Pipeline not found"
        if r is not None:
            try:
                res = r.json()
                if "detail" in res:
                    detail = res["detail"]
            except Exception:
                pass

        raise HTTPException(
            status_code=(r.status_code if r is not None else status.HTTP_404_NOT_FOUND),
            detail=detail,
        )


@app.get("/api/pipelines")
async def get_pipelines(urlIdx: Optional[int] = None, user=Depends(get_admin_user)):
    r = None
    try:
        url = openai_app.state.config.OPENAI_API_BASE_URLS[urlIdx]
        key = openai_app.state.config.OPENAI_API_KEYS[urlIdx]

        headers = {"Authorization": f"Bearer {key}"}
        r = requests.get(f"{url}/pipelines", headers=headers)

        r.raise_for_status()
        data = r.json()

        return {**data}
    except Exception as e:
        # Handle connection error here
        print(f"Connection error: {e}")

        detail = "Pipeline not found"
        if r is not None:
            try:
                res = r.json()
                if "detail" in res:
                    detail = res["detail"]
            except Exception:
                pass

        raise HTTPException(
            status_code=(r.status_code if r is not None else status.HTTP_404_NOT_FOUND),
            detail=detail,
        )


@app.get("/api/pipelines/{pipeline_id}/valves")
async def get_pipeline_valves(
    urlIdx: Optional[int], pipeline_id: str, user=Depends(get_admin_user)
):
    # Refresh the model cache before talking to the pipeline server
    models = await get_all_models()

    r = None
    try:
        url = openai_app.state.config.OPENAI_API_BASE_URLS[urlIdx]
        key = openai_app.state.config.OPENAI_API_KEYS[urlIdx]

        headers = {"Authorization": f"Bearer {key}"}
        r = requests.get(f"{url}/{pipeline_id}/valves", headers=headers)

        r.raise_for_status()
        data = r.json()

        return {**data}
    except Exception as e:
        # Handle connection error here
        print(f"Connection error: {e}")

        detail = "Pipeline not found"
        if r is not None:
            try:
                res = r.json()
                if "detail" in res:
                    detail = res["detail"]
            except Exception:
                pass

        raise HTTPException(
            status_code=(r.status_code if r is not None else status.HTTP_404_NOT_FOUND),
            detail=detail,
        )


@app.get("/api/pipelines/{pipeline_id}/valves/spec")
async def get_pipeline_valves_spec(
    urlIdx: Optional[int], pipeline_id: str, user=Depends(get_admin_user)
):
    # Refresh the model cache before talking to the pipeline server
    models = await get_all_models()

    r = None
    try:
        url = openai_app.state.config.OPENAI_API_BASE_URLS[urlIdx]
        key = openai_app.state.config.OPENAI_API_KEYS[urlIdx]

        headers = {"Authorization": f"Bearer {key}"}
        r = requests.get(f"{url}/{pipeline_id}/valves/spec", headers=headers)

        r.raise_for_status()
        data = r.json()

        return {**data}
    except Exception as e:
        # Handle connection error here
        print(f"Connection error: {e}")

        detail = "Pipeline not found"
        if r is not None:
            try:
                res = r.json()
                if "detail" in res:
                    detail = res["detail"]
            except Exception:
                pass

        raise HTTPException(
            status_code=(r.status_code if r is not None else status.HTTP_404_NOT_FOUND),
            detail=detail,
        )


@app.post("/api/pipelines/{pipeline_id}/valves/update")
async def update_pipeline_valves(
    urlIdx: Optional[int],
    pipeline_id: str,
    form_data: dict,
    user=Depends(get_admin_user),
):
    # Refresh the model cache before talking to the pipeline server
    models = await get_all_models()

    r = None
    try:
        url = openai_app.state.config.OPENAI_API_BASE_URLS[urlIdx]
        key = openai_app.state.config.OPENAI_API_KEYS[urlIdx]

        headers = {"Authorization": f"Bearer {key}"}
        r = requests.post(
            f"{url}/{pipeline_id}/valves/update",
            headers=headers,
            json={**form_data},
        )

        r.raise_for_status()
        data = r.json()

        return {**data}
    except Exception as e:
        # Handle connection error here
        print(f"Connection error: {e}")

        detail = "Pipeline not found"
        if r is not None:
            try:
                res = r.json()
                if "detail" in res:
                    detail = res["detail"]
            except Exception:
                pass

        raise HTTPException(
            status_code=(r.status_code if r is not None else status.HTTP_404_NOT_FOUND),
            detail=detail,
        )


@app.get("/api/config")
async def get_app_config():
    # Fall back to the default locale when CONFIG_DATA has no 'ui' section
    default_locale = "en-US"
    if "ui" in CONFIG_DATA:
        default_locale = CONFIG_DATA["ui"].get("default_locale", "en-US")

    return {
        "status": True,
        "name": WEBUI_NAME,
        "version": VERSION,
        "default_locale": default_locale,
        "default_models": webui_app.state.config.DEFAULT_MODELS,
        "default_prompt_suggestions": webui_app.state.config.DEFAULT_PROMPT_SUGGESTIONS,
        "features": {
            "auth": WEBUI_AUTH,
            "auth_trusted_header": bool(webui_app.state.AUTH_TRUSTED_EMAIL_HEADER),
            "enable_signup": webui_app.state.config.ENABLE_SIGNUP,
            "enable_web_search": rag_app.state.config.ENABLE_RAG_WEB_SEARCH,
            "enable_image_generation": images_app.state.config.ENABLED,
            "enable_community_sharing": webui_app.state.config.ENABLE_COMMUNITY_SHARING,
            "enable_admin_export": ENABLE_ADMIN_EXPORT,
        },
        "audio": {
            "tts": {
                "engine": audio_app.state.config.TTS_ENGINE,
                "voice": audio_app.state.config.TTS_VOICE,
            },
            "stt": {
                "engine": audio_app.state.config.STT_ENGINE,
            },
        },
    }


@app.get("/api/config/model/filter")
async def get_model_filter_config(user=Depends(get_admin_user)):
    return {
        "enabled": app.state.config.ENABLE_MODEL_FILTER,
        "models": app.state.config.MODEL_FILTER_LIST,
    }


class ModelFilterConfigForm(BaseModel):
    enabled: bool
    models: List[str]


@app.post("/api/config/model/filter")
async def update_model_filter_config(
    form_data: ModelFilterConfigForm, user=Depends(get_admin_user)
):
    app.state.config.ENABLE_MODEL_FILTER = form_data.enabled
    app.state.config.MODEL_FILTER_LIST = form_data.models

    return {
        "enabled": app.state.config.ENABLE_MODEL_FILTER,
        "models": app.state.config.MODEL_FILTER_LIST,
    }


@app.get("/api/webhook")
async def get_webhook_url(user=Depends(get_admin_user)):
    return {
        "url": app.state.config.WEBHOOK_URL,
    }


class UrlForm(BaseModel):
    url: str


@app.post("/api/webhook")
async def update_webhook_url(form_data: UrlForm, user=Depends(get_admin_user)):
    app.state.config.WEBHOOK_URL = form_data.url
    webui_app.state.WEBHOOK_URL = app.state.config.WEBHOOK_URL
    return {"url": app.state.config.WEBHOOK_URL}


@app.get("/api/version")
async def get_app_version():
    return {
        "version": VERSION,
    }


@app.get("/api/changelog")
async def get_app_changelog():
    return {key: CHANGELOG[key] for idx, key in enumerate(CHANGELOG) if idx < 5}
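
# NOTE: dicts preserve insertion order in Python 3.7+, so the comprehension
# above returns the first five CHANGELOG entries as loaded from config —
# presumably the most recent ones, assuming the changelog is ordered
# newest-first.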


@app.get("/api/version/updates")
async def get_app_latest_release_version():
    try:
        async with aiohttp.ClientSession(trust_env=True) as session:
            async with session.get(
                "https://api.github.com/repos/open-webui/open-webui/releases/latest"
            ) as response:
                response.raise_for_status()
                data = await response.json()
                latest_version = data["tag_name"]

                return {"current": VERSION, "latest": latest_version[1:]}
    except aiohttp.ClientError as e:
        raise HTTPException(
            status_code=status.HTTP_503_SERVICE_UNAVAILABLE,
            detail=ERROR_MESSAGES.RATE_LIMIT_EXCEEDED,
        )


@app.get("/manifest.json")
async def get_manifest_json():
    return {
        "name": WEBUI_NAME,
        "short_name": WEBUI_NAME,
        "start_url": "/",
        "display": "standalone",
        "background_color": "#343541",
        "theme_color": "#343541",
        "orientation": "portrait-primary",
        "icons": [{"src": "/static/logo.png", "type": "image/png", "sizes": "500x500"}],
    }


@app.get("/opensearch.xml")
async def get_opensearch_xml():
    xml_content = rf"""
    <OpenSearchDescription xmlns="http://a9.com/-/spec/opensearch/1.1/" xmlns:moz="http://www.mozilla.org/2006/browser/search/">
    <ShortName>{WEBUI_NAME}</ShortName>
    <Description>Search {WEBUI_NAME}</Description>
    <InputEncoding>UTF-8</InputEncoding>
    <Image width="16" height="16" type="image/x-icon">{WEBUI_URL}/favicon.png</Image>
    <Url type="text/html" method="get" template="{WEBUI_URL}/?q={"{searchTerms}"}"/>
    <moz:SearchForm>{WEBUI_URL}</moz:SearchForm>
    </OpenSearchDescription>
    """
    return Response(content=xml_content, media_type="application/xml")


@app.get("/health")
async def healthcheck():
    return {"status": True}


app.mount("/static", StaticFiles(directory=STATIC_DIR), name="static")
app.mount("/cache", StaticFiles(directory=CACHE_DIR), name="cache")

if os.path.exists(FRONTEND_BUILD_DIR):
    mimetypes.add_type("text/javascript", ".js")
    app.mount(
        "/",
        SPAStaticFiles(directory=FRONTEND_BUILD_DIR, html=True),
        name="spa-static-files",
    )
else:
    log.warning(
        f"Frontend build directory not found at '{FRONTEND_BUILD_DIR}'. Serving API only."
    )