main.py
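# Open WebUI backend entry point: configures the FastAPI app, mounts the
# Ollama/OpenAI/retrieval/audio/image/webui sub-apps, installs the chat
# completion and pipeline middlewares, and exposes the model and task APIs.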

import asyncio
import inspect
import json
import logging
import mimetypes
import os
import shutil
import sys
import time
import random

from contextlib import asynccontextmanager
from typing import Optional

from aiocache import cached
import aiohttp
import requests

from fastapi import (
    Depends,
    FastAPI,
    File,
    Form,
    HTTPException,
    Request,
    UploadFile,
    status,
)
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import JSONResponse, RedirectResponse
from fastapi.staticfiles import StaticFiles
from pydantic import BaseModel
from sqlalchemy import text
from starlette.exceptions import HTTPException as StarletteHTTPException
from starlette.middleware.base import BaseHTTPMiddleware
from starlette.middleware.sessions import SessionMiddleware
from starlette.responses import Response, StreamingResponse

from open_webui.apps.audio.main import app as audio_app
from open_webui.apps.images.main import app as images_app
from open_webui.apps.ollama.main import (
    app as ollama_app,
    get_all_models as get_ollama_models,
    generate_chat_completion as generate_ollama_chat_completion,
    GenerateChatCompletionForm,
)
from open_webui.apps.openai.main import (
    app as openai_app,
    generate_chat_completion as generate_openai_chat_completion,
    get_all_models as get_openai_models,
    get_all_models_responses as get_openai_models_responses,
)
from open_webui.apps.retrieval.main import app as retrieval_app
from open_webui.apps.retrieval.utils import get_sources_from_files, rag_template
from open_webui.apps.socket.main import (
    app as socket_app,
    periodic_usage_pool_cleanup,
    get_event_call,
    get_event_emitter,
)
from open_webui.apps.webui.internal.db import Session
from open_webui.apps.webui.main import (
    app as webui_app,
    generate_function_chat_completion,
    get_all_models as get_open_webui_models,
)
from open_webui.apps.webui.models.functions import Functions
from open_webui.apps.webui.models.models import Models
from open_webui.apps.webui.models.users import UserModel, Users
from open_webui.apps.webui.utils import load_function_module_by_id
from open_webui.config import (
    CACHE_DIR,
    CORS_ALLOW_ORIGIN,
    DEFAULT_LOCALE,
    ENABLE_ADMIN_CHAT_ACCESS,
    ENABLE_ADMIN_EXPORT,
    ENABLE_OLLAMA_API,
    ENABLE_OPENAI_API,
    ENABLE_TAGS_GENERATION,
    ENV,
    FRONTEND_BUILD_DIR,
    OAUTH_PROVIDERS,
    STATIC_DIR,
    TASK_MODEL,
    TASK_MODEL_EXTERNAL,
    ENABLE_SEARCH_QUERY_GENERATION,
    ENABLE_RETRIEVAL_QUERY_GENERATION,
    QUERY_GENERATION_PROMPT_TEMPLATE,
    DEFAULT_QUERY_GENERATION_PROMPT_TEMPLATE,
    TITLE_GENERATION_PROMPT_TEMPLATE,
    TAGS_GENERATION_PROMPT_TEMPLATE,
    TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE,
    WEBHOOK_URL,
    WEBUI_AUTH,
    WEBUI_NAME,
    AppConfig,
    reset_config,
)
from open_webui.constants import TASKS
from open_webui.env import (
    CHANGELOG,
    GLOBAL_LOG_LEVEL,
    SAFE_MODE,
    SRC_LOG_LEVELS,
    VERSION,
    WEBUI_BUILD_HASH,
    WEBUI_SECRET_KEY,
    WEBUI_SESSION_COOKIE_SAME_SITE,
    WEBUI_SESSION_COOKIE_SECURE,
    WEBUI_URL,
    RESET_CONFIG_ON_START,
    OFFLINE_MODE,
)
from open_webui.utils.misc import (
    add_or_update_system_message,
    get_last_user_message,
    prepend_to_first_user_message_content,
)
from open_webui.utils.oauth import oauth_manager
from open_webui.utils.payload import convert_payload_openai_to_ollama
from open_webui.utils.response import (
    convert_response_ollama_to_openai,
    convert_streaming_response_ollama_to_openai,
)
from open_webui.utils.security_headers import SecurityHeadersMiddleware
from open_webui.utils.task import (
    moa_response_generation_template,
    tags_generation_template,
    query_generation_template,
    emoji_generation_template,
    title_generation_template,
    tools_function_calling_generation_template,
)
from open_webui.utils.tools import get_tools
from open_webui.utils.utils import (
    decode_token,
    get_admin_user,
    get_current_user,
    get_http_authorization_cred,
    get_verified_user,
)
from open_webui.utils.access_control import has_access

if SAFE_MODE:
    print("SAFE MODE ENABLED")
    Functions.deactivate_all_functions()

logging.basicConfig(stream=sys.stdout, level=GLOBAL_LOG_LEVEL)
log = logging.getLogger(__name__)
log.setLevel(SRC_LOG_LEVELS["MAIN"])

class SPAStaticFiles(StaticFiles):
    async def get_response(self, path: str, scope):
        try:
            return await super().get_response(path, scope)
        except (HTTPException, StarletteHTTPException) as ex:
            if ex.status_code == 404:
                return await super().get_response("index.html", scope)
            else:
                raise ex

print(
    rf"""
  ___                    __        __   _     _   _ ___
 / _ \ _ __   ___ _ __   \ \      / /__| |__ | | | |_ _|
| | | | '_ \ / _ \ '_ \   \ \ /\ / / _ \ '_ \| | | || |
| |_| | |_) |  __/ | | |   \ V  V /  __/ |_) | |_| || |
 \___/| .__/ \___|_| |_|    \_/\_/ \___|_.__/ \___/|___|
      |_|

v{VERSION} - building the best open-source AI user interface.
{f"Commit: {WEBUI_BUILD_HASH}" if WEBUI_BUILD_HASH != "dev-build" else ""}
https://github.com/open-webui/open-webui
"""
)

@asynccontextmanager
async def lifespan(app: FastAPI):
    if RESET_CONFIG_ON_START:
        reset_config()

    asyncio.create_task(periodic_usage_pool_cleanup())
    yield

app = FastAPI(
    docs_url="/docs" if ENV == "dev" else None,
    openapi_url="/openapi.json" if ENV == "dev" else None,
    redoc_url=None,
    lifespan=lifespan,
)

app.state.config = AppConfig()

app.state.config.ENABLE_OPENAI_API = ENABLE_OPENAI_API
app.state.config.ENABLE_OLLAMA_API = ENABLE_OLLAMA_API

app.state.config.WEBHOOK_URL = WEBHOOK_URL

app.state.config.TASK_MODEL = TASK_MODEL
app.state.config.TASK_MODEL_EXTERNAL = TASK_MODEL_EXTERNAL
app.state.config.TITLE_GENERATION_PROMPT_TEMPLATE = TITLE_GENERATION_PROMPT_TEMPLATE
app.state.config.ENABLE_TAGS_GENERATION = ENABLE_TAGS_GENERATION
app.state.config.TAGS_GENERATION_PROMPT_TEMPLATE = TAGS_GENERATION_PROMPT_TEMPLATE
app.state.config.ENABLE_SEARCH_QUERY_GENERATION = ENABLE_SEARCH_QUERY_GENERATION
app.state.config.ENABLE_RETRIEVAL_QUERY_GENERATION = ENABLE_RETRIEVAL_QUERY_GENERATION
app.state.config.QUERY_GENERATION_PROMPT_TEMPLATE = QUERY_GENERATION_PROMPT_TEMPLATE
app.state.config.TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE = (
    TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE
)

##################################
#
# ChatCompletion Middleware
#
##################################
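
# The handlers below are used by ChatCompletionMiddleware to rewrite an
# incoming chat request before it reaches the model: "inlet" filter functions
# run first, then tool calling, then file-based retrieval; any gathered
# sources are injected into the messages as context.
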
def get_filter_function_ids(model):
    def get_priority(function_id):
        function = Functions.get_function_by_id(function_id)
        if function is not None and hasattr(function, "valves"):
            # TODO: Fix FunctionModel
            return (function.valves if function.valves else {}).get("priority", 0)
        return 0

    filter_ids = [function.id for function in Functions.get_global_filter_functions()]
    if "info" in model and "meta" in model["info"]:
        filter_ids.extend(model["info"]["meta"].get("filterIds", []))
        filter_ids = list(set(filter_ids))

    enabled_filter_ids = [
        function.id
        for function in Functions.get_functions_by_type("filter", active_only=True)
    ]

    filter_ids = [
        filter_id for filter_id in filter_ids if filter_id in enabled_filter_ids
    ]

    filter_ids.sort(key=get_priority)
    return filter_ids

async def chat_completion_filter_functions_handler(body, model, extra_params):
    skip_files = None

    filter_ids = get_filter_function_ids(model)
    for filter_id in filter_ids:
        filter = Functions.get_function_by_id(filter_id)
        if not filter:
            continue

        if filter_id in webui_app.state.FUNCTIONS:
            function_module = webui_app.state.FUNCTIONS[filter_id]
        else:
            function_module, _, _ = load_function_module_by_id(filter_id)
            webui_app.state.FUNCTIONS[filter_id] = function_module

        # Check if the function has a file_handler variable
        if hasattr(function_module, "file_handler"):
            skip_files = function_module.file_handler

        if hasattr(function_module, "valves") and hasattr(function_module, "Valves"):
            valves = Functions.get_function_valves_by_id(filter_id)
            function_module.valves = function_module.Valves(
                **(valves if valves else {})
            )

        if not hasattr(function_module, "inlet"):
            continue

        try:
            inlet = function_module.inlet

            # Get the signature of the function
            sig = inspect.signature(inlet)
            params = {"body": body} | {
                k: v
                for k, v in {
                    **extra_params,
                    "__model__": model,
                    "__id__": filter_id,
                }.items()
                if k in sig.parameters
            }

            if "__user__" in params and hasattr(function_module, "UserValves"):
                try:
                    params["__user__"]["valves"] = function_module.UserValves(
                        **Functions.get_user_valves_by_id_and_user_id(
                            filter_id, params["__user__"]["id"]
                        )
                    )
                except Exception as e:
                    print(e)

            if inspect.iscoroutinefunction(inlet):
                body = await inlet(**params)
            else:
                body = inlet(**params)

        except Exception as e:
            print(f"Error: {e}")
            raise e

    if skip_files and "files" in body.get("metadata", {}):
        del body["metadata"]["files"]

    return body, {}

def get_tools_function_calling_payload(messages, task_model_id, content):
    user_message = get_last_user_message(messages)
    history = "\n".join(
        f"{message['role'].upper()}: \"\"\"{message['content']}\"\"\""
        for message in messages[::-1][:4]
    )

    prompt = f"History:\n{history}\nQuery: {user_message}"

    return {
        "model": task_model_id,
        "messages": [
            {"role": "system", "content": content},
            {"role": "user", "content": f"Query: {prompt}"},
        ],
        "stream": False,
        "metadata": {"task": str(TASKS.FUNCTION_CALLING)},
    }

async def get_content_from_response(response) -> Optional[str]:
    content = None
    if hasattr(response, "body_iterator"):
        async for chunk in response.body_iterator:
            data = json.loads(chunk.decode("utf-8"))
            content = data["choices"][0]["message"]["content"]

        # Cleanup any remaining background tasks if necessary
        if response.background is not None:
            await response.background()
    else:
        content = response["choices"][0]["message"]["content"]
    return content

def get_task_model_id(
    default_model_id: str, task_model: str, task_model_external: str, models
) -> str:
    # Set the task model
    task_model_id = default_model_id
    # Check if the user has a custom task model and use that model
    if models[task_model_id]["owned_by"] == "ollama":
        if task_model and task_model in models:
            task_model_id = task_model
    else:
        if task_model_external and task_model_external in models:
            task_model_id = task_model_external

    return task_model_id

async def chat_completion_tools_handler(
    body: dict, user: UserModel, models, extra_params: dict
) -> tuple[dict, dict]:
    # If tool_ids field is present, call the functions
    metadata = body.get("metadata", {})

    tool_ids = metadata.get("tool_ids", None)
    log.debug(f"{tool_ids=}")
    if not tool_ids:
        return body, {}

    skip_files = False
    sources = []

    task_model_id = get_task_model_id(
        body["model"],
        app.state.config.TASK_MODEL,
        app.state.config.TASK_MODEL_EXTERNAL,
        models,
    )
    tools = get_tools(
        webui_app,
        tool_ids,
        user,
        {
            **extra_params,
            "__model__": models[task_model_id],
            "__messages__": body["messages"],
            "__files__": metadata.get("files", []),
        },
    )
    log.info(f"{tools=}")

    specs = [tool["spec"] for tool in tools.values()]
    tools_specs = json.dumps(specs)

    if app.state.config.TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE != "":
        template = app.state.config.TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE
    else:
        template = """Available Tools: {{TOOLS}}\nReturn an empty string if no tools match the query. If a function tool matches, construct and return a JSON object in the format {\"name\": \"functionName\", \"parameters\": {\"requiredFunctionParamKey\": \"requiredFunctionParamValue\"}} using the appropriate tool and its parameters. Only return the object and limit the response to the JSON object without additional text."""

    tools_function_calling_prompt = tools_function_calling_generation_template(
        template, tools_specs
    )
    log.info(f"{tools_function_calling_prompt=}")
    payload = get_tools_function_calling_payload(
        body["messages"], task_model_id, tools_function_calling_prompt
    )

    try:
        payload = filter_pipeline(payload, user, models)
    except Exception as e:
        raise e

    try:
        response = await generate_chat_completions(form_data=payload, user=user)
        log.debug(f"{response=}")
        content = await get_content_from_response(response)
        log.debug(f"{content=}")

        if not content:
            return body, {}

        try:
            content = content[content.find("{") : content.rfind("}") + 1]
            if not content:
                raise Exception("No JSON object found in the response")

            result = json.loads(content)

            tool_function_name = result.get("name", None)
            if tool_function_name not in tools:
                return body, {}

            tool_function_params = result.get("parameters", {})

            try:
                required_params = (
                    tools[tool_function_name]
                    .get("spec", {})
                    .get("parameters", {})
                    .get("required", [])
                )
                tool_function = tools[tool_function_name]["callable"]
                tool_function_params = {
                    k: v
                    for k, v in tool_function_params.items()
                    if k in required_params
                }
                tool_output = await tool_function(**tool_function_params)

            except Exception as e:
                tool_output = str(e)

            print(tools[tool_function_name]["citation"])

            if isinstance(tool_output, str):
                if tools[tool_function_name]["citation"]:
                    sources.append(
                        {
                            "source": {
                                "name": f"TOOL:{tools[tool_function_name]['toolkit_id']}/{tool_function_name}"
                            },
                            "document": [tool_output],
                            "metadata": [
                                {
                                    "source": f"TOOL:{tools[tool_function_name]['toolkit_id']}/{tool_function_name}"
                                }
                            ],
                        }
                    )
                else:
                    sources.append(
                        {
                            "source": {},
                            "document": [tool_output],
                            "metadata": [
                                {
                                    "source": f"TOOL:{tools[tool_function_name]['toolkit_id']}/{tool_function_name}"
                                }
                            ],
                        }
                    )

                if tools[tool_function_name]["file_handler"]:
                    skip_files = True

        except Exception as e:
            log.exception(f"Error: {e}")
            content = None
    except Exception as e:
        log.exception(f"Error: {e}")
        content = None

    log.debug(f"tool_contexts: {sources}")

    if skip_files and "files" in body.get("metadata", {}):
        del body["metadata"]["files"]

    return body, {"sources": sources}

async def chat_completion_files_handler(
    body: dict, user: UserModel
) -> tuple[dict, dict[str, list]]:
    sources = []

    if files := body.get("metadata", {}).get("files", None):
        try:
            queries_response = await generate_queries(
                {
                    "model": body["model"],
                    "messages": body["messages"],
                    "type": "retrieval",
                },
                user,
            )
            queries_response = queries_response["choices"][0]["message"]["content"]

            try:
                queries_response = json.loads(queries_response)
            except Exception as e:
                queries_response = {"queries": []}

            queries = queries_response.get("queries", [])
        except Exception as e:
            queries = []

        if len(queries) == 0:
            queries = [get_last_user_message(body["messages"])]

        sources = get_sources_from_files(
            files=files,
            queries=queries,
            embedding_function=retrieval_app.state.EMBEDDING_FUNCTION,
            k=retrieval_app.state.config.TOP_K,
            reranking_function=retrieval_app.state.sentence_transformer_rf,
            r=retrieval_app.state.config.RELEVANCE_THRESHOLD,
            hybrid_search=retrieval_app.state.config.ENABLE_RAG_HYBRID_SEARCH,
        )

    log.debug(f"rag_contexts:sources: {sources}")
    return body, {"sources": sources}

def is_chat_completion_request(request):
    return request.method == "POST" and any(
        endpoint in request.url.path
        for endpoint in ["/ollama/api/chat", "/chat/completions"]
    )

async def get_body_and_model_and_user(request, models):
    # Read the original request body
    body = await request.body()
    body_str = body.decode("utf-8")
    body = json.loads(body_str) if body_str else {}

    model_id = body["model"]
    if model_id not in models:
        raise Exception("Model not found")
    model = models[model_id]

    user = get_current_user(
        request,
        get_http_authorization_cred(request.headers.get("Authorization")),
    )

    return body, model, user

class ChatCompletionMiddleware(BaseHTTPMiddleware):
    async def dispatch(self, request: Request, call_next):
        if not is_chat_completion_request(request):
            return await call_next(request)

        log.debug(f"request.url.path: {request.url.path}")

        model_list = await get_all_models()
        models = {model["id"]: model for model in model_list}

        try:
            body, model, user = await get_body_and_model_and_user(request, models)
        except Exception as e:
            return JSONResponse(
                status_code=status.HTTP_400_BAD_REQUEST,
                content={"detail": str(e)},
            )

        model_info = Models.get_model_by_id(model["id"])
        if user.role == "user":
            if model.get("arena"):
                if not has_access(
                    user.id,
                    type="read",
                    access_control=model.get("info", {})
                    .get("meta", {})
                    .get("access_control", {}),
                ):
                    raise HTTPException(
                        status_code=403,
                        detail="Model not found",
                    )
            else:
                if not model_info:
                    return JSONResponse(
                        status_code=status.HTTP_404_NOT_FOUND,
                        content={"detail": "Model not found"},
                    )
                elif not (
                    user.id == model_info.user_id
                    or has_access(
                        user.id, type="read", access_control=model_info.access_control
                    )
                ):
                    return JSONResponse(
                        status_code=status.HTTP_403_FORBIDDEN,
                        content={"detail": "User does not have access to the model"},
                    )

        metadata = {
            "chat_id": body.pop("chat_id", None),
            "message_id": body.pop("id", None),
            "session_id": body.pop("session_id", None),
            "tool_ids": body.get("tool_ids", None),
            "files": body.get("files", None),
        }
        body["metadata"] = metadata

        extra_params = {
            "__event_emitter__": get_event_emitter(metadata),
            "__event_call__": get_event_call(metadata),
            "__user__": {
                "id": user.id,
                "email": user.email,
                "name": user.name,
                "role": user.role,
            },
            "__metadata__": metadata,
        }

        # Initialize data_items to store additional data to be sent to the client
        # Initialize contexts and citation
        data_items = []
        sources = []

        try:
            body, flags = await chat_completion_filter_functions_handler(
                body, model, extra_params
            )
        except Exception as e:
            return JSONResponse(
                status_code=status.HTTP_400_BAD_REQUEST,
                content={"detail": str(e)},
            )

        tool_ids = body.pop("tool_ids", None)
        files = body.pop("files", None)

        metadata = {
            **metadata,
            "tool_ids": tool_ids,
            "files": files,
        }
        body["metadata"] = metadata

        try:
            body, flags = await chat_completion_tools_handler(
                body, user, models, extra_params
            )
            sources.extend(flags.get("sources", []))
        except Exception as e:
            log.exception(e)

        try:
            body, flags = await chat_completion_files_handler(body, user)
            sources.extend(flags.get("sources", []))
        except Exception as e:
            log.exception(e)

        # If context is not empty, insert it into the messages
        if len(sources) > 0:
            context_string = ""
            for source_idx, source in enumerate(sources):
                source_id = source.get("source", {}).get("name", "")

                if "document" in source:
                    for doc_idx, doc_context in enumerate(source["document"]):
                        metadata = source.get("metadata")
                        doc_source_id = None

                        if metadata:
                            doc_source_id = metadata[doc_idx].get("source", source_id)

                        if source_id:
                            context_string += f"<source><source_id>{doc_source_id if doc_source_id is not None else source_id}</source_id><source_context>{doc_context}</source_context></source>\n"
                        else:
                            # If there is no source_id, then do not include the source_id tag
                            context_string += f"<source><source_context>{doc_context}</source_context></source>\n"

            context_string = context_string.strip()
            prompt = get_last_user_message(body["messages"])

            if prompt is None:
                raise Exception("No user message found")
            if (
                retrieval_app.state.config.RELEVANCE_THRESHOLD == 0
                and context_string.strip() == ""
            ):
                log.debug(
                    "With a 0 relevancy threshold for RAG, the context cannot be empty"
                )

            # Workaround for Ollama 2.0+ system prompt issue
            # TODO: replace with add_or_update_system_message
            if model["owned_by"] == "ollama":
                body["messages"] = prepend_to_first_user_message_content(
                    rag_template(
                        retrieval_app.state.config.RAG_TEMPLATE, context_string, prompt
                    ),
                    body["messages"],
                )
            else:
                body["messages"] = add_or_update_system_message(
                    rag_template(
                        retrieval_app.state.config.RAG_TEMPLATE, context_string, prompt
                    ),
                    body["messages"],
                )

        # If there are citations, add them to the data_items
        sources = [
            source for source in sources if source.get("source", {}).get("name", "")
        ]
        if len(sources) > 0:
            data_items.append({"sources": sources})

        modified_body_bytes = json.dumps(body).encode("utf-8")
        # Replace the request body with the modified one
        request._body = modified_body_bytes
        # Set custom header to ensure content-length matches new body length
        request.headers.__dict__["_list"] = [
            (b"content-length", str(len(modified_body_bytes)).encode("utf-8")),
            *[(k, v) for k, v in request.headers.raw if k.lower() != b"content-length"],
        ]

        response = await call_next(request)
        if not isinstance(response, StreamingResponse):
            return response

        content_type = response.headers["Content-Type"]
        is_openai = "text/event-stream" in content_type
        is_ollama = "application/x-ndjson" in content_type
        if not is_openai and not is_ollama:
            return response

        def wrap_item(item):
            return f"data: {item}\n\n" if is_openai else f"{item}\n"

        async def stream_wrapper(original_generator, data_items):
            for item in data_items:
                yield wrap_item(json.dumps(item))

            async for data in original_generator:
                yield data

        return StreamingResponse(
            stream_wrapper(response.body_iterator, data_items),
            headers=dict(response.headers),
        )

    async def _receive(self, body: bytes):
        return {"type": "http.request", "body": body, "more_body": False}

app.add_middleware(ChatCompletionMiddleware)


##################################
#
# Pipeline Middleware
#
##################################
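
# Pipeline filters are hosted on external OpenAI-compatible servers.
# filter_pipeline() posts the payload to each matching filter's
# /{id}/filter/inlet endpoint in priority order, and PipelineMiddleware
# applies this to every chat completion request before it is handled.
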
def get_sorted_filters(model_id, models):
    filters = [
        model
        for model in models.values()
        if "pipeline" in model
        and "type" in model["pipeline"]
        and model["pipeline"]["type"] == "filter"
        and (
            model["pipeline"]["pipelines"] == ["*"]
            or any(
                model_id == target_model_id
                for target_model_id in model["pipeline"]["pipelines"]
            )
        )
    ]
    sorted_filters = sorted(filters, key=lambda x: x["pipeline"]["priority"])
    return sorted_filters

def filter_pipeline(payload, user, models):
    user = {"id": user.id, "email": user.email, "name": user.name, "role": user.role}
    model_id = payload["model"]

    sorted_filters = get_sorted_filters(model_id, models)
    model = models[model_id]

    if "pipeline" in model:
        sorted_filters.append(model)

    for filter in sorted_filters:
        r = None
        try:
            urlIdx = filter["urlIdx"]

            url = openai_app.state.config.OPENAI_API_BASE_URLS[urlIdx]
            key = openai_app.state.config.OPENAI_API_KEYS[urlIdx]

            if key == "":
                continue

            headers = {"Authorization": f"Bearer {key}"}
            r = requests.post(
                f"{url}/{filter['id']}/filter/inlet",
                headers=headers,
                json={
                    "user": user,
                    "body": payload,
                },
            )

            r.raise_for_status()
            payload = r.json()
        except Exception as e:
            # Handle connection error here
            print(f"Connection error: {e}")

            if r is not None:
                res = r.json()
                if "detail" in res:
                    raise Exception(r.status_code, res["detail"])

    return payload

class PipelineMiddleware(BaseHTTPMiddleware):
    async def dispatch(self, request: Request, call_next):
        if not is_chat_completion_request(request):
            return await call_next(request)

        log.debug(f"request.url.path: {request.url.path}")

        # Read the original request body
        body = await request.body()
        # Decode body to string
        body_str = body.decode("utf-8")
        # Parse string to JSON
        data = json.loads(body_str) if body_str else {}

        try:
            user = get_current_user(
                request,
                get_http_authorization_cred(request.headers["Authorization"]),
            )
        except KeyError as e:
            if len(e.args) > 1:
                return JSONResponse(
                    status_code=e.args[0],
                    content={"detail": e.args[1]},
                )
            else:
                return JSONResponse(
                    status_code=status.HTTP_401_UNAUTHORIZED,
                    content={"detail": "Not authenticated"},
                )
        except HTTPException as e:
            return JSONResponse(
                status_code=e.status_code,
                content={"detail": e.detail},
            )

        model_list = await get_all_models()
        models = {model["id"]: model for model in model_list}

        try:
            data = filter_pipeline(data, user, models)
        except Exception as e:
            if len(e.args) > 1:
                return JSONResponse(
                    status_code=e.args[0],
                    content={"detail": e.args[1]},
                )
            else:
                return JSONResponse(
                    status_code=status.HTTP_400_BAD_REQUEST,
                    content={"detail": str(e)},
                )

        modified_body_bytes = json.dumps(data).encode("utf-8")
        # Replace the request body with the modified one
        request._body = modified_body_bytes
        # Set custom header to ensure content-length matches new body length
        request.headers.__dict__["_list"] = [
            (b"content-length", str(len(modified_body_bytes)).encode("utf-8")),
            *[(k, v) for k, v in request.headers.raw if k.lower() != b"content-length"],
        ]

        response = await call_next(request)
        return response

    async def _receive(self, body: bytes):
        return {"type": "http.request", "body": body, "more_body": False}


app.add_middleware(PipelineMiddleware)

from urllib.parse import urlencode, parse_qs, urlparse


class RedirectMiddleware(BaseHTTPMiddleware):
    async def dispatch(self, request: Request, call_next):
        # Check if the request is a GET request
        if request.method == "GET":
            path = request.url.path
            query_params = dict(parse_qs(urlparse(str(request.url)).query))

            # Check for the specific watch path and the presence of 'v' parameter
            if path.endswith("/watch") and "v" in query_params:
                video_id = query_params["v"][0]  # Extract the first 'v' parameter
                encoded_video_id = urlencode({"youtube": video_id})
                redirect_url = f"/?{encoded_video_id}"
                return RedirectResponse(url=redirect_url)

        # Proceed with the normal flow of other requests
        response = await call_next(request)
        return response


# Add the middleware to the app
app.add_middleware(RedirectMiddleware)

app.add_middleware(
    CORSMiddleware,
    allow_origins=CORS_ALLOW_ORIGIN,
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

app.add_middleware(SecurityHeadersMiddleware)

@app.middleware("http")
async def commit_session_after_request(request: Request, call_next):
    response = await call_next(request)
    # log.debug("Commit session after request")
    Session.commit()
    return response


@app.middleware("http")
async def check_url(request: Request, call_next):
    start_time = int(time.time())
    request.state.enable_api_key = webui_app.state.config.ENABLE_API_KEY
    response = await call_next(request)
    process_time = int(time.time()) - start_time
    response.headers["X-Process-Time"] = str(process_time)

    return response


@app.middleware("http")
async def update_embedding_function(request: Request, call_next):
    response = await call_next(request)
    if "/embedding/update" in request.url.path:
        webui_app.state.EMBEDDING_FUNCTION = retrieval_app.state.EMBEDDING_FUNCTION
    return response

@app.middleware("http")
async def inspect_websocket(request: Request, call_next):
    if (
        "/ws/socket.io" in request.url.path
        and request.query_params.get("transport") == "websocket"
    ):
        upgrade = (request.headers.get("Upgrade") or "").lower()
        connection = (request.headers.get("Connection") or "").lower().split(",")
        # Check that the correct headers are present for an upgrade, else reject the connection.
        # This works around this upstream issue: https://github.com/miguelgrinberg/python-engineio/issues/367
        if upgrade != "websocket" or "upgrade" not in connection:
            return JSONResponse(
                status_code=status.HTTP_400_BAD_REQUEST,
                content={"detail": "Invalid WebSocket upgrade request"},
            )
    return await call_next(request)

app.mount("/ws", socket_app)

app.mount("/ollama", ollama_app)
app.mount("/openai", openai_app)

app.mount("/images/api/v1", images_app)
app.mount("/audio/api/v1", audio_app)
app.mount("/retrieval/api/v1", retrieval_app)

app.mount("/api/v1", webui_app)

webui_app.state.EMBEDDING_FUNCTION = retrieval_app.state.EMBEDDING_FUNCTION

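
# Model aggregation: get_all_base_models() collects models from the OpenAI,
# Ollama, and Open WebUI backends; get_all_models() then overlays custom
# model presets and attaches their enabled action functions.
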
async def get_all_base_models():
    open_webui_models = []
    openai_models = []
    ollama_models = []

    if app.state.config.ENABLE_OPENAI_API:
        openai_models = await get_openai_models()
        openai_models = openai_models["data"]

    if app.state.config.ENABLE_OLLAMA_API:
        ollama_models = await get_ollama_models()
        ollama_models = [
            {
                "id": model["model"],
                "name": model["name"],
                "object": "model",
                "created": int(time.time()),
                "owned_by": "ollama",
                "ollama": model,
            }
            for model in ollama_models["models"]
        ]

    open_webui_models = await get_open_webui_models()

    models = open_webui_models + openai_models + ollama_models
    return models

@cached(ttl=3)
async def get_all_models():
    models = await get_all_base_models()

    # If there are no models, return an empty list
    if len([model for model in models if not model.get("arena", False)]) == 0:
        return []

    global_action_ids = [
        function.id for function in Functions.get_global_action_functions()
    ]
    enabled_action_ids = [
        function.id
        for function in Functions.get_functions_by_type("action", active_only=True)
    ]

    custom_models = Models.get_all_models()
    for custom_model in custom_models:
        if custom_model.base_model_id is None:
            for model in models:
                if (
                    custom_model.id == model["id"]
                    or custom_model.id == model["id"].split(":")[0]
                ):
                    if custom_model.is_active:
                        model["name"] = custom_model.name
                        model["info"] = custom_model.model_dump()

                        action_ids = []
                        if "info" in model and "meta" in model["info"]:
                            action_ids.extend(
                                model["info"]["meta"].get("actionIds", [])
                            )

                        model["action_ids"] = action_ids
                    else:
                        models.remove(model)

        elif custom_model.is_active and (
            custom_model.id not in [model["id"] for model in models]
        ):
            owned_by = "openai"
            pipe = None
            action_ids = []

            for model in models:
                if (
                    custom_model.base_model_id == model["id"]
                    or custom_model.base_model_id == model["id"].split(":")[0]
                ):
                    owned_by = model["owned_by"]
                    if "pipe" in model:
                        pipe = model["pipe"]
                    break

            if custom_model.meta:
                meta = custom_model.meta.model_dump()
                if "actionIds" in meta:
                    action_ids.extend(meta["actionIds"])

            models.append(
                {
                    "id": f"{custom_model.id}",
                    "name": custom_model.name,
                    "object": "model",
                    "created": custom_model.created_at,
                    "owned_by": owned_by,
                    "info": custom_model.model_dump(),
                    "preset": True,
                    **({"pipe": pipe} if pipe is not None else {}),
                    "action_ids": action_ids,
                }
            )

    # Process action_ids to get the actions
    def get_action_items_from_module(function, module):
        actions = []
        if hasattr(module, "actions"):
            actions = module.actions
            return [
                {
                    "id": f"{function.id}.{action['id']}",
                    "name": action.get("name", f"{function.name} ({action['id']})"),
                    "description": function.meta.description,
                    "icon_url": action.get(
                        "icon_url", function.meta.manifest.get("icon_url", None)
                    ),
                }
                for action in actions
            ]
        else:
            return [
                {
                    "id": function.id,
                    "name": function.name,
                    "description": function.meta.description,
                    "icon_url": function.meta.manifest.get("icon_url", None),
                }
            ]

    def get_function_module_by_id(function_id):
        if function_id in webui_app.state.FUNCTIONS:
            function_module = webui_app.state.FUNCTIONS[function_id]
        else:
            function_module, _, _ = load_function_module_by_id(function_id)
            webui_app.state.FUNCTIONS[function_id] = function_module
        return function_module

    for model in models:
        action_ids = [
            action_id
            for action_id in list(set(model.pop("action_ids", []) + global_action_ids))
            if action_id in enabled_action_ids
        ]

        model["actions"] = []
        for action_id in action_ids:
            action_function = Functions.get_function_by_id(action_id)
            if action_function is None:
                raise Exception(f"Action not found: {action_id}")

            function_module = get_function_module_by_id(action_id)
            model["actions"].extend(
                get_action_items_from_module(action_function, function_module)
            )

    log.debug(f"get_all_models() returned {len(models)} models")
    return models

@app.get("/api/models")
async def get_models(user=Depends(get_verified_user)):
    models = await get_all_models()

    # Filter out filter pipelines
    models = [
        model
        for model in models
        if "pipeline" not in model or model["pipeline"].get("type", None) != "filter"
    ]

    # Filter out models that the user does not have access to
    if user.role == "user":
        filtered_models = []
        for model in models:
            if model.get("arena"):
                if has_access(
                    user.id,
                    type="read",
                    access_control=model.get("info", {})
                    .get("meta", {})
                    .get("access_control", {}),
                ):
                    filtered_models.append(model)
                continue

            model_info = Models.get_model_by_id(model["id"])
            if model_info:
                if user.id == model_info.user_id or has_access(
                    user.id, type="read", access_control=model_info.access_control
                ):
                    filtered_models.append(model)
        models = filtered_models

    log.debug(
        f"/api/models returned filtered models accessible to the user: {json.dumps([model['id'] for model in models])}"
    )

    return {"data": models}

@app.get("/api/models/base")
async def get_base_models(user=Depends(get_admin_user)):
    models = await get_all_base_models()

    # Filter out arena models
    models = [model for model in models if not model.get("arena", False)]
    return {"data": models}

@app.post("/api/chat/completions")
async def generate_chat_completions(
    form_data: dict, user=Depends(get_verified_user), bypass_filter: bool = False
):
    model_list = await get_all_models()
    models = {model["id"]: model for model in model_list}

    model_id = form_data["model"]
    if model_id not in models:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail="Model not found",
        )

    model = models[model_id]

    # Check if user has access to the model
    if not bypass_filter and user.role == "user":
        if model.get("arena"):
            if not has_access(
                user.id,
                type="read",
                access_control=model.get("info", {})
                .get("meta", {})
                .get("access_control", {}),
            ):
                raise HTTPException(
                    status_code=403,
                    detail="Model not found",
                )
        else:
            model_info = Models.get_model_by_id(model_id)
            if not model_info:
                raise HTTPException(
                    status_code=404,
                    detail="Model not found",
                )
            elif not (
                user.id == model_info.user_id
                or has_access(
                    user.id, type="read", access_control=model_info.access_control
                )
            ):
                raise HTTPException(
                    status_code=403,
                    detail="Model not found",
                )

    if model["owned_by"] == "arena":
        model_ids = model.get("info", {}).get("meta", {}).get("model_ids")
        filter_mode = model.get("info", {}).get("meta", {}).get("filter_mode")
        if model_ids and filter_mode == "exclude":
            model_ids = [
                model["id"]
                for model in await get_all_models()
                if model.get("owned_by") != "arena" and model["id"] not in model_ids
            ]

        selected_model_id = None
        if isinstance(model_ids, list) and model_ids:
            selected_model_id = random.choice(model_ids)
        else:
            model_ids = [
                model["id"]
                for model in await get_all_models()
                if model.get("owned_by") != "arena"
            ]
            selected_model_id = random.choice(model_ids)

        form_data["model"] = selected_model_id

        if form_data.get("stream") == True:

            async def stream_wrapper(stream):
                yield f"data: {json.dumps({'selected_model_id': selected_model_id})}\n\n"
                async for chunk in stream:
                    yield chunk

            response = await generate_chat_completions(
                form_data, user, bypass_filter=True
            )
            return StreamingResponse(
                stream_wrapper(response.body_iterator), media_type="text/event-stream"
            )
        else:
            return {
                **(
                    await generate_chat_completions(form_data, user, bypass_filter=True)
                ),
                "selected_model_id": selected_model_id,
            }

    if model.get("pipe"):
        # The call below does not require bypass_filter because this is the only route
        # that uses generate_function_chat_completion and it already bypasses the filter
        return await generate_function_chat_completion(
            form_data, user=user, models=models
        )
    if model["owned_by"] == "ollama":
        # Using /ollama/api/chat endpoint
        form_data = convert_payload_openai_to_ollama(form_data)
        form_data = GenerateChatCompletionForm(**form_data)
        response = await generate_ollama_chat_completion(
            form_data=form_data, user=user, bypass_filter=bypass_filter
        )
        if form_data.stream:
            response.headers["content-type"] = "text/event-stream"
            return StreamingResponse(
                convert_streaming_response_ollama_to_openai(response),
                headers=dict(response.headers),
            )
        else:
            return convert_response_ollama_to_openai(response)
    else:
        return await generate_openai_chat_completion(
            form_data, user=user, bypass_filter=bypass_filter
        )

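
# After a chat finishes, /api/chat/completed runs the "outlet" side of pipeline
# filters and filter functions on the final response body; /api/chat/actions
# invokes a specific action function against a completed message.
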
@app.post("/api/chat/completed")
async def chat_completed(form_data: dict, user=Depends(get_verified_user)):
    model_list = await get_all_models()
    models = {model["id"]: model for model in model_list}

    data = form_data
    model_id = data["model"]
    if model_id not in models:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail="Model not found",
        )

    model = models[model_id]
    sorted_filters = get_sorted_filters(model_id, models)
    if "pipeline" in model:
        sorted_filters = [model] + sorted_filters

    for filter in sorted_filters:
        r = None
        try:
            urlIdx = filter["urlIdx"]

            url = openai_app.state.config.OPENAI_API_BASE_URLS[urlIdx]
            key = openai_app.state.config.OPENAI_API_KEYS[urlIdx]

            if key != "":
                headers = {"Authorization": f"Bearer {key}"}
                r = requests.post(
                    f"{url}/{filter['id']}/filter/outlet",
                    headers=headers,
                    json={
                        "user": {
                            "id": user.id,
                            "name": user.name,
                            "email": user.email,
                            "role": user.role,
                        },
                        "body": data,
                    },
                )

                r.raise_for_status()
                data = r.json()
        except Exception as e:
            # Handle connection error here
            print(f"Connection error: {e}")

            if r is not None:
                try:
                    res = r.json()
                    if "detail" in res:
                        return JSONResponse(
                            status_code=r.status_code,
                            content=res,
                        )
                except Exception:
                    pass

            else:
                pass

    __event_emitter__ = get_event_emitter(
        {
            "chat_id": data["chat_id"],
            "message_id": data["id"],
            "session_id": data["session_id"],
        }
    )

    __event_call__ = get_event_call(
        {
            "chat_id": data["chat_id"],
            "message_id": data["id"],
            "session_id": data["session_id"],
        }
    )

    def get_priority(function_id):
        function = Functions.get_function_by_id(function_id)
        if function is not None and hasattr(function, "valves"):
            # TODO: Fix FunctionModel to include valves
            return (function.valves if function.valves else {}).get("priority", 0)
        return 0

    filter_ids = [function.id for function in Functions.get_global_filter_functions()]
    if "info" in model and "meta" in model["info"]:
        filter_ids.extend(model["info"]["meta"].get("filterIds", []))
        filter_ids = list(set(filter_ids))

    enabled_filter_ids = [
        function.id
        for function in Functions.get_functions_by_type("filter", active_only=True)
    ]
    filter_ids = [
        filter_id for filter_id in filter_ids if filter_id in enabled_filter_ids
    ]

    # Sort filter_ids by priority, using the get_priority function
    filter_ids.sort(key=get_priority)

    for filter_id in filter_ids:
        filter = Functions.get_function_by_id(filter_id)
        if not filter:
            continue

        if filter_id in webui_app.state.FUNCTIONS:
            function_module = webui_app.state.FUNCTIONS[filter_id]
        else:
            function_module, _, _ = load_function_module_by_id(filter_id)
            webui_app.state.FUNCTIONS[filter_id] = function_module

        if hasattr(function_module, "valves") and hasattr(function_module, "Valves"):
            valves = Functions.get_function_valves_by_id(filter_id)
            function_module.valves = function_module.Valves(
                **(valves if valves else {})
            )

        if not hasattr(function_module, "outlet"):
            continue
        try:
            outlet = function_module.outlet

            # Get the signature of the function
            sig = inspect.signature(outlet)
            params = {"body": data}

            # Extra parameters to be passed to the function
            extra_params = {
                "__model__": model,
                "__id__": filter_id,
                "__event_emitter__": __event_emitter__,
                "__event_call__": __event_call__,
            }

            # Add extra params contained in the function signature
            for key, value in extra_params.items():
                if key in sig.parameters:
                    params[key] = value

            if "__user__" in sig.parameters:
                __user__ = {
                    "id": user.id,
                    "email": user.email,
                    "name": user.name,
                    "role": user.role,
                }

                try:
                    if hasattr(function_module, "UserValves"):
                        __user__["valves"] = function_module.UserValves(
                            **Functions.get_user_valves_by_id_and_user_id(
                                filter_id, user.id
                            )
                        )
                except Exception as e:
                    print(e)

                params = {**params, "__user__": __user__}

            if inspect.iscoroutinefunction(outlet):
                data = await outlet(**params)
            else:
                data = outlet(**params)
        except Exception as e:
            print(f"Error: {e}")
            return JSONResponse(
                status_code=status.HTTP_400_BAD_REQUEST,
                content={"detail": str(e)},
            )

    return data

@app.post("/api/chat/actions/{action_id}")
async def chat_action(action_id: str, form_data: dict, user=Depends(get_verified_user)):
    if "." in action_id:
        action_id, sub_action_id = action_id.split(".")
    else:
        sub_action_id = None

    action = Functions.get_function_by_id(action_id)
    if not action:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail="Action not found",
        )

    model_list = await get_all_models()
    models = {model["id"]: model for model in model_list}

    data = form_data
    model_id = data["model"]
    if model_id not in models:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail="Model not found",
        )
    model = models[model_id]

    __event_emitter__ = get_event_emitter(
        {
            "chat_id": data["chat_id"],
            "message_id": data["id"],
            "session_id": data["session_id"],
        }
    )
    __event_call__ = get_event_call(
        {
            "chat_id": data["chat_id"],
            "message_id": data["id"],
            "session_id": data["session_id"],
        }
    )

    if action_id in webui_app.state.FUNCTIONS:
        function_module = webui_app.state.FUNCTIONS[action_id]
    else:
        function_module, _, _ = load_function_module_by_id(action_id)
        webui_app.state.FUNCTIONS[action_id] = function_module

    if hasattr(function_module, "valves") and hasattr(function_module, "Valves"):
        valves = Functions.get_function_valves_by_id(action_id)
        function_module.valves = function_module.Valves(**(valves if valves else {}))

    if hasattr(function_module, "action"):
        try:
            action = function_module.action

            # Get the signature of the function
            sig = inspect.signature(action)
            params = {"body": data}

            # Extra parameters to be passed to the function
            extra_params = {
                "__model__": model,
                "__id__": sub_action_id if sub_action_id is not None else action_id,
                "__event_emitter__": __event_emitter__,
                "__event_call__": __event_call__,
            }

            # Add extra params contained in the function signature
            for key, value in extra_params.items():
                if key in sig.parameters:
                    params[key] = value

            if "__user__" in sig.parameters:
                __user__ = {
                    "id": user.id,
                    "email": user.email,
                    "name": user.name,
                    "role": user.role,
                }

                try:
                    if hasattr(function_module, "UserValves"):
                        __user__["valves"] = function_module.UserValves(
                            **Functions.get_user_valves_by_id_and_user_id(
                                action_id, user.id
                            )
                        )
                except Exception as e:
                    print(e)

                params = {**params, "__user__": __user__}

            if inspect.iscoroutinefunction(action):
                data = await action(**params)
            else:
                data = action(**params)
        except Exception as e:
            print(f"Error: {e}")
            return JSONResponse(
                status_code=status.HTTP_400_BAD_REQUEST,
                content={"detail": str(e)},
            )

    return data

##################################
#
# Task Endpoints
#
##################################


# TODO: Refactor task API endpoints below into a separate file


@app.get("/api/task/config")
async def get_task_config(user=Depends(get_verified_user)):
    return {
        "TASK_MODEL": app.state.config.TASK_MODEL,
        "TASK_MODEL_EXTERNAL": app.state.config.TASK_MODEL_EXTERNAL,
        "TITLE_GENERATION_PROMPT_TEMPLATE": app.state.config.TITLE_GENERATION_PROMPT_TEMPLATE,
        "TAGS_GENERATION_PROMPT_TEMPLATE": app.state.config.TAGS_GENERATION_PROMPT_TEMPLATE,
        "ENABLE_TAGS_GENERATION": app.state.config.ENABLE_TAGS_GENERATION,
        "ENABLE_SEARCH_QUERY_GENERATION": app.state.config.ENABLE_SEARCH_QUERY_GENERATION,
        "ENABLE_RETRIEVAL_QUERY_GENERATION": app.state.config.ENABLE_RETRIEVAL_QUERY_GENERATION,
        "QUERY_GENERATION_PROMPT_TEMPLATE": app.state.config.QUERY_GENERATION_PROMPT_TEMPLATE,
        "TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE": app.state.config.TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE,
    }


class TaskConfigForm(BaseModel):
    TASK_MODEL: Optional[str]
    TASK_MODEL_EXTERNAL: Optional[str]
    TITLE_GENERATION_PROMPT_TEMPLATE: str
    TAGS_GENERATION_PROMPT_TEMPLATE: str
    ENABLE_TAGS_GENERATION: bool
    ENABLE_SEARCH_QUERY_GENERATION: bool
    ENABLE_RETRIEVAL_QUERY_GENERATION: bool
    QUERY_GENERATION_PROMPT_TEMPLATE: str
    TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE: str
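

# Illustrative request body for the update endpoint below (values are examples
# only, not defaults):
#
#   POST /api/task/config/update
#   {
#       "TASK_MODEL": "llama3.1",
#       "TASK_MODEL_EXTERNAL": "gpt-4o-mini",
#       "TITLE_GENERATION_PROMPT_TEMPLATE": "",
#       "TAGS_GENERATION_PROMPT_TEMPLATE": "",
#       "ENABLE_TAGS_GENERATION": true,
#       "ENABLE_SEARCH_QUERY_GENERATION": true,
#       "ENABLE_RETRIEVAL_QUERY_GENERATION": true,
#       "QUERY_GENERATION_PROMPT_TEMPLATE": "",
#       "TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE": ""
#   }
#
# An empty template string means "fall back to the built-in default" in the
# task endpoints below.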
@app.post("/api/task/config/update")
async def update_task_config(form_data: TaskConfigForm, user=Depends(get_admin_user)):
    app.state.config.TASK_MODEL = form_data.TASK_MODEL
    app.state.config.TASK_MODEL_EXTERNAL = form_data.TASK_MODEL_EXTERNAL
    app.state.config.TITLE_GENERATION_PROMPT_TEMPLATE = (
        form_data.TITLE_GENERATION_PROMPT_TEMPLATE
    )
    app.state.config.TAGS_GENERATION_PROMPT_TEMPLATE = (
        form_data.TAGS_GENERATION_PROMPT_TEMPLATE
    )
    app.state.config.ENABLE_TAGS_GENERATION = form_data.ENABLE_TAGS_GENERATION
    app.state.config.ENABLE_SEARCH_QUERY_GENERATION = (
        form_data.ENABLE_SEARCH_QUERY_GENERATION
    )
    app.state.config.ENABLE_RETRIEVAL_QUERY_GENERATION = (
        form_data.ENABLE_RETRIEVAL_QUERY_GENERATION
    )
    app.state.config.QUERY_GENERATION_PROMPT_TEMPLATE = (
        form_data.QUERY_GENERATION_PROMPT_TEMPLATE
    )
    app.state.config.TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE = (
        form_data.TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE
    )

    return {
        "TASK_MODEL": app.state.config.TASK_MODEL,
        "TASK_MODEL_EXTERNAL": app.state.config.TASK_MODEL_EXTERNAL,
        "TITLE_GENERATION_PROMPT_TEMPLATE": app.state.config.TITLE_GENERATION_PROMPT_TEMPLATE,
        "TAGS_GENERATION_PROMPT_TEMPLATE": app.state.config.TAGS_GENERATION_PROMPT_TEMPLATE,
        "ENABLE_TAGS_GENERATION": app.state.config.ENABLE_TAGS_GENERATION,
        "ENABLE_SEARCH_QUERY_GENERATION": app.state.config.ENABLE_SEARCH_QUERY_GENERATION,
        "ENABLE_RETRIEVAL_QUERY_GENERATION": app.state.config.ENABLE_RETRIEVAL_QUERY_GENERATION,
        "QUERY_GENERATION_PROMPT_TEMPLATE": app.state.config.QUERY_GENERATION_PROMPT_TEMPLATE,
        "TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE": app.state.config.TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE,
    }


@app.post("/api/task/title/completions")
async def generate_title(form_data: dict, user=Depends(get_verified_user)):
    model_list = await get_all_models()
    models = {model["id"]: model for model in model_list}

    model_id = form_data["model"]
    if model_id not in models:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail="Model not found",
        )

    # If the user has a custom task model, use that model
    task_model_id = get_task_model_id(
        model_id,
        app.state.config.TASK_MODEL,
        app.state.config.TASK_MODEL_EXTERNAL,
        models,
    )

    log.debug(f"generating chat title using model {task_model_id} for user {user.email}")

    if app.state.config.TITLE_GENERATION_PROMPT_TEMPLATE != "":
        template = app.state.config.TITLE_GENERATION_PROMPT_TEMPLATE
    else:
        template = """Create a concise, 3-5 word title with an emoji as a title for the chat history, in the given language. Suitable Emojis for the summary can be used to enhance understanding but avoid quotation marks or special formatting. RESPOND ONLY WITH THE TITLE TEXT.

Examples of titles:
📉 Stock Market Trends
🍪 Perfect Chocolate Chip Recipe
Evolution of Music Streaming
Remote Work Productivity Tips
Artificial Intelligence in Healthcare
🎮 Video Game Development Insights

<chat_history>
{{MESSAGES:END:2}}
</chat_history>"""
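
    # {{MESSAGES:END:2}} is a placeholder expanded by title_generation_template;
    # as the name suggests, it splices the trailing chat messages (here the
    # last two) into the prompt.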
    content = title_generation_template(
        template,
        form_data["messages"],
        {
            "name": user.name,
            "location": user.info.get("location") if user.info else None,
        },
    )

    payload = {
        "model": task_model_id,
        "messages": [{"role": "user", "content": content}],
        "stream": False,
        **(
            {"max_tokens": 50}
            if models[task_model_id]["owned_by"] == "ollama"
            else {
                "max_completion_tokens": 50,
            }
        ),
        "metadata": {
            "task": str(TASKS.TITLE_GENERATION),
            "task_body": form_data,
            "chat_id": form_data.get("chat_id", None),
        },
    }
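
    # Ollama backends take the legacy "max_tokens" cap, while the OpenAI-style
    # path uses "max_completion_tokens" (OpenAI's newer name for the same
    # limit); 50 tokens is ample for a 3-5 word title.
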
    # Handle pipeline filters
    try:
        payload = filter_pipeline(payload, user, models)
    except Exception as e:
        if len(e.args) > 1:
            return JSONResponse(
                status_code=e.args[0],
                content={"detail": e.args[1]},
            )
        else:
            return JSONResponse(
                status_code=status.HTTP_400_BAD_REQUEST,
                content={"detail": str(e)},
            )
    if "chat_id" in payload:
        del payload["chat_id"]

    return await generate_chat_completions(form_data=payload, user=user)


@app.post("/api/task/tags/completions")
async def generate_chat_tags(form_data: dict, user=Depends(get_verified_user)):
    if not app.state.config.ENABLE_TAGS_GENERATION:
        return JSONResponse(
            status_code=status.HTTP_200_OK,
            content={"detail": "Tags generation is disabled"},
        )

    model_list = await get_all_models()
    models = {model["id"]: model for model in model_list}

    model_id = form_data["model"]
    if model_id not in models:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail="Model not found",
        )

    # If the user has a custom task model, use that model
    task_model_id = get_task_model_id(
        model_id,
        app.state.config.TASK_MODEL,
        app.state.config.TASK_MODEL_EXTERNAL,
        models,
    )

    log.debug(f"generating chat tags using model {task_model_id} for user {user.email}")

    if app.state.config.TAGS_GENERATION_PROMPT_TEMPLATE != "":
        template = app.state.config.TAGS_GENERATION_PROMPT_TEMPLATE
    else:
        template = """### Task:
Generate 1-3 broad tags categorizing the main themes of the chat history, along with 1-3 more specific subtopic tags.

### Guidelines:
- Start with high-level domains (e.g. Science, Technology, Philosophy, Arts, Politics, Business, Health, Sports, Entertainment, Education)
- Consider including relevant subfields/subdomains if they are strongly represented throughout the conversation
- If content is too short (less than 3 messages) or too diverse, use only ["General"]
- Use the chat's primary language; default to English if multilingual
- Prioritize accuracy over specificity

### Output:
JSON format: { "tags": ["tag1", "tag2", "tag3"] }

### Chat History:
<chat_history>
{{MESSAGES:END:6}}
</chat_history>"""
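
    # As with title generation, {{MESSAGES:END:6}} is expanded by the template
    # helper with the trailing chat messages (the last six here), and the model
    # is instructed to reply in the JSON shape shown under "### Output:".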
    content = tags_generation_template(
        template, form_data["messages"], {"name": user.name}
    )

    payload = {
        "model": task_model_id,
        "messages": [{"role": "user", "content": content}],
        "stream": False,
        "metadata": {
            "task": str(TASKS.TAGS_GENERATION),
            "task_body": form_data,
            "chat_id": form_data.get("chat_id", None),
        },
    }

    # Handle pipeline filters
    try:
        payload = filter_pipeline(payload, user, models)
    except Exception as e:
        if len(e.args) > 1:
            return JSONResponse(
                status_code=e.args[0],
                content={"detail": e.args[1]},
            )
        else:
            return JSONResponse(
                status_code=status.HTTP_400_BAD_REQUEST,
                content={"detail": str(e)},
            )
    if "chat_id" in payload:
        del payload["chat_id"]

    return await generate_chat_completions(form_data=payload, user=user)
@app.post("/api/task/queries/completions")
async def generate_queries(form_data: dict, user=Depends(get_verified_user)):
    type = form_data.get("type")
    if type == "web_search":
        if not app.state.config.ENABLE_SEARCH_QUERY_GENERATION:
            raise HTTPException(
                status_code=status.HTTP_400_BAD_REQUEST,
                detail="Search query generation is disabled",
            )
    elif type == "retrieval":
        if not app.state.config.ENABLE_RETRIEVAL_QUERY_GENERATION:
            raise HTTPException(
                status_code=status.HTTP_400_BAD_REQUEST,
                detail="Query generation is disabled",
            )

    model_list = await get_all_models()
    models = {model["id"]: model for model in model_list}

    model_id = form_data["model"]
    if model_id not in models:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail="Model not found",
        )

    # If the user has a custom task model, use that model
    task_model_id = get_task_model_id(
        model_id,
        app.state.config.TASK_MODEL,
        app.state.config.TASK_MODEL_EXTERNAL,
        models,
    )

    log.debug(f"generating {type} queries using model {task_model_id} for user {user.email}")

    if app.state.config.QUERY_GENERATION_PROMPT_TEMPLATE != "":
        template = app.state.config.QUERY_GENERATION_PROMPT_TEMPLATE
    else:
        template = DEFAULT_QUERY_GENERATION_PROMPT_TEMPLATE

    content = query_generation_template(
        template, form_data["messages"], {"name": user.name}
    )

    payload = {
        "model": task_model_id,
        "messages": [{"role": "user", "content": content}],
        "stream": False,
        "metadata": {
            "task": str(TASKS.QUERY_GENERATION),
            "task_body": form_data,
            "chat_id": form_data.get("chat_id", None),
        },
    }

    # Handle pipeline filters
    try:
        payload = filter_pipeline(payload, user, models)
    except Exception as e:
        if len(e.args) > 1:
            return JSONResponse(
                status_code=e.args[0],
                content={"detail": e.args[1]},
            )
        else:
            return JSONResponse(
                status_code=status.HTTP_400_BAD_REQUEST,
                content={"detail": str(e)},
            )
    if "chat_id" in payload:
        del payload["chat_id"]

    return await generate_chat_completions(form_data=payload, user=user)
@app.post("/api/task/emoji/completions")
async def generate_emoji(form_data: dict, user=Depends(get_verified_user)):
    model_list = await get_all_models()
    models = {model["id"]: model for model in model_list}

    model_id = form_data["model"]
    if model_id not in models:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail="Model not found",
        )

    # If the user has a custom task model, use that model
    task_model_id = get_task_model_id(
        model_id,
        app.state.config.TASK_MODEL,
        app.state.config.TASK_MODEL_EXTERNAL,
        models,
    )

    log.debug(f"generating emoji using model {task_model_id} for user {user.email}")

    template = '''
Your task is to reflect the speaker's likely facial expression through a fitting emoji. Interpret emotions from the message and reflect their facial expression using fitting, diverse emojis (e.g., 😊, 😢, 😡, 😱).

Message: """{{prompt}}"""
'''
    content = emoji_generation_template(
        template,
        form_data["prompt"],
        {
            "name": user.name,
            "location": user.info.get("location") if user.info else None,
        },
    )

    payload = {
        "model": task_model_id,
        "messages": [{"role": "user", "content": content}],
        "stream": False,
        **(
            {"max_tokens": 4}
            if models[task_model_id]["owned_by"] == "ollama"
            else {
                "max_completion_tokens": 4,
            }
        ),
        "chat_id": form_data.get("chat_id", None),
        "metadata": {"task": str(TASKS.EMOJI_GENERATION), "task_body": form_data},
    }
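
    # The completion is capped at 4 tokens (again "max_tokens" for Ollama,
    # "max_completion_tokens" for OpenAI-style backends), which keeps the
    # reply to little more than a single emoji.
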
    # Handle pipeline filters
    try:
        payload = filter_pipeline(payload, user, models)
    except Exception as e:
        if len(e.args) > 1:
            return JSONResponse(
                status_code=e.args[0],
                content={"detail": e.args[1]},
            )
        else:
            return JSONResponse(
                status_code=status.HTTP_400_BAD_REQUEST,
                content={"detail": str(e)},
            )
    if "chat_id" in payload:
        del payload["chat_id"]

    return await generate_chat_completions(form_data=payload, user=user)


@app.post("/api/task/moa/completions")
async def generate_moa_response(form_data: dict, user=Depends(get_verified_user)):
    model_list = await get_all_models()
    models = {model["id"]: model for model in model_list}

    model_id = form_data["model"]
    if model_id not in models:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail="Model not found",
        )

    # If the user has a custom task model, use that model
    task_model_id = get_task_model_id(
        model_id,
        app.state.config.TASK_MODEL,
        app.state.config.TASK_MODEL_EXTERNAL,
        models,
    )

    log.debug(f"generating MOA response using model {task_model_id} for user {user.email}")

    template = """You have been provided with a set of responses from various models to the latest user query: "{{prompt}}"

Your task is to synthesize these responses into a single, high-quality response. It is crucial to critically evaluate the information provided in these responses, recognizing that some of it may be biased or incorrect. Your response should not simply replicate the given answers but should offer a refined, accurate, and comprehensive reply to the instruction. Ensure your response is well-structured, coherent, and adheres to the highest standards of accuracy and reliability.

Responses from models: {{responses}}"""
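
    # moa_response_generation_template substitutes {{prompt}} with the original
    # user query and {{responses}} with the candidate answers from the request
    # body, yielding a single mixture-of-agents synthesis prompt.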
    content = moa_response_generation_template(
        template,
        form_data["prompt"],
        form_data["responses"],
    )

    payload = {
        "model": task_model_id,
        "messages": [{"role": "user", "content": content}],
        "stream": form_data.get("stream", False),
        "chat_id": form_data.get("chat_id", None),
        "metadata": {
            "task": str(TASKS.MOA_RESPONSE_GENERATION),
            "task_body": form_data,
        },
    }

    try:
        payload = filter_pipeline(payload, user, models)
    except Exception as e:
        if len(e.args) > 1:
            return JSONResponse(
                status_code=e.args[0],
                content={"detail": e.args[1]},
            )
        else:
            return JSONResponse(
                status_code=status.HTTP_400_BAD_REQUEST,
                content={"detail": str(e)},
            )
    if "chat_id" in payload:
        del payload["chat_id"]

    return await generate_chat_completions(form_data=payload, user=user)


##################################
#
# Pipelines Endpoints
#
##################################


# TODO: Refactor pipelines API endpoints below into a separate file
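

# Pipelines are external OpenAI-compatible servers. An endpoint is treated as a
# pipelines server when its models response carries a "pipelines" key, which is
# exactly what the listing below checks for.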
@app.get("/api/pipelines/list")
async def get_pipelines_list(user=Depends(get_admin_user)):
    responses = await get_openai_models_responses()

    log.debug(f"get_pipelines_list: get_openai_models_responses returned {responses}")
    urlIdxs = [
        idx
        for idx, response in enumerate(responses)
        if response is not None and "pipelines" in response
    ]

    return {
        "data": [
            {
                "url": openai_app.state.config.OPENAI_API_BASE_URLS[urlIdx],
                "idx": urlIdx,
            }
            for urlIdx in urlIdxs
        ]
    }


@app.post("/api/pipelines/upload")
async def upload_pipeline(
    urlIdx: int = Form(...), file: UploadFile = File(...), user=Depends(get_admin_user)
):
    print("upload_pipeline", urlIdx, file.filename)
    # Check that the uploaded file is a Python file
    if not (file.filename and file.filename.endswith(".py")):
        raise HTTPException(
            status_code=status.HTTP_400_BAD_REQUEST,
            detail="Only Python (.py) files are allowed.",
        )

    upload_folder = f"{CACHE_DIR}/pipelines"
    os.makedirs(upload_folder, exist_ok=True)
    file_path = os.path.join(upload_folder, file.filename)

    r = None
    try:
        # Save the uploaded file
        with open(file_path, "wb") as buffer:
            shutil.copyfileobj(file.file, buffer)

        url = openai_app.state.config.OPENAI_API_BASE_URLS[urlIdx]
        key = openai_app.state.config.OPENAI_API_KEYS[urlIdx]

        headers = {"Authorization": f"Bearer {key}"}

        with open(file_path, "rb") as f:
            files = {"file": f}
            r = requests.post(f"{url}/pipelines/upload", headers=headers, files=files)

        r.raise_for_status()
        data = r.json()

        return {**data}
    except Exception as e:
        # Handle connection error here
        print(f"Connection error: {e}")

        detail = "Pipeline not found"
        status_code = status.HTTP_404_NOT_FOUND
        if r is not None:
            status_code = r.status_code
            try:
                res = r.json()
                if "detail" in res:
                    detail = res["detail"]
            except Exception:
                pass

        raise HTTPException(
            status_code=status_code,
            detail=detail,
        )
    finally:
        # Ensure the file is deleted after the upload is completed or on failure
        if os.path.exists(file_path):
            os.remove(file_path)
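

# Illustrative client call for the upload endpoint above (host and token are
# placeholders):
#
#   curl -X POST http://localhost:8080/api/pipelines/upload \
#        -H "Authorization: Bearer $ADMIN_TOKEN" \
#        -F "urlIdx=0" -F "file=@my_pipeline.py"
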
class AddPipelineForm(BaseModel):
    url: str
    urlIdx: int
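

# Illustrative request body (example values): the pipelines server selected by
# urlIdx is asked to download and install the module from the given URL itself.
#
#   {"url": "https://example.com/my_pipeline.py", "urlIdx": 0}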
@app.post("/api/pipelines/add")
async def add_pipeline(form_data: AddPipelineForm, user=Depends(get_admin_user)):
    r = None
    try:
        urlIdx = form_data.urlIdx

        url = openai_app.state.config.OPENAI_API_BASE_URLS[urlIdx]
        key = openai_app.state.config.OPENAI_API_KEYS[urlIdx]

        headers = {"Authorization": f"Bearer {key}"}
        r = requests.post(
            f"{url}/pipelines/add", headers=headers, json={"url": form_data.url}
        )

        r.raise_for_status()
        data = r.json()

        return {**data}
    except Exception as e:
        # Handle connection error here
        print(f"Connection error: {e}")

        detail = "Pipeline not found"
        if r is not None:
            try:
                res = r.json()
                if "detail" in res:
                    detail = res["detail"]
            except Exception:
                pass

        raise HTTPException(
            status_code=(r.status_code if r is not None else status.HTTP_404_NOT_FOUND),
            detail=detail,
        )


class DeletePipelineForm(BaseModel):
    id: str
    urlIdx: int


@app.delete("/api/pipelines/delete")
async def delete_pipeline(form_data: DeletePipelineForm, user=Depends(get_admin_user)):
    r = None
    try:
        urlIdx = form_data.urlIdx

        url = openai_app.state.config.OPENAI_API_BASE_URLS[urlIdx]
        key = openai_app.state.config.OPENAI_API_KEYS[urlIdx]

        headers = {"Authorization": f"Bearer {key}"}
        r = requests.delete(
            f"{url}/pipelines/delete", headers=headers, json={"id": form_data.id}
        )

        r.raise_for_status()
        data = r.json()

        return {**data}
    except Exception as e:
        # Handle connection error here
        print(f"Connection error: {e}")

        detail = "Pipeline not found"
        if r is not None:
            try:
                res = r.json()
                if "detail" in res:
                    detail = res["detail"]
            except Exception:
                pass

        raise HTTPException(
            status_code=(r.status_code if r is not None else status.HTTP_404_NOT_FOUND),
            detail=detail,
        )


@app.get("/api/pipelines")
async def get_pipelines(urlIdx: Optional[int] = None, user=Depends(get_admin_user)):
    r = None
    try:
        url = openai_app.state.config.OPENAI_API_BASE_URLS[urlIdx]
        key = openai_app.state.config.OPENAI_API_KEYS[urlIdx]

        headers = {"Authorization": f"Bearer {key}"}
        r = requests.get(f"{url}/pipelines", headers=headers)

        r.raise_for_status()
        data = r.json()

        return {**data}
    except Exception as e:
        # Handle connection error here
        print(f"Connection error: {e}")

        detail = "Pipeline not found"
        if r is not None:
            try:
                res = r.json()
                if "detail" in res:
                    detail = res["detail"]
            except Exception:
                pass

        raise HTTPException(
            status_code=(r.status_code if r is not None else status.HTTP_404_NOT_FOUND),
            detail=detail,
        )


@app.get("/api/pipelines/{pipeline_id}/valves")
async def get_pipeline_valves(
    urlIdx: Optional[int],
    pipeline_id: str,
    user=Depends(get_admin_user),
):
    r = None
    try:
        url = openai_app.state.config.OPENAI_API_BASE_URLS[urlIdx]
        key = openai_app.state.config.OPENAI_API_KEYS[urlIdx]

        headers = {"Authorization": f"Bearer {key}"}
        r = requests.get(f"{url}/{pipeline_id}/valves", headers=headers)

        r.raise_for_status()
        data = r.json()

        return {**data}
    except Exception as e:
        # Handle connection error here
        print(f"Connection error: {e}")

        detail = "Pipeline not found"
        if r is not None:
            try:
                res = r.json()
                if "detail" in res:
                    detail = res["detail"]
            except Exception:
                pass

        raise HTTPException(
            status_code=(r.status_code if r is not None else status.HTTP_404_NOT_FOUND),
            detail=detail,
        )


@app.get("/api/pipelines/{pipeline_id}/valves/spec")
async def get_pipeline_valves_spec(
    urlIdx: Optional[int],
    pipeline_id: str,
    user=Depends(get_admin_user),
):
    r = None
    try:
        url = openai_app.state.config.OPENAI_API_BASE_URLS[urlIdx]
        key = openai_app.state.config.OPENAI_API_KEYS[urlIdx]

        headers = {"Authorization": f"Bearer {key}"}
        r = requests.get(f"{url}/{pipeline_id}/valves/spec", headers=headers)

        r.raise_for_status()
        data = r.json()

        return {**data}
    except Exception as e:
        # Handle connection error here
        print(f"Connection error: {e}")

        detail = "Pipeline not found"
        if r is not None:
            try:
                res = r.json()
                if "detail" in res:
                    detail = res["detail"]
            except Exception:
                pass

        raise HTTPException(
            status_code=(r.status_code if r is not None else status.HTTP_404_NOT_FOUND),
            detail=detail,
        )


@app.post("/api/pipelines/{pipeline_id}/valves/update")
async def update_pipeline_valves(
    urlIdx: Optional[int],
    pipeline_id: str,
    form_data: dict,
    user=Depends(get_admin_user),
):
    r = None
    try:
        url = openai_app.state.config.OPENAI_API_BASE_URLS[urlIdx]
        key = openai_app.state.config.OPENAI_API_KEYS[urlIdx]

        headers = {"Authorization": f"Bearer {key}"}
        r = requests.post(
            f"{url}/{pipeline_id}/valves/update",
            headers=headers,
            json={**form_data},
        )

        r.raise_for_status()
        data = r.json()

        return {**data}
    except Exception as e:
        # Handle connection error here
        print(f"Connection error: {e}")

        detail = "Pipeline not found"
        if r is not None:
            try:
                res = r.json()
                if "detail" in res:
                    detail = res["detail"]
            except Exception:
                pass

        raise HTTPException(
            status_code=(r.status_code if r is not None else status.HTTP_404_NOT_FOUND),
            detail=detail,
        )


##################################
#
# Config Endpoints
#
##################################
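

# /api/config is reachable without authentication: anonymous callers receive
# only the public status, branding, and auth feature flags, while a valid
# "token" cookie additionally unlocks the user-scoped sections (default
# models, audio, file limits, permissions) of the response below.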
@app.get("/api/config")
async def get_app_config(request: Request):
    user = None
    if "token" in request.cookies:
        token = request.cookies.get("token")
        try:
            data = decode_token(token)
        except Exception as e:
            log.debug(e)
            raise HTTPException(
                status_code=status.HTTP_401_UNAUTHORIZED,
                detail="Invalid token",
            )
        if data is not None and "id" in data:
            user = Users.get_user_by_id(data["id"])

    onboarding = False
    if user is None:
        user_count = Users.get_num_users()
        onboarding = user_count == 0

    return {
        **({"onboarding": True} if onboarding else {}),
        "status": True,
        "name": WEBUI_NAME,
        "version": VERSION,
        "default_locale": str(DEFAULT_LOCALE),
        "oauth": {
            "providers": {
                name: config.get("name", name)
                for name, config in OAUTH_PROVIDERS.items()
            }
        },
        "features": {
            "auth": WEBUI_AUTH,
            "auth_trusted_header": bool(webui_app.state.AUTH_TRUSTED_EMAIL_HEADER),
            "enable_ldap": webui_app.state.config.ENABLE_LDAP,
            "enable_api_key": webui_app.state.config.ENABLE_API_KEY,
            "enable_signup": webui_app.state.config.ENABLE_SIGNUP,
            "enable_login_form": webui_app.state.config.ENABLE_LOGIN_FORM,
            **(
                {
                    "enable_web_search": retrieval_app.state.config.ENABLE_RAG_WEB_SEARCH,
                    "enable_image_generation": images_app.state.config.ENABLED,
                    "enable_community_sharing": webui_app.state.config.ENABLE_COMMUNITY_SHARING,
                    "enable_message_rating": webui_app.state.config.ENABLE_MESSAGE_RATING,
                    "enable_admin_export": ENABLE_ADMIN_EXPORT,
                    "enable_admin_chat_access": ENABLE_ADMIN_CHAT_ACCESS,
                }
                if user is not None
                else {}
            ),
        },
        **(
            {
                "default_models": webui_app.state.config.DEFAULT_MODELS,
                "default_prompt_suggestions": webui_app.state.config.DEFAULT_PROMPT_SUGGESTIONS,
                "audio": {
                    "tts": {
                        "engine": audio_app.state.config.TTS_ENGINE,
                        "voice": audio_app.state.config.TTS_VOICE,
                        "split_on": audio_app.state.config.TTS_SPLIT_ON,
                    },
                    "stt": {
                        "engine": audio_app.state.config.STT_ENGINE,
                    },
                },
                "file": {
                    "max_size": retrieval_app.state.config.FILE_MAX_SIZE,
                    "max_count": retrieval_app.state.config.FILE_MAX_COUNT,
                },
                "permissions": {**webui_app.state.config.USER_PERMISSIONS},
            }
            if user is not None
            else {}
        ),
    }


# TODO: webhook endpoint should be under config endpoints
@app.get("/api/webhook")
async def get_webhook_url(user=Depends(get_admin_user)):
    return {
        "url": app.state.config.WEBHOOK_URL,
    }


class UrlForm(BaseModel):
    url: str


@app.post("/api/webhook")
async def update_webhook_url(form_data: UrlForm, user=Depends(get_admin_user)):
    app.state.config.WEBHOOK_URL = form_data.url
    webui_app.state.WEBHOOK_URL = app.state.config.WEBHOOK_URL
    return {"url": app.state.config.WEBHOOK_URL}


@app.get("/api/version")
async def get_app_version():
    return {
        "version": VERSION,
    }
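

# Returns the five most recent changelog entries; this relies on the CHANGELOG
# dict preserving insertion order (assumed here to be newest-first).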
@app.get("/api/changelog")
async def get_app_changelog():
    return {key: CHANGELOG[key] for idx, key in enumerate(CHANGELOG) if idx < 5}


@app.get("/api/version/updates")
async def get_app_latest_release_version():
    if OFFLINE_MODE:
        log.debug(
            "Offline mode is enabled, returning current version as latest version"
        )
        return {"current": VERSION, "latest": VERSION}

    try:
        timeout = aiohttp.ClientTimeout(total=1)
        async with aiohttp.ClientSession(timeout=timeout, trust_env=True) as session:
            async with session.get(
                "https://api.github.com/repos/open-webui/open-webui/releases/latest"
            ) as response:
                response.raise_for_status()
                data = await response.json()
                latest_version = data["tag_name"]

                return {"current": VERSION, "latest": latest_version[1:]}
    except Exception as e:
        log.debug(e)
        return {"current": VERSION, "latest": VERSION}


############################
# OAuth Login & Callback
############################

# SessionMiddleware is used by authlib for oauth
if len(OAUTH_PROVIDERS) > 0:
    app.add_middleware(
        SessionMiddleware,
        secret_key=WEBUI_SECRET_KEY,
        session_cookie="oui-session",
        same_site=WEBUI_SESSION_COOKIE_SAME_SITE,
        https_only=WEBUI_SESSION_COOKIE_SECURE,
    )


@app.get("/oauth/{provider}/login")
async def oauth_login(provider: str, request: Request):
    return await oauth_manager.handle_login(provider, request)


# OAuth login logic is as follows:
# 1. Attempt to find a user with a matching subject ID, tied to the provider
# 2. If OAUTH_MERGE_ACCOUNTS_BY_EMAIL is true, find a user with the email address provided via OAuth
#    - This is considered insecure in general, as OAuth providers do not always verify email addresses
# 3. If there is no user, and ENABLE_OAUTH_SIGNUP is true, create a user
#    - Email addresses are considered unique, so we fail registration if the email address is already taken
@app.get("/oauth/{provider}/callback")
async def oauth_callback(provider: str, request: Request, response: Response):
    return await oauth_manager.handle_callback(provider, request, response)


@app.get("/manifest.json")
async def get_manifest_json():
    return {
        "name": WEBUI_NAME,
        "short_name": WEBUI_NAME,
        "description": "Open WebUI is an open, extensible, user-friendly interface for AI that adapts to your workflow.",
        "start_url": "/",
        "display": "standalone",
        "background_color": "#343541",
        "orientation": "natural",
        "icons": [
            {
                "src": "/static/logo.png",
                "type": "image/png",
                "sizes": "500x500",
                "purpose": "any",
            },
            {
                "src": "/static/logo.png",
                "type": "image/png",
                "sizes": "500x500",
                "purpose": "maskable",
            },
        ],
    }


@app.get("/opensearch.xml")
async def get_opensearch_xml():
    xml_content = rf"""
    <OpenSearchDescription xmlns="http://a9.com/-/spec/opensearch/1.1/" xmlns:moz="http://www.mozilla.org/2006/browser/search/">
    <ShortName>{WEBUI_NAME}</ShortName>
    <Description>Search {WEBUI_NAME}</Description>
    <InputEncoding>UTF-8</InputEncoding>
    <Image width="16" height="16" type="image/x-icon">{WEBUI_URL}/static/favicon.png</Image>
    <Url type="text/html" method="get" template="{WEBUI_URL}/?q={"{searchTerms}"}"/>
    <moz:SearchForm>{WEBUI_URL}</moz:SearchForm>
    </OpenSearchDescription>
    """
    return Response(content=xml_content, media_type="application/xml")


@app.get("/health")
async def healthcheck():
    return {"status": True}
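

# Unlike /health, this probe also round-trips a trivial SELECT through the
# database session, so it fails when the app is up but the DB is unreachable.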
@app.get("/health/db")
async def healthcheck_with_db():
    Session.execute(text("SELECT 1;")).all()
    return {"status": True}


app.mount("/static", StaticFiles(directory=STATIC_DIR), name="static")
app.mount("/cache", StaticFiles(directory=CACHE_DIR), name="cache")


if os.path.exists(FRONTEND_BUILD_DIR):
    mimetypes.add_type("text/javascript", ".js")
    app.mount(
        "/",
        SPAStaticFiles(directory=FRONTEND_BUILD_DIR, html=True),
        name="spa-static-files",
    )
else:
    log.warning(
        f"Frontend build directory not found at '{FRONTEND_BUILD_DIR}'. Serving API only."
    )