main.py 86 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692693694695696697698699700701702703704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146114711481149115011511152115311541155115611571158115911601161116211631164116511661167116811691170117111721173117411751176117711781179118011811182118311841185118611871188118911901191119211931194119511961197119811991200120112021203120412051206120712081209121012111212121312141215121612171218121912201221122212231224122512261227122812291230123112321233123412351236123712381239124012411242124312441245124612471248124912501251125212531254125512561257125812591260126112621263126412651266126712681269127012711272127312741275127612771278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177717781779178017811782178317841785178617871788178917901791179217931794179517961797179817991800180118021803180418051806180718081809181018111812181318141815181618171818181918201821182218231824182518261827182818291830183118321833183418351836183718381839184018411842184318441845184618471848184918501851185218531854185518561857185818591860186118621863186418651866186718681869187018711872187318741875187618771878187918801881188218831884188518861887188818891890189118921893189418951896189718981899190019011902190319041905190619071908190919101911191219131914191519161917191819191920192119221923192419251926192719281929193019311932193319341935193619371938193919401941194219431944194519461947194819491950195119521953195419551956195719581959196019611962196319641965196619671968196919701971197219731974197519761977197819791980198119821983198419851986198719881989199019911992199319941995199619971998199920002001200220032004200520062007200820092010201120122013201420152016201720182019202020212022202320242025202620272028202920302031203220332034203520362037203820392040204120422043204420452046204720482049205020512052205320542055205620572058205920602061206220632064206520662067206820692070207120722073207420752076207720782079208020812082208320842085208620872088208920902091209220932094209520962097209820992100210121022103210421052106210721082109211021112112211321142115211621172118211921202121212221232124212521262127212821292130213121322133213421352136213721382139214021412142214321442145214621472148214921502151215221532154215521562157215821592160216121622163216421652166216721682169217021712172217321742175217621772178217921802181218221832184218521862187218821892190219121922193219421952196219721982199220022012202220322042205220622072208220922102211221222132214221522162217221822192220222122222223222422252226222722282229223022312232223322342235223622372238223922402241224222432244224522462247224822492250225122522253225422552256225722582259226022612262226322642265226622672268226922702271227222732274227522762277227822792280228122822283228422852286228722882289229022912292229322942295229622972298229923002301230223032304230523062307230823092310231123122313231423152316231723182319232023212322232323242325232623272328232923302331233223332334233523362337233823392340234123422343234423452346234723482349235023512352235323542355235623572358235923602361236223632364236523662367236823692370237123722373237423752376237723782379238023812382238323842385238623872388238923902391239223932394239523962397239823992400240124022403240424052406240724082409241024112412241324142415241624172418241924202421242224232424242524262427242824292430243124322433243424352436243724382439244024412442244324442445244624472448244924502451245224532454245524562457245824592460246124622463246424652466246724682469247024712472247324742475247624772478247924802481248224832484248524862487248824892490249124922493249424952496249724982499250025012502250325042505250625072508250925102511251225132514251525162517251825192520252125222523252425252526252725282529253025312532253325342535253625372538253925402541254225432544254525462547254825492550255125522553255425552556255725582559256025612562256325642565256625672568256925702571257225732574257525762577257825792580258125822583258425852586258725882589259025912592259325942595259625972598259926002601260226032604260526062607260826092610261126122613261426152616261726182619262026212622262326242625262626272628262926302631263226332634263526362637263826392640264126422643264426452646264726482649265026512652265326542655265626572658265926602661266226632664266526662667266826692670267126722673267426752676
  1. import asyncio
  2. import inspect
  3. import json
  4. import logging
  5. import mimetypes
  6. import os
  7. import shutil
  8. import sys
  9. import time
  10. import random
  11. from contextlib import asynccontextmanager
  12. from typing import Optional
  13. from aiocache import cached
  14. import aiohttp
  15. import requests
  16. from fastapi import (
  17. Depends,
  18. FastAPI,
  19. File,
  20. Form,
  21. HTTPException,
  22. Request,
  23. UploadFile,
  24. status,
  25. )
  26. from fastapi.middleware.cors import CORSMiddleware
  27. from fastapi.responses import JSONResponse, RedirectResponse
  28. from fastapi.staticfiles import StaticFiles
  29. from pydantic import BaseModel
  30. from sqlalchemy import text
  31. from starlette.exceptions import HTTPException as StarletteHTTPException
  32. from starlette.middleware.base import BaseHTTPMiddleware
  33. from starlette.middleware.sessions import SessionMiddleware
  34. from starlette.responses import Response, StreamingResponse
  35. from open_webui.apps.audio.main import app as audio_app
  36. from open_webui.apps.images.main import app as images_app
  37. from open_webui.apps.ollama.main import (
  38. app as ollama_app,
  39. get_all_models as get_ollama_models,
  40. generate_chat_completion as generate_ollama_chat_completion,
  41. GenerateChatCompletionForm,
  42. )
  43. from open_webui.apps.openai.main import (
  44. app as openai_app,
  45. generate_chat_completion as generate_openai_chat_completion,
  46. get_all_models as get_openai_models,
  47. get_all_models_responses as get_openai_models_responses,
  48. )
  49. from open_webui.apps.retrieval.main import app as retrieval_app
  50. from open_webui.apps.retrieval.utils import get_sources_from_files
  51. from open_webui.apps.socket.main import (
  52. app as socket_app,
  53. periodic_usage_pool_cleanup,
  54. get_event_call,
  55. get_event_emitter,
  56. )
  57. from open_webui.apps.webui.internal.db import Session
  58. from open_webui.apps.webui.main import (
  59. app as webui_app,
  60. generate_function_chat_completion,
  61. get_all_models as get_open_webui_models,
  62. )
  63. from open_webui.apps.webui.models.functions import Functions
  64. from open_webui.apps.webui.models.models import Models
  65. from open_webui.apps.webui.models.users import UserModel, Users
  66. from open_webui.apps.webui.utils import load_function_module_by_id
  67. from open_webui.config import (
  68. CACHE_DIR,
  69. CORS_ALLOW_ORIGIN,
  70. DEFAULT_LOCALE,
  71. ENABLE_ADMIN_CHAT_ACCESS,
  72. ENABLE_ADMIN_EXPORT,
  73. ENABLE_OLLAMA_API,
  74. ENABLE_OPENAI_API,
  75. ENABLE_TAGS_GENERATION,
  76. ENV,
  77. FRONTEND_BUILD_DIR,
  78. OAUTH_PROVIDERS,
  79. STATIC_DIR,
  80. TASK_MODEL,
  81. TASK_MODEL_EXTERNAL,
  82. ENABLE_SEARCH_QUERY_GENERATION,
  83. ENABLE_RETRIEVAL_QUERY_GENERATION,
  84. QUERY_GENERATION_PROMPT_TEMPLATE,
  85. DEFAULT_QUERY_GENERATION_PROMPT_TEMPLATE,
  86. TITLE_GENERATION_PROMPT_TEMPLATE,
  87. TAGS_GENERATION_PROMPT_TEMPLATE,
  88. TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE,
  89. WEBHOOK_URL,
  90. WEBUI_AUTH,
  91. WEBUI_NAME,
  92. AppConfig,
  93. reset_config,
  94. )
  95. from open_webui.constants import TASKS
  96. from open_webui.env import (
  97. CHANGELOG,
  98. GLOBAL_LOG_LEVEL,
  99. SAFE_MODE,
  100. SRC_LOG_LEVELS,
  101. VERSION,
  102. WEBUI_BUILD_HASH,
  103. WEBUI_SECRET_KEY,
  104. WEBUI_SESSION_COOKIE_SAME_SITE,
  105. WEBUI_SESSION_COOKIE_SECURE,
  106. WEBUI_URL,
  107. RESET_CONFIG_ON_START,
  108. OFFLINE_MODE,
  109. )
  110. from open_webui.utils.misc import (
  111. add_or_update_system_message,
  112. get_last_user_message,
  113. prepend_to_first_user_message_content,
  114. )
  115. from open_webui.utils.oauth import oauth_manager
  116. from open_webui.utils.payload import convert_payload_openai_to_ollama
  117. from open_webui.utils.response import (
  118. convert_response_ollama_to_openai,
  119. convert_streaming_response_ollama_to_openai,
  120. )
  121. from open_webui.utils.security_headers import SecurityHeadersMiddleware
  122. from open_webui.utils.task import (
  123. rag_template,
  124. title_generation_template,
  125. query_generation_template,
  126. tags_generation_template,
  127. emoji_generation_template,
  128. moa_response_generation_template,
  129. tools_function_calling_generation_template,
  130. )
  131. from open_webui.utils.tools import get_tools
  132. from open_webui.utils.utils import (
  133. decode_token,
  134. get_admin_user,
  135. get_current_user,
  136. get_http_authorization_cred,
  137. get_verified_user,
  138. )
  139. from open_webui.utils.access_control import has_access
  140. if SAFE_MODE:
  141. print("SAFE MODE ENABLED")
  142. Functions.deactivate_all_functions()
  143. logging.basicConfig(stream=sys.stdout, level=GLOBAL_LOG_LEVEL)
  144. log = logging.getLogger(__name__)
  145. log.setLevel(SRC_LOG_LEVELS["MAIN"])
  146. class SPAStaticFiles(StaticFiles):
  147. async def get_response(self, path: str, scope):
  148. try:
  149. return await super().get_response(path, scope)
  150. except (HTTPException, StarletteHTTPException) as ex:
  151. if ex.status_code == 404:
  152. return await super().get_response("index.html", scope)
  153. else:
  154. raise ex
  155. print(
  156. rf"""
  157. ___ __ __ _ _ _ ___
  158. / _ \ _ __ ___ _ __ \ \ / /__| |__ | | | |_ _|
  159. | | | | '_ \ / _ \ '_ \ \ \ /\ / / _ \ '_ \| | | || |
  160. | |_| | |_) | __/ | | | \ V V / __/ |_) | |_| || |
  161. \___/| .__/ \___|_| |_| \_/\_/ \___|_.__/ \___/|___|
  162. |_|
  163. v{VERSION} - building the best open-source AI user interface.
  164. {f"Commit: {WEBUI_BUILD_HASH}" if WEBUI_BUILD_HASH != "dev-build" else ""}
  165. https://github.com/open-webui/open-webui
  166. """
  167. )
  168. @asynccontextmanager
  169. async def lifespan(app: FastAPI):
  170. if RESET_CONFIG_ON_START:
  171. reset_config()
  172. asyncio.create_task(periodic_usage_pool_cleanup())
  173. yield
  174. app = FastAPI(
  175. docs_url="/docs" if ENV == "dev" else None,
  176. openapi_url="/openapi.json" if ENV == "dev" else None,
  177. redoc_url=None,
  178. lifespan=lifespan,
  179. )
  180. app.state.config = AppConfig()
  181. app.state.config.ENABLE_OPENAI_API = ENABLE_OPENAI_API
  182. app.state.config.ENABLE_OLLAMA_API = ENABLE_OLLAMA_API
  183. app.state.config.WEBHOOK_URL = WEBHOOK_URL
  184. app.state.config.TASK_MODEL = TASK_MODEL
  185. app.state.config.TASK_MODEL_EXTERNAL = TASK_MODEL_EXTERNAL
  186. app.state.config.TITLE_GENERATION_PROMPT_TEMPLATE = TITLE_GENERATION_PROMPT_TEMPLATE
  187. app.state.config.ENABLE_TAGS_GENERATION = ENABLE_TAGS_GENERATION
  188. app.state.config.TAGS_GENERATION_PROMPT_TEMPLATE = TAGS_GENERATION_PROMPT_TEMPLATE
  189. app.state.config.ENABLE_SEARCH_QUERY_GENERATION = ENABLE_SEARCH_QUERY_GENERATION
  190. app.state.config.ENABLE_RETRIEVAL_QUERY_GENERATION = ENABLE_RETRIEVAL_QUERY_GENERATION
  191. app.state.config.QUERY_GENERATION_PROMPT_TEMPLATE = QUERY_GENERATION_PROMPT_TEMPLATE
  192. app.state.config.TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE = (
  193. TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE
  194. )
  195. ##################################
  196. #
  197. # ChatCompletion Middleware
  198. #
  199. ##################################
  200. def get_filter_function_ids(model):
  201. def get_priority(function_id):
  202. function = Functions.get_function_by_id(function_id)
  203. if function is not None and hasattr(function, "valves"):
  204. # TODO: Fix FunctionModel
  205. return (function.valves if function.valves else {}).get("priority", 0)
  206. return 0
  207. filter_ids = [function.id for function in Functions.get_global_filter_functions()]
  208. if "info" in model and "meta" in model["info"]:
  209. filter_ids.extend(model["info"]["meta"].get("filterIds", []))
  210. filter_ids = list(set(filter_ids))
  211. enabled_filter_ids = [
  212. function.id
  213. for function in Functions.get_functions_by_type("filter", active_only=True)
  214. ]
  215. filter_ids = [
  216. filter_id for filter_id in filter_ids if filter_id in enabled_filter_ids
  217. ]
  218. filter_ids.sort(key=get_priority)
  219. return filter_ids
  220. async def chat_completion_filter_functions_handler(body, model, extra_params):
  221. skip_files = None
  222. filter_ids = get_filter_function_ids(model)
  223. for filter_id in filter_ids:
  224. filter = Functions.get_function_by_id(filter_id)
  225. if not filter:
  226. continue
  227. if filter_id in webui_app.state.FUNCTIONS:
  228. function_module = webui_app.state.FUNCTIONS[filter_id]
  229. else:
  230. function_module, _, _ = load_function_module_by_id(filter_id)
  231. webui_app.state.FUNCTIONS[filter_id] = function_module
  232. # Check if the function has a file_handler variable
  233. if hasattr(function_module, "file_handler"):
  234. skip_files = function_module.file_handler
  235. if hasattr(function_module, "valves") and hasattr(function_module, "Valves"):
  236. valves = Functions.get_function_valves_by_id(filter_id)
  237. function_module.valves = function_module.Valves(
  238. **(valves if valves else {})
  239. )
  240. if not hasattr(function_module, "inlet"):
  241. continue
  242. try:
  243. inlet = function_module.inlet
  244. # Get the signature of the function
  245. sig = inspect.signature(inlet)
  246. params = {"body": body} | {
  247. k: v
  248. for k, v in {
  249. **extra_params,
  250. "__model__": model,
  251. "__id__": filter_id,
  252. }.items()
  253. if k in sig.parameters
  254. }
  255. if "__user__" in params and hasattr(function_module, "UserValves"):
  256. try:
  257. params["__user__"]["valves"] = function_module.UserValves(
  258. **Functions.get_user_valves_by_id_and_user_id(
  259. filter_id, params["__user__"]["id"]
  260. )
  261. )
  262. except Exception as e:
  263. print(e)
  264. if inspect.iscoroutinefunction(inlet):
  265. body = await inlet(**params)
  266. else:
  267. body = inlet(**params)
  268. except Exception as e:
  269. print(f"Error: {e}")
  270. raise e
  271. if skip_files and "files" in body.get("metadata", {}):
  272. del body["metadata"]["files"]
  273. return body, {}
  274. def get_tools_function_calling_payload(messages, task_model_id, content):
  275. user_message = get_last_user_message(messages)
  276. history = "\n".join(
  277. f"{message['role'].upper()}: \"\"\"{message['content']}\"\"\""
  278. for message in messages[::-1][:4]
  279. )
  280. prompt = f"History:\n{history}\nQuery: {user_message}"
  281. return {
  282. "model": task_model_id,
  283. "messages": [
  284. {"role": "system", "content": content},
  285. {"role": "user", "content": f"Query: {prompt}"},
  286. ],
  287. "stream": False,
  288. "metadata": {"task": str(TASKS.FUNCTION_CALLING)},
  289. }
  290. async def get_content_from_response(response) -> Optional[str]:
  291. content = None
  292. if hasattr(response, "body_iterator"):
  293. async for chunk in response.body_iterator:
  294. data = json.loads(chunk.decode("utf-8"))
  295. content = data["choices"][0]["message"]["content"]
  296. # Cleanup any remaining background tasks if necessary
  297. if response.background is not None:
  298. await response.background()
  299. else:
  300. content = response["choices"][0]["message"]["content"]
  301. return content
  302. def get_task_model_id(
  303. default_model_id: str, task_model: str, task_model_external: str, models
  304. ) -> str:
  305. # Set the task model
  306. task_model_id = default_model_id
  307. # Check if the user has a custom task model and use that model
  308. if models[task_model_id]["owned_by"] == "ollama":
  309. if task_model and task_model in models:
  310. task_model_id = task_model
  311. else:
  312. if task_model_external and task_model_external in models:
  313. task_model_id = task_model_external
  314. return task_model_id
  315. async def chat_completion_tools_handler(
  316. body: dict, user: UserModel, models, extra_params: dict
  317. ) -> tuple[dict, dict]:
  318. # If tool_ids field is present, call the functions
  319. metadata = body.get("metadata", {})
  320. tool_ids = metadata.get("tool_ids", None)
  321. log.debug(f"{tool_ids=}")
  322. if not tool_ids:
  323. return body, {}
  324. skip_files = False
  325. sources = []
  326. task_model_id = get_task_model_id(
  327. body["model"],
  328. app.state.config.TASK_MODEL,
  329. app.state.config.TASK_MODEL_EXTERNAL,
  330. models,
  331. )
  332. tools = get_tools(
  333. webui_app,
  334. tool_ids,
  335. user,
  336. {
  337. **extra_params,
  338. "__model__": models[task_model_id],
  339. "__messages__": body["messages"],
  340. "__files__": metadata.get("files", []),
  341. },
  342. )
  343. log.info(f"{tools=}")
  344. specs = [tool["spec"] for tool in tools.values()]
  345. tools_specs = json.dumps(specs)
  346. if app.state.config.TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE != "":
  347. template = app.state.config.TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE
  348. else:
  349. template = """Available Tools: {{TOOLS}}\nReturn an empty string if no tools match the query. If a function tool matches, construct and return a JSON object in the format {\"name\": \"functionName\", \"parameters\": {\"requiredFunctionParamKey\": \"requiredFunctionParamValue\"}} using the appropriate tool and its parameters. Only return the object and limit the response to the JSON object without additional text."""
  350. tools_function_calling_prompt = tools_function_calling_generation_template(
  351. template, tools_specs
  352. )
  353. log.info(f"{tools_function_calling_prompt=}")
  354. payload = get_tools_function_calling_payload(
  355. body["messages"], task_model_id, tools_function_calling_prompt
  356. )
  357. try:
  358. payload = filter_pipeline(payload, user, models)
  359. except Exception as e:
  360. raise e
  361. try:
  362. response = await generate_chat_completions(form_data=payload, user=user)
  363. log.debug(f"{response=}")
  364. content = await get_content_from_response(response)
  365. log.debug(f"{content=}")
  366. if not content:
  367. return body, {}
  368. try:
  369. content = content[content.find("{") : content.rfind("}") + 1]
  370. if not content:
  371. raise Exception("No JSON object found in the response")
  372. result = json.loads(content)
  373. tool_function_name = result.get("name", None)
  374. if tool_function_name not in tools:
  375. return body, {}
  376. tool_function_params = result.get("parameters", {})
  377. try:
  378. required_params = (
  379. tools[tool_function_name]
  380. .get("spec", {})
  381. .get("parameters", {})
  382. .get("required", [])
  383. )
  384. tool_function = tools[tool_function_name]["callable"]
  385. tool_function_params = {
  386. k: v
  387. for k, v in tool_function_params.items()
  388. if k in required_params
  389. }
  390. tool_output = await tool_function(**tool_function_params)
  391. except Exception as e:
  392. tool_output = str(e)
  393. print(tools[tool_function_name]["citation"])
  394. if isinstance(tool_output, str):
  395. if tools[tool_function_name]["citation"]:
  396. sources.append(
  397. {
  398. "source": {
  399. "name": f"TOOL:{tools[tool_function_name]['toolkit_id']}/{tool_function_name}"
  400. },
  401. "document": [tool_output],
  402. "metadata": [
  403. {
  404. "source": f"TOOL:{tools[tool_function_name]['toolkit_id']}/{tool_function_name}"
  405. }
  406. ],
  407. }
  408. )
  409. else:
  410. sources.append(
  411. {
  412. "source": {},
  413. "document": [tool_output],
  414. "metadata": [
  415. {
  416. "source": f"TOOL:{tools[tool_function_name]['toolkit_id']}/{tool_function_name}"
  417. }
  418. ],
  419. }
  420. )
  421. if tools[tool_function_name]["file_handler"]:
  422. skip_files = True
  423. except Exception as e:
  424. log.exception(f"Error: {e}")
  425. content = None
  426. except Exception as e:
  427. log.exception(f"Error: {e}")
  428. content = None
  429. log.debug(f"tool_contexts: {sources}")
  430. if skip_files and "files" in body.get("metadata", {}):
  431. del body["metadata"]["files"]
  432. return body, {"sources": sources}
  433. async def chat_completion_files_handler(
  434. body: dict, user: UserModel
  435. ) -> tuple[dict, dict[str, list]]:
  436. sources = []
  437. if files := body.get("metadata", {}).get("files", None):
  438. try:
  439. queries_response = await generate_queries(
  440. {
  441. "model": body["model"],
  442. "messages": body["messages"],
  443. "type": "retrieval",
  444. },
  445. user,
  446. )
  447. queries_response = queries_response["choices"][0]["message"]["content"]
  448. try:
  449. queries_response = json.loads(queries_response)
  450. except Exception as e:
  451. queries_response = {"queries": []}
  452. queries = queries_response.get("queries", [])
  453. except Exception as e:
  454. queries = []
  455. if len(queries) == 0:
  456. queries = [get_last_user_message(body["messages"])]
  457. sources = get_sources_from_files(
  458. files=files,
  459. queries=queries,
  460. embedding_function=retrieval_app.state.EMBEDDING_FUNCTION,
  461. k=retrieval_app.state.config.TOP_K,
  462. reranking_function=retrieval_app.state.sentence_transformer_rf,
  463. r=retrieval_app.state.config.RELEVANCE_THRESHOLD,
  464. hybrid_search=retrieval_app.state.config.ENABLE_RAG_HYBRID_SEARCH,
  465. )
  466. log.debug(f"rag_contexts:sources: {sources}")
  467. return body, {"sources": sources}
  468. def is_chat_completion_request(request):
  469. return request.method == "POST" and any(
  470. endpoint in request.url.path
  471. for endpoint in ["/ollama/api/chat", "/chat/completions"]
  472. )
  473. async def get_body_and_model_and_user(request, models):
  474. # Read the original request body
  475. body = await request.body()
  476. body_str = body.decode("utf-8")
  477. body = json.loads(body_str) if body_str else {}
  478. model_id = body["model"]
  479. if model_id not in models:
  480. raise Exception("Model not found")
  481. model = models[model_id]
  482. user = get_current_user(
  483. request,
  484. get_http_authorization_cred(request.headers.get("Authorization")),
  485. )
  486. return body, model, user
  487. class ChatCompletionMiddleware(BaseHTTPMiddleware):
  488. async def dispatch(self, request: Request, call_next):
  489. if not is_chat_completion_request(request):
  490. return await call_next(request)
  491. log.debug(f"request.url.path: {request.url.path}")
  492. model_list = await get_all_models()
  493. models = {model["id"]: model for model in model_list}
  494. try:
  495. body, model, user = await get_body_and_model_and_user(request, models)
  496. except Exception as e:
  497. return JSONResponse(
  498. status_code=status.HTTP_400_BAD_REQUEST,
  499. content={"detail": str(e)},
  500. )
  501. model_info = Models.get_model_by_id(model["id"])
  502. if user.role == "user":
  503. if model.get("arena"):
  504. if not has_access(
  505. user.id,
  506. type="read",
  507. access_control=model.get("info", {})
  508. .get("meta", {})
  509. .get("access_control", {}),
  510. ):
  511. raise HTTPException(
  512. status_code=403,
  513. detail="Model not found",
  514. )
  515. else:
  516. if not model_info:
  517. return JSONResponse(
  518. status_code=status.HTTP_404_NOT_FOUND,
  519. content={"detail": "Model not found"},
  520. )
  521. elif not (
  522. user.id == model_info.user_id
  523. or has_access(
  524. user.id, type="read", access_control=model_info.access_control
  525. )
  526. ):
  527. return JSONResponse(
  528. status_code=status.HTTP_403_FORBIDDEN,
  529. content={"detail": "User does not have access to the model"},
  530. )
  531. metadata = {
  532. "chat_id": body.pop("chat_id", None),
  533. "message_id": body.pop("id", None),
  534. "session_id": body.pop("session_id", None),
  535. "tool_ids": body.get("tool_ids", None),
  536. "files": body.get("files", None),
  537. }
  538. body["metadata"] = metadata
  539. extra_params = {
  540. "__event_emitter__": get_event_emitter(metadata),
  541. "__event_call__": get_event_call(metadata),
  542. "__user__": {
  543. "id": user.id,
  544. "email": user.email,
  545. "name": user.name,
  546. "role": user.role,
  547. },
  548. "__metadata__": metadata,
  549. }
  550. # Initialize data_items to store additional data to be sent to the client
  551. # Initialize contexts and citation
  552. data_items = []
  553. sources = []
  554. try:
  555. body, flags = await chat_completion_filter_functions_handler(
  556. body, model, extra_params
  557. )
  558. except Exception as e:
  559. return JSONResponse(
  560. status_code=status.HTTP_400_BAD_REQUEST,
  561. content={"detail": str(e)},
  562. )
  563. tool_ids = body.pop("tool_ids", None)
  564. files = body.pop("files", None)
  565. metadata = {
  566. **metadata,
  567. "tool_ids": tool_ids,
  568. "files": files,
  569. }
  570. body["metadata"] = metadata
  571. try:
  572. body, flags = await chat_completion_tools_handler(
  573. body, user, models, extra_params
  574. )
  575. sources.extend(flags.get("sources", []))
  576. except Exception as e:
  577. log.exception(e)
  578. try:
  579. body, flags = await chat_completion_files_handler(body, user)
  580. sources.extend(flags.get("sources", []))
  581. except Exception as e:
  582. log.exception(e)
  583. # If context is not empty, insert it into the messages
  584. if len(sources) > 0:
  585. context_string = ""
  586. for source_idx, source in enumerate(sources):
  587. source_id = source.get("source", {}).get("name", "")
  588. if "document" in source:
  589. for doc_idx, doc_context in enumerate(source["document"]):
  590. metadata = source.get("metadata")
  591. doc_source_id = None
  592. if metadata:
  593. doc_source_id = metadata[doc_idx].get("source", source_id)
  594. if source_id:
  595. context_string += f"<source><source_id>{doc_source_id if doc_source_id is not None else source_id}</source_id><source_context>{doc_context}</source_context></source>\n"
  596. else:
  597. # If there is no source_id, then do not include the source_id tag
  598. context_string += f"<source><source_context>{doc_context}</source_context></source>\n"
  599. context_string = context_string.strip()
  600. prompt = get_last_user_message(body["messages"])
  601. if prompt is None:
  602. raise Exception("No user message found")
  603. if (
  604. retrieval_app.state.config.RELEVANCE_THRESHOLD == 0
  605. and context_string.strip() == ""
  606. ):
  607. log.debug(
  608. f"With a 0 relevancy threshold for RAG, the context cannot be empty"
  609. )
  610. # Workaround for Ollama 2.0+ system prompt issue
  611. # TODO: replace with add_or_update_system_message
  612. if model["owned_by"] == "ollama":
  613. body["messages"] = prepend_to_first_user_message_content(
  614. rag_template(
  615. retrieval_app.state.config.RAG_TEMPLATE, context_string, prompt
  616. ),
  617. body["messages"],
  618. )
  619. else:
  620. body["messages"] = add_or_update_system_message(
  621. rag_template(
  622. retrieval_app.state.config.RAG_TEMPLATE, context_string, prompt
  623. ),
  624. body["messages"],
  625. )
  626. # If there are citations, add them to the data_items
  627. sources = [
  628. source for source in sources if source.get("source", {}).get("name", "")
  629. ]
  630. if len(sources) > 0:
  631. data_items.append({"sources": sources})
  632. modified_body_bytes = json.dumps(body).encode("utf-8")
  633. # Replace the request body with the modified one
  634. request._body = modified_body_bytes
  635. # Set custom header to ensure content-length matches new body length
  636. request.headers.__dict__["_list"] = [
  637. (b"content-length", str(len(modified_body_bytes)).encode("utf-8")),
  638. *[(k, v) for k, v in request.headers.raw if k.lower() != b"content-length"],
  639. ]
  640. response = await call_next(request)
  641. if not isinstance(response, StreamingResponse):
  642. return response
  643. content_type = response.headers["Content-Type"]
  644. is_openai = "text/event-stream" in content_type
  645. is_ollama = "application/x-ndjson" in content_type
  646. if not is_openai and not is_ollama:
  647. return response
  648. def wrap_item(item):
  649. return f"data: {item}\n\n" if is_openai else f"{item}\n"
  650. async def stream_wrapper(original_generator, data_items):
  651. for item in data_items:
  652. yield wrap_item(json.dumps(item))
  653. async for data in original_generator:
  654. yield data
  655. return StreamingResponse(
  656. stream_wrapper(response.body_iterator, data_items),
  657. headers=dict(response.headers),
  658. )
  659. async def _receive(self, body: bytes):
  660. return {"type": "http.request", "body": body, "more_body": False}
  661. app.add_middleware(ChatCompletionMiddleware)
  662. ##################################
  663. #
  664. # Pipeline Middleware
  665. #
  666. ##################################
  667. def get_sorted_filters(model_id, models):
  668. filters = [
  669. model
  670. for model in models.values()
  671. if "pipeline" in model
  672. and "type" in model["pipeline"]
  673. and model["pipeline"]["type"] == "filter"
  674. and (
  675. model["pipeline"]["pipelines"] == ["*"]
  676. or any(
  677. model_id == target_model_id
  678. for target_model_id in model["pipeline"]["pipelines"]
  679. )
  680. )
  681. ]
  682. sorted_filters = sorted(filters, key=lambda x: x["pipeline"]["priority"])
  683. return sorted_filters
  684. def filter_pipeline(payload, user, models):
  685. user = {"id": user.id, "email": user.email, "name": user.name, "role": user.role}
  686. model_id = payload["model"]
  687. sorted_filters = get_sorted_filters(model_id, models)
  688. model = models[model_id]
  689. if "pipeline" in model:
  690. sorted_filters.append(model)
  691. for filter in sorted_filters:
  692. r = None
  693. try:
  694. urlIdx = filter["urlIdx"]
  695. url = openai_app.state.config.OPENAI_API_BASE_URLS[urlIdx]
  696. key = openai_app.state.config.OPENAI_API_KEYS[urlIdx]
  697. if key == "":
  698. continue
  699. headers = {"Authorization": f"Bearer {key}"}
  700. r = requests.post(
  701. f"{url}/{filter['id']}/filter/inlet",
  702. headers=headers,
  703. json={
  704. "user": user,
  705. "body": payload,
  706. },
  707. )
  708. r.raise_for_status()
  709. payload = r.json()
  710. except Exception as e:
  711. # Handle connection error here
  712. print(f"Connection error: {e}")
  713. if r is not None:
  714. res = r.json()
  715. if "detail" in res:
  716. raise Exception(r.status_code, res["detail"])
  717. return payload
  718. class PipelineMiddleware(BaseHTTPMiddleware):
  719. async def dispatch(self, request: Request, call_next):
  720. if not is_chat_completion_request(request):
  721. return await call_next(request)
  722. log.debug(f"request.url.path: {request.url.path}")
  723. # Read the original request body
  724. body = await request.body()
  725. # Decode body to string
  726. body_str = body.decode("utf-8")
  727. # Parse string to JSON
  728. data = json.loads(body_str) if body_str else {}
  729. try:
  730. user = get_current_user(
  731. request,
  732. get_http_authorization_cred(request.headers["Authorization"]),
  733. )
  734. except KeyError as e:
  735. if len(e.args) > 1:
  736. return JSONResponse(
  737. status_code=e.args[0],
  738. content={"detail": e.args[1]},
  739. )
  740. else:
  741. return JSONResponse(
  742. status_code=status.HTTP_401_UNAUTHORIZED,
  743. content={"detail": "Not authenticated"},
  744. )
  745. except HTTPException as e:
  746. return JSONResponse(
  747. status_code=e.status_code,
  748. content={"detail": e.detail},
  749. )
  750. model_list = await get_all_models()
  751. models = {model["id"]: model for model in model_list}
  752. try:
  753. data = filter_pipeline(data, user, models)
  754. except Exception as e:
  755. if len(e.args) > 1:
  756. return JSONResponse(
  757. status_code=e.args[0],
  758. content={"detail": e.args[1]},
  759. )
  760. else:
  761. return JSONResponse(
  762. status_code=status.HTTP_400_BAD_REQUEST,
  763. content={"detail": str(e)},
  764. )
  765. modified_body_bytes = json.dumps(data).encode("utf-8")
  766. # Replace the request body with the modified one
  767. request._body = modified_body_bytes
  768. # Set custom header to ensure content-length matches new body length
  769. request.headers.__dict__["_list"] = [
  770. (b"content-length", str(len(modified_body_bytes)).encode("utf-8")),
  771. *[(k, v) for k, v in request.headers.raw if k.lower() != b"content-length"],
  772. ]
  773. response = await call_next(request)
  774. return response
  775. async def _receive(self, body: bytes):
  776. return {"type": "http.request", "body": body, "more_body": False}
  777. app.add_middleware(PipelineMiddleware)
  778. from urllib.parse import urlencode, parse_qs, urlparse
  779. class RedirectMiddleware(BaseHTTPMiddleware):
  780. async def dispatch(self, request: Request, call_next):
  781. # Check if the request is a GET request
  782. if request.method == "GET":
  783. path = request.url.path
  784. query_params = dict(parse_qs(urlparse(str(request.url)).query))
  785. # Check for the specific watch path and the presence of 'v' parameter
  786. if path.endswith("/watch") and "v" in query_params:
  787. video_id = query_params["v"][0] # Extract the first 'v' parameter
  788. encoded_video_id = urlencode({"youtube": video_id})
  789. redirect_url = f"/?{encoded_video_id}"
  790. return RedirectResponse(url=redirect_url)
  791. # Proceed with the normal flow of other requests
  792. response = await call_next(request)
  793. return response
  794. # Add the middleware to the app
  795. app.add_middleware(RedirectMiddleware)
  796. app.add_middleware(
  797. CORSMiddleware,
  798. allow_origins=CORS_ALLOW_ORIGIN,
  799. allow_credentials=True,
  800. allow_methods=["*"],
  801. allow_headers=["*"],
  802. )
  803. app.add_middleware(SecurityHeadersMiddleware)
  804. @app.middleware("http")
  805. async def commit_session_after_request(request: Request, call_next):
  806. response = await call_next(request)
  807. # log.debug("Commit session after request")
  808. Session.commit()
  809. return response
  810. @app.middleware("http")
  811. async def check_url(request: Request, call_next):
  812. start_time = int(time.time())
  813. request.state.enable_api_key = webui_app.state.config.ENABLE_API_KEY
  814. response = await call_next(request)
  815. process_time = int(time.time()) - start_time
  816. response.headers["X-Process-Time"] = str(process_time)
  817. return response
  818. @app.middleware("http")
  819. async def update_embedding_function(request: Request, call_next):
  820. response = await call_next(request)
  821. if "/embedding/update" in request.url.path:
  822. webui_app.state.EMBEDDING_FUNCTION = retrieval_app.state.EMBEDDING_FUNCTION
  823. return response
  824. @app.middleware("http")
  825. async def inspect_websocket(request: Request, call_next):
  826. if (
  827. "/ws/socket.io" in request.url.path
  828. and request.query_params.get("transport") == "websocket"
  829. ):
  830. upgrade = (request.headers.get("Upgrade") or "").lower()
  831. connection = (request.headers.get("Connection") or "").lower().split(",")
  832. # Check that there's the correct headers for an upgrade, else reject the connection
  833. # This is to work around this upstream issue: https://github.com/miguelgrinberg/python-engineio/issues/367
  834. if upgrade != "websocket" or "upgrade" not in connection:
  835. return JSONResponse(
  836. status_code=status.HTTP_400_BAD_REQUEST,
  837. content={"detail": "Invalid WebSocket upgrade request"},
  838. )
  839. return await call_next(request)
  840. app.mount("/ws", socket_app)
  841. app.mount("/ollama", ollama_app)
  842. app.mount("/openai", openai_app)
  843. app.mount("/images/api/v1", images_app)
  844. app.mount("/audio/api/v1", audio_app)
  845. app.mount("/retrieval/api/v1", retrieval_app)
  846. app.mount("/api/v1", webui_app)
  847. webui_app.state.EMBEDDING_FUNCTION = retrieval_app.state.EMBEDDING_FUNCTION
  848. async def get_all_base_models():
  849. open_webui_models = []
  850. openai_models = []
  851. ollama_models = []
  852. if app.state.config.ENABLE_OPENAI_API:
  853. openai_models = await get_openai_models()
  854. openai_models = openai_models["data"]
  855. if app.state.config.ENABLE_OLLAMA_API:
  856. ollama_models = await get_ollama_models()
  857. ollama_models = [
  858. {
  859. "id": model["model"],
  860. "name": model["name"],
  861. "object": "model",
  862. "created": int(time.time()),
  863. "owned_by": "ollama",
  864. "ollama": model,
  865. }
  866. for model in ollama_models["models"]
  867. ]
  868. open_webui_models = await get_open_webui_models()
  869. models = open_webui_models + openai_models + ollama_models
  870. return models
  871. @cached(ttl=3)
  872. async def get_all_models():
  873. models = await get_all_base_models()
  874. # If there are no models, return an empty list
  875. if len([model for model in models if not model.get("arena", False)]) == 0:
  876. return []
  877. global_action_ids = [
  878. function.id for function in Functions.get_global_action_functions()
  879. ]
  880. enabled_action_ids = [
  881. function.id
  882. for function in Functions.get_functions_by_type("action", active_only=True)
  883. ]
  884. custom_models = Models.get_all_models()
  885. for custom_model in custom_models:
  886. if custom_model.base_model_id is None:
  887. for model in models:
  888. if (
  889. custom_model.id == model["id"]
  890. or custom_model.id == model["id"].split(":")[0]
  891. ):
  892. if custom_model.is_active:
  893. model["name"] = custom_model.name
  894. model["info"] = custom_model.model_dump()
  895. action_ids = []
  896. if "info" in model and "meta" in model["info"]:
  897. action_ids.extend(
  898. model["info"]["meta"].get("actionIds", [])
  899. )
  900. model["action_ids"] = action_ids
  901. else:
  902. models.remove(model)
  903. elif custom_model.is_active and (
  904. custom_model.id not in [model["id"] for model in models]
  905. ):
  906. owned_by = "openai"
  907. pipe = None
  908. action_ids = []
  909. for model in models:
  910. if (
  911. custom_model.base_model_id == model["id"]
  912. or custom_model.base_model_id == model["id"].split(":")[0]
  913. ):
  914. owned_by = model["owned_by"]
  915. if "pipe" in model:
  916. pipe = model["pipe"]
  917. break
  918. if custom_model.meta:
  919. meta = custom_model.meta.model_dump()
  920. if "actionIds" in meta:
  921. action_ids.extend(meta["actionIds"])
  922. models.append(
  923. {
  924. "id": f"{custom_model.id}",
  925. "name": custom_model.name,
  926. "object": "model",
  927. "created": custom_model.created_at,
  928. "owned_by": owned_by,
  929. "info": custom_model.model_dump(),
  930. "preset": True,
  931. **({"pipe": pipe} if pipe is not None else {}),
  932. "action_ids": action_ids,
  933. }
  934. )
  935. # Process action_ids to get the actions
  936. def get_action_items_from_module(function, module):
  937. actions = []
  938. if hasattr(module, "actions"):
  939. actions = module.actions
  940. return [
  941. {
  942. "id": f"{function.id}.{action['id']}",
  943. "name": action.get("name", f"{function.name} ({action['id']})"),
  944. "description": function.meta.description,
  945. "icon_url": action.get(
  946. "icon_url", function.meta.manifest.get("icon_url", None)
  947. ),
  948. }
  949. for action in actions
  950. ]
  951. else:
  952. return [
  953. {
  954. "id": function.id,
  955. "name": function.name,
  956. "description": function.meta.description,
  957. "icon_url": function.meta.manifest.get("icon_url", None),
  958. }
  959. ]
  960. def get_function_module_by_id(function_id):
  961. if function_id in webui_app.state.FUNCTIONS:
  962. function_module = webui_app.state.FUNCTIONS[function_id]
  963. else:
  964. function_module, _, _ = load_function_module_by_id(function_id)
  965. webui_app.state.FUNCTIONS[function_id] = function_module
  966. for model in models:
  967. action_ids = [
  968. action_id
  969. for action_id in list(set(model.pop("action_ids", []) + global_action_ids))
  970. if action_id in enabled_action_ids
  971. ]
  972. model["actions"] = []
  973. for action_id in action_ids:
  974. action_function = Functions.get_function_by_id(action_id)
  975. if action_function is None:
  976. raise Exception(f"Action not found: {action_id}")
  977. function_module = get_function_module_by_id(action_id)
  978. model["actions"].extend(
  979. get_action_items_from_module(action_function, function_module)
  980. )
  981. log.debug(f"get_all_models() returned {len(models)} models")
  982. return models
  983. @app.get("/api/models")
  984. async def get_models(user=Depends(get_verified_user)):
  985. models = await get_all_models()
  986. # Filter out filter pipelines
  987. models = [
  988. model
  989. for model in models
  990. if "pipeline" not in model or model["pipeline"].get("type", None) != "filter"
  991. ]
  992. # Filter out models that the user does not have access to
  993. if user.role == "user":
  994. filtered_models = []
  995. for model in models:
  996. if model.get("arena"):
  997. if has_access(
  998. user.id,
  999. type="read",
  1000. access_control=model.get("info", {})
  1001. .get("meta", {})
  1002. .get("access_control", {}),
  1003. ):
  1004. filtered_models.append(model)
  1005. continue
  1006. model_info = Models.get_model_by_id(model["id"])
  1007. if model_info:
  1008. if user.id == model_info.user_id or has_access(
  1009. user.id, type="read", access_control=model_info.access_control
  1010. ):
  1011. filtered_models.append(model)
  1012. models = filtered_models
  1013. log.debug(
  1014. f"/api/models returned filtered models accessible to the user: {json.dumps([model['id'] for model in models])}"
  1015. )
  1016. return {"data": models}
  1017. @app.get("/api/models/base")
  1018. async def get_base_models(user=Depends(get_admin_user)):
  1019. models = await get_all_base_models()
  1020. # Filter out arena models
  1021. models = [model for model in models if not model.get("arena", False)]
  1022. return {"data": models}
  1023. @app.post("/api/chat/completions")
  1024. async def generate_chat_completions(
  1025. form_data: dict, user=Depends(get_verified_user), bypass_filter: bool = False
  1026. ):
  1027. model_list = await get_all_models()
  1028. models = {model["id"]: model for model in model_list}
  1029. model_id = form_data["model"]
  1030. if model_id not in models:
  1031. raise HTTPException(
  1032. status_code=status.HTTP_404_NOT_FOUND,
  1033. detail="Model not found",
  1034. )
  1035. model = models[model_id]
  1036. # Check if user has access to the model
  1037. if not bypass_filter and user.role == "user":
  1038. if model.get("arena"):
  1039. if not has_access(
  1040. user.id,
  1041. type="read",
  1042. access_control=model.get("info", {})
  1043. .get("meta", {})
  1044. .get("access_control", {}),
  1045. ):
  1046. raise HTTPException(
  1047. status_code=403,
  1048. detail="Model not found",
  1049. )
  1050. else:
  1051. model_info = Models.get_model_by_id(model_id)
  1052. if not model_info:
  1053. raise HTTPException(
  1054. status_code=404,
  1055. detail="Model not found",
  1056. )
  1057. elif not (
  1058. user.id == model_info.user_id
  1059. or has_access(
  1060. user.id, type="read", access_control=model_info.access_control
  1061. )
  1062. ):
  1063. raise HTTPException(
  1064. status_code=403,
  1065. detail="Model not found",
  1066. )
  1067. if model["owned_by"] == "arena":
  1068. model_ids = model.get("info", {}).get("meta", {}).get("model_ids")
  1069. filter_mode = model.get("info", {}).get("meta", {}).get("filter_mode")
  1070. if model_ids and filter_mode == "exclude":
  1071. model_ids = [
  1072. model["id"]
  1073. for model in await get_all_models()
  1074. if model.get("owned_by") != "arena" and model["id"] not in model_ids
  1075. ]
  1076. selected_model_id = None
  1077. if isinstance(model_ids, list) and model_ids:
  1078. selected_model_id = random.choice(model_ids)
  1079. else:
  1080. model_ids = [
  1081. model["id"]
  1082. for model in await get_all_models()
  1083. if model.get("owned_by") != "arena"
  1084. ]
  1085. selected_model_id = random.choice(model_ids)
  1086. form_data["model"] = selected_model_id
  1087. if form_data.get("stream") == True:
  1088. async def stream_wrapper(stream):
  1089. yield f"data: {json.dumps({'selected_model_id': selected_model_id})}\n\n"
  1090. async for chunk in stream:
  1091. yield chunk
  1092. response = await generate_chat_completions(
  1093. form_data, user, bypass_filter=True
  1094. )
  1095. return StreamingResponse(
  1096. stream_wrapper(response.body_iterator), media_type="text/event-stream"
  1097. )
  1098. else:
  1099. return {
  1100. **(
  1101. await generate_chat_completions(form_data, user, bypass_filter=True)
  1102. ),
  1103. "selected_model_id": selected_model_id,
  1104. }
  1105. if model.get("pipe"):
  1106. # Below does not require bypass_filter because this is the only route the uses this function and it is already bypassing the filter
  1107. return await generate_function_chat_completion(
  1108. form_data, user=user, models=models
  1109. )
  1110. if model["owned_by"] == "ollama":
  1111. # Using /ollama/api/chat endpoint
  1112. form_data = convert_payload_openai_to_ollama(form_data)
  1113. form_data = GenerateChatCompletionForm(**form_data)
  1114. response = await generate_ollama_chat_completion(
  1115. form_data=form_data, user=user, bypass_filter=bypass_filter
  1116. )
  1117. if form_data.stream:
  1118. response.headers["content-type"] = "text/event-stream"
  1119. return StreamingResponse(
  1120. convert_streaming_response_ollama_to_openai(response),
  1121. headers=dict(response.headers),
  1122. )
  1123. else:
  1124. return convert_response_ollama_to_openai(response)
  1125. else:
  1126. return await generate_openai_chat_completion(
  1127. form_data, user=user, bypass_filter=bypass_filter
  1128. )
  1129. @app.post("/api/chat/completed")
  1130. async def chat_completed(form_data: dict, user=Depends(get_verified_user)):
  1131. model_list = await get_all_models()
  1132. models = {model["id"]: model for model in model_list}
  1133. data = form_data
  1134. model_id = data["model"]
  1135. if model_id not in models:
  1136. raise HTTPException(
  1137. status_code=status.HTTP_404_NOT_FOUND,
  1138. detail="Model not found",
  1139. )
  1140. model = models[model_id]
  1141. sorted_filters = get_sorted_filters(model_id, models)
  1142. if "pipeline" in model:
  1143. sorted_filters = [model] + sorted_filters
  1144. for filter in sorted_filters:
  1145. r = None
  1146. try:
  1147. urlIdx = filter["urlIdx"]
  1148. url = openai_app.state.config.OPENAI_API_BASE_URLS[urlIdx]
  1149. key = openai_app.state.config.OPENAI_API_KEYS[urlIdx]
  1150. if key != "":
  1151. headers = {"Authorization": f"Bearer {key}"}
  1152. r = requests.post(
  1153. f"{url}/{filter['id']}/filter/outlet",
  1154. headers=headers,
  1155. json={
  1156. "user": {
  1157. "id": user.id,
  1158. "name": user.name,
  1159. "email": user.email,
  1160. "role": user.role,
  1161. },
  1162. "body": data,
  1163. },
  1164. )
  1165. r.raise_for_status()
  1166. data = r.json()
  1167. except Exception as e:
  1168. # Handle connection error here
  1169. print(f"Connection error: {e}")
  1170. if r is not None:
  1171. try:
  1172. res = r.json()
  1173. if "detail" in res:
  1174. return JSONResponse(
  1175. status_code=r.status_code,
  1176. content=res,
  1177. )
  1178. except Exception:
  1179. pass
  1180. else:
  1181. pass
  1182. __event_emitter__ = get_event_emitter(
  1183. {
  1184. "chat_id": data["chat_id"],
  1185. "message_id": data["id"],
  1186. "session_id": data["session_id"],
  1187. }
  1188. )
  1189. __event_call__ = get_event_call(
  1190. {
  1191. "chat_id": data["chat_id"],
  1192. "message_id": data["id"],
  1193. "session_id": data["session_id"],
  1194. }
  1195. )
  1196. def get_priority(function_id):
  1197. function = Functions.get_function_by_id(function_id)
  1198. if function is not None and hasattr(function, "valves"):
  1199. # TODO: Fix FunctionModel to include vavles
  1200. return (function.valves if function.valves else {}).get("priority", 0)
  1201. return 0
  1202. filter_ids = [function.id for function in Functions.get_global_filter_functions()]
  1203. if "info" in model and "meta" in model["info"]:
  1204. filter_ids.extend(model["info"]["meta"].get("filterIds", []))
  1205. filter_ids = list(set(filter_ids))
  1206. enabled_filter_ids = [
  1207. function.id
  1208. for function in Functions.get_functions_by_type("filter", active_only=True)
  1209. ]
  1210. filter_ids = [
  1211. filter_id for filter_id in filter_ids if filter_id in enabled_filter_ids
  1212. ]
  1213. # Sort filter_ids by priority, using the get_priority function
  1214. filter_ids.sort(key=get_priority)
  1215. for filter_id in filter_ids:
  1216. filter = Functions.get_function_by_id(filter_id)
  1217. if not filter:
  1218. continue
  1219. if filter_id in webui_app.state.FUNCTIONS:
  1220. function_module = webui_app.state.FUNCTIONS[filter_id]
  1221. else:
  1222. function_module, _, _ = load_function_module_by_id(filter_id)
  1223. webui_app.state.FUNCTIONS[filter_id] = function_module
  1224. if hasattr(function_module, "valves") and hasattr(function_module, "Valves"):
  1225. valves = Functions.get_function_valves_by_id(filter_id)
  1226. function_module.valves = function_module.Valves(
  1227. **(valves if valves else {})
  1228. )
  1229. if not hasattr(function_module, "outlet"):
  1230. continue
  1231. try:
  1232. outlet = function_module.outlet
  1233. # Get the signature of the function
  1234. sig = inspect.signature(outlet)
  1235. params = {"body": data}
  1236. # Extra parameters to be passed to the function
  1237. extra_params = {
  1238. "__model__": model,
  1239. "__id__": filter_id,
  1240. "__event_emitter__": __event_emitter__,
  1241. "__event_call__": __event_call__,
  1242. }
  1243. # Add extra params in contained in function signature
  1244. for key, value in extra_params.items():
  1245. if key in sig.parameters:
  1246. params[key] = value
  1247. if "__user__" in sig.parameters:
  1248. __user__ = {
  1249. "id": user.id,
  1250. "email": user.email,
  1251. "name": user.name,
  1252. "role": user.role,
  1253. }
  1254. try:
  1255. if hasattr(function_module, "UserValves"):
  1256. __user__["valves"] = function_module.UserValves(
  1257. **Functions.get_user_valves_by_id_and_user_id(
  1258. filter_id, user.id
  1259. )
  1260. )
  1261. except Exception as e:
  1262. print(e)
  1263. params = {**params, "__user__": __user__}
  1264. if inspect.iscoroutinefunction(outlet):
  1265. data = await outlet(**params)
  1266. else:
  1267. data = outlet(**params)
  1268. except Exception as e:
  1269. print(f"Error: {e}")
  1270. return JSONResponse(
  1271. status_code=status.HTTP_400_BAD_REQUEST,
  1272. content={"detail": str(e)},
  1273. )
  1274. return data
  1275. @app.post("/api/chat/actions/{action_id}")
  1276. async def chat_action(action_id: str, form_data: dict, user=Depends(get_verified_user)):
  1277. if "." in action_id:
  1278. action_id, sub_action_id = action_id.split(".")
  1279. else:
  1280. sub_action_id = None
  1281. action = Functions.get_function_by_id(action_id)
  1282. if not action:
  1283. raise HTTPException(
  1284. status_code=status.HTTP_404_NOT_FOUND,
  1285. detail="Action not found",
  1286. )
  1287. model_list = await get_all_models()
  1288. models = {model["id"]: model for model in model_list}
  1289. data = form_data
  1290. model_id = data["model"]
  1291. if model_id not in models:
  1292. raise HTTPException(
  1293. status_code=status.HTTP_404_NOT_FOUND,
  1294. detail="Model not found",
  1295. )
  1296. model = models[model_id]
  1297. __event_emitter__ = get_event_emitter(
  1298. {
  1299. "chat_id": data["chat_id"],
  1300. "message_id": data["id"],
  1301. "session_id": data["session_id"],
  1302. }
  1303. )
  1304. __event_call__ = get_event_call(
  1305. {
  1306. "chat_id": data["chat_id"],
  1307. "message_id": data["id"],
  1308. "session_id": data["session_id"],
  1309. }
  1310. )
  1311. if action_id in webui_app.state.FUNCTIONS:
  1312. function_module = webui_app.state.FUNCTIONS[action_id]
  1313. else:
  1314. function_module, _, _ = load_function_module_by_id(action_id)
  1315. webui_app.state.FUNCTIONS[action_id] = function_module
  1316. if hasattr(function_module, "valves") and hasattr(function_module, "Valves"):
  1317. valves = Functions.get_function_valves_by_id(action_id)
  1318. function_module.valves = function_module.Valves(**(valves if valves else {}))
  1319. if hasattr(function_module, "action"):
  1320. try:
  1321. action = function_module.action
  1322. # Get the signature of the function
  1323. sig = inspect.signature(action)
  1324. params = {"body": data}
  1325. # Extra parameters to be passed to the function
  1326. extra_params = {
  1327. "__model__": model,
  1328. "__id__": sub_action_id if sub_action_id is not None else action_id,
  1329. "__event_emitter__": __event_emitter__,
  1330. "__event_call__": __event_call__,
  1331. }
  1332. # Add extra params in contained in function signature
  1333. for key, value in extra_params.items():
  1334. if key in sig.parameters:
  1335. params[key] = value
  1336. if "__user__" in sig.parameters:
  1337. __user__ = {
  1338. "id": user.id,
  1339. "email": user.email,
  1340. "name": user.name,
  1341. "role": user.role,
  1342. }
  1343. try:
  1344. if hasattr(function_module, "UserValves"):
  1345. __user__["valves"] = function_module.UserValves(
  1346. **Functions.get_user_valves_by_id_and_user_id(
  1347. action_id, user.id
  1348. )
  1349. )
  1350. except Exception as e:
  1351. print(e)
  1352. params = {**params, "__user__": __user__}
  1353. if inspect.iscoroutinefunction(action):
  1354. data = await action(**params)
  1355. else:
  1356. data = action(**params)
  1357. except Exception as e:
  1358. print(f"Error: {e}")
  1359. return JSONResponse(
  1360. status_code=status.HTTP_400_BAD_REQUEST,
  1361. content={"detail": str(e)},
  1362. )
  1363. return data
  1364. ##################################
  1365. #
  1366. # Task Endpoints
  1367. #
  1368. ##################################
  1369. # TODO: Refactor task API endpoints below into a separate file
  1370. @app.get("/api/task/config")
  1371. async def get_task_config(user=Depends(get_verified_user)):
  1372. return {
  1373. "TASK_MODEL": app.state.config.TASK_MODEL,
  1374. "TASK_MODEL_EXTERNAL": app.state.config.TASK_MODEL_EXTERNAL,
  1375. "TITLE_GENERATION_PROMPT_TEMPLATE": app.state.config.TITLE_GENERATION_PROMPT_TEMPLATE,
  1376. "TAGS_GENERATION_PROMPT_TEMPLATE": app.state.config.TAGS_GENERATION_PROMPT_TEMPLATE,
  1377. "ENABLE_TAGS_GENERATION": app.state.config.ENABLE_TAGS_GENERATION,
  1378. "ENABLE_SEARCH_QUERY_GENERATION": app.state.config.ENABLE_SEARCH_QUERY_GENERATION,
  1379. "ENABLE_RETRIEVAL_QUERY_GENERATION": app.state.config.ENABLE_RETRIEVAL_QUERY_GENERATION,
  1380. "QUERY_GENERATION_PROMPT_TEMPLATE": app.state.config.QUERY_GENERATION_PROMPT_TEMPLATE,
  1381. "TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE": app.state.config.TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE,
  1382. }
  1383. class TaskConfigForm(BaseModel):
  1384. TASK_MODEL: Optional[str]
  1385. TASK_MODEL_EXTERNAL: Optional[str]
  1386. TITLE_GENERATION_PROMPT_TEMPLATE: str
  1387. TAGS_GENERATION_PROMPT_TEMPLATE: str
  1388. ENABLE_TAGS_GENERATION: bool
  1389. ENABLE_SEARCH_QUERY_GENERATION: bool
  1390. ENABLE_RETRIEVAL_QUERY_GENERATION: bool
  1391. QUERY_GENERATION_PROMPT_TEMPLATE: str
  1392. TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE: str
  1393. @app.post("/api/task/config/update")
  1394. async def update_task_config(form_data: TaskConfigForm, user=Depends(get_admin_user)):
  1395. app.state.config.TASK_MODEL = form_data.TASK_MODEL
  1396. app.state.config.TASK_MODEL_EXTERNAL = form_data.TASK_MODEL_EXTERNAL
  1397. app.state.config.TITLE_GENERATION_PROMPT_TEMPLATE = (
  1398. form_data.TITLE_GENERATION_PROMPT_TEMPLATE
  1399. )
  1400. app.state.config.TAGS_GENERATION_PROMPT_TEMPLATE = (
  1401. form_data.TAGS_GENERATION_PROMPT_TEMPLATE
  1402. )
  1403. app.state.config.ENABLE_TAGS_GENERATION = form_data.ENABLE_TAGS_GENERATION
  1404. app.state.config.ENABLE_SEARCH_QUERY_GENERATION = (
  1405. form_data.ENABLE_SEARCH_QUERY_GENERATION
  1406. )
  1407. app.state.config.ENABLE_RETRIEVAL_QUERY_GENERATION = (
  1408. form_data.ENABLE_RETRIEVAL_QUERY_GENERATION
  1409. )
  1410. app.state.config.QUERY_GENERATION_PROMPT_TEMPLATE = (
  1411. form_data.QUERY_GENERATION_PROMPT_TEMPLATE
  1412. )
  1413. app.state.config.TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE = (
  1414. form_data.TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE
  1415. )
  1416. return {
  1417. "TASK_MODEL": app.state.config.TASK_MODEL,
  1418. "TASK_MODEL_EXTERNAL": app.state.config.TASK_MODEL_EXTERNAL,
  1419. "TITLE_GENERATION_PROMPT_TEMPLATE": app.state.config.TITLE_GENERATION_PROMPT_TEMPLATE,
  1420. "TAGS_GENERATION_PROMPT_TEMPLATE": app.state.config.TAGS_GENERATION_PROMPT_TEMPLATE,
  1421. "ENABLE_TAGS_GENERATION": app.state.config.ENABLE_TAGS_GENERATION,
  1422. "ENABLE_SEARCH_QUERY_GENERATION": app.state.config.ENABLE_SEARCH_QUERY_GENERATION,
  1423. "ENABLE_RETRIEVAL_QUERY_GENERATION": app.state.config.ENABLE_RETRIEVAL_QUERY_GENERATION,
  1424. "QUERY_GENERATION_PROMPT_TEMPLATE": app.state.config.QUERY_GENERATION_PROMPT_TEMPLATE,
  1425. "TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE": app.state.config.TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE,
  1426. }
  1427. @app.post("/api/task/title/completions")
  1428. async def generate_title(form_data: dict, user=Depends(get_verified_user)):
  1429. model_list = await get_all_models()
  1430. models = {model["id"]: model for model in model_list}
  1431. model_id = form_data["model"]
  1432. if model_id not in models:
  1433. raise HTTPException(
  1434. status_code=status.HTTP_404_NOT_FOUND,
  1435. detail="Model not found",
  1436. )
  1437. # Check if the user has a custom task model
  1438. # If the user has a custom task model, use that model
  1439. task_model_id = get_task_model_id(
  1440. model_id,
  1441. app.state.config.TASK_MODEL,
  1442. app.state.config.TASK_MODEL_EXTERNAL,
  1443. models,
  1444. )
  1445. log.debug(
  1446. f"generating chat title using model {task_model_id} for user {user.email} "
  1447. )
  1448. if app.state.config.TITLE_GENERATION_PROMPT_TEMPLATE != "":
  1449. template = app.state.config.TITLE_GENERATION_PROMPT_TEMPLATE
  1450. else:
  1451. template = """Create a concise, 3-5 word title with an emoji as a title for the chat history, in the given language. Suitable Emojis for the summary can be used to enhance understanding but avoid quotation marks or special formatting. RESPOND ONLY WITH THE TITLE TEXT.
  1452. Examples of titles:
  1453. 📉 Stock Market Trends
  1454. 🍪 Perfect Chocolate Chip Recipe
  1455. Evolution of Music Streaming
  1456. Remote Work Productivity Tips
  1457. Artificial Intelligence in Healthcare
  1458. 🎮 Video Game Development Insights
  1459. <chat_history>
  1460. {{MESSAGES:END:2}}
  1461. </chat_history>"""
  1462. content = title_generation_template(
  1463. template,
  1464. form_data["messages"],
  1465. {
  1466. "name": user.name,
  1467. "location": user.info.get("location") if user.info else None,
  1468. },
  1469. )
  1470. payload = {
  1471. "model": task_model_id,
  1472. "messages": [{"role": "user", "content": content}],
  1473. "stream": False,
  1474. **(
  1475. {"max_tokens": 50}
  1476. if models[task_model_id]["owned_by"] == "ollama"
  1477. else {
  1478. "max_completion_tokens": 50,
  1479. }
  1480. ),
  1481. "metadata": {
  1482. "task": str(TASKS.TITLE_GENERATION),
  1483. "task_body": form_data,
  1484. "chat_id": form_data.get("chat_id", None),
  1485. },
  1486. }
  1487. # Handle pipeline filters
  1488. try:
  1489. payload = filter_pipeline(payload, user, models)
  1490. except Exception as e:
  1491. if len(e.args) > 1:
  1492. return JSONResponse(
  1493. status_code=e.args[0],
  1494. content={"detail": e.args[1]},
  1495. )
  1496. else:
  1497. return JSONResponse(
  1498. status_code=status.HTTP_400_BAD_REQUEST,
  1499. content={"detail": str(e)},
  1500. )
  1501. if "chat_id" in payload:
  1502. del payload["chat_id"]
  1503. return await generate_chat_completions(form_data=payload, user=user)
  1504. @app.post("/api/task/tags/completions")
  1505. async def generate_chat_tags(form_data: dict, user=Depends(get_verified_user)):
  1506. if not app.state.config.ENABLE_TAGS_GENERATION:
  1507. return JSONResponse(
  1508. status_code=status.HTTP_200_OK,
  1509. content={"detail": "Tags generation is disabled"},
  1510. )
  1511. model_list = await get_all_models()
  1512. models = {model["id"]: model for model in model_list}
  1513. model_id = form_data["model"]
  1514. if model_id not in models:
  1515. raise HTTPException(
  1516. status_code=status.HTTP_404_NOT_FOUND,
  1517. detail="Model not found",
  1518. )
  1519. # Check if the user has a custom task model
  1520. # If the user has a custom task model, use that model
  1521. task_model_id = get_task_model_id(
  1522. model_id,
  1523. app.state.config.TASK_MODEL,
  1524. app.state.config.TASK_MODEL_EXTERNAL,
  1525. models,
  1526. )
  1527. log.debug(
  1528. f"generating chat tags using model {task_model_id} for user {user.email} "
  1529. )
  1530. if app.state.config.TAGS_GENERATION_PROMPT_TEMPLATE != "":
  1531. template = app.state.config.TAGS_GENERATION_PROMPT_TEMPLATE
  1532. else:
  1533. template = """### Task:
  1534. Generate 1-3 broad tags categorizing the main themes of the chat history, along with 1-3 more specific subtopic tags.
  1535. ### Guidelines:
  1536. - Start with high-level domains (e.g. Science, Technology, Philosophy, Arts, Politics, Business, Health, Sports, Entertainment, Education)
  1537. - Consider including relevant subfields/subdomains if they are strongly represented throughout the conversation
  1538. - If content is too short (less than 3 messages) or too diverse, use only ["General"]
  1539. - Use the chat's primary language; default to English if multilingual
  1540. - Prioritize accuracy over specificity
  1541. ### Output:
  1542. JSON format: { "tags": ["tag1", "tag2", "tag3"] }
  1543. ### Chat History:
  1544. <chat_history>
  1545. {{MESSAGES:END:6}}
  1546. </chat_history>"""
  1547. content = tags_generation_template(
  1548. template, form_data["messages"], {"name": user.name}
  1549. )
  1550. payload = {
  1551. "model": task_model_id,
  1552. "messages": [{"role": "user", "content": content}],
  1553. "stream": False,
  1554. "metadata": {
  1555. "task": str(TASKS.TAGS_GENERATION),
  1556. "task_body": form_data,
  1557. "chat_id": form_data.get("chat_id", None),
  1558. },
  1559. }
  1560. # Handle pipeline filters
  1561. try:
  1562. payload = filter_pipeline(payload, user, models)
  1563. except Exception as e:
  1564. if len(e.args) > 1:
  1565. return JSONResponse(
  1566. status_code=e.args[0],
  1567. content={"detail": e.args[1]},
  1568. )
  1569. else:
  1570. return JSONResponse(
  1571. status_code=status.HTTP_400_BAD_REQUEST,
  1572. content={"detail": str(e)},
  1573. )
  1574. if "chat_id" in payload:
  1575. del payload["chat_id"]
  1576. return await generate_chat_completions(form_data=payload, user=user)
  1577. @app.post("/api/task/queries/completions")
  1578. async def generate_queries(form_data: dict, user=Depends(get_verified_user)):
  1579. type = form_data.get("type")
  1580. if type == "web_search":
  1581. if not app.state.config.ENABLE_SEARCH_QUERY_GENERATION:
  1582. raise HTTPException(
  1583. status_code=status.HTTP_400_BAD_REQUEST,
  1584. detail=f"Search query generation is disabled",
  1585. )
  1586. elif type == "retrieval":
  1587. if not app.state.config.ENABLE_RETRIEVAL_QUERY_GENERATION:
  1588. raise HTTPException(
  1589. status_code=status.HTTP_400_BAD_REQUEST,
  1590. detail=f"Query generation is disabled",
  1591. )
  1592. model_list = await get_all_models()
  1593. models = {model["id"]: model for model in model_list}
  1594. model_id = form_data["model"]
  1595. if model_id not in models:
  1596. raise HTTPException(
  1597. status_code=status.HTTP_404_NOT_FOUND,
  1598. detail="Model not found",
  1599. )
  1600. # Check if the user has a custom task model
  1601. # If the user has a custom task model, use that model
  1602. task_model_id = get_task_model_id(
  1603. model_id,
  1604. app.state.config.TASK_MODEL,
  1605. app.state.config.TASK_MODEL_EXTERNAL,
  1606. models,
  1607. )
  1608. log.debug(
  1609. f"generating {type} queries using model {task_model_id} for user {user.email}"
  1610. )
  1611. if app.state.config.QUERY_GENERATION_PROMPT_TEMPLATE != "":
  1612. template = app.state.config.QUERY_GENERATION_PROMPT_TEMPLATE
  1613. else:
  1614. template = DEFAULT_QUERY_GENERATION_PROMPT_TEMPLATE
  1615. content = query_generation_template(
  1616. template, form_data["messages"], {"name": user.name}
  1617. )
  1618. payload = {
  1619. "model": task_model_id,
  1620. "messages": [{"role": "user", "content": content}],
  1621. "stream": False,
  1622. "metadata": {
  1623. "task": str(TASKS.QUERY_GENERATION),
  1624. "task_body": form_data,
  1625. "chat_id": form_data.get("chat_id", None),
  1626. },
  1627. }
  1628. # Handle pipeline filters
  1629. try:
  1630. payload = filter_pipeline(payload, user, models)
  1631. except Exception as e:
  1632. if len(e.args) > 1:
  1633. return JSONResponse(
  1634. status_code=e.args[0],
  1635. content={"detail": e.args[1]},
  1636. )
  1637. else:
  1638. return JSONResponse(
  1639. status_code=status.HTTP_400_BAD_REQUEST,
  1640. content={"detail": str(e)},
  1641. )
  1642. if "chat_id" in payload:
  1643. del payload["chat_id"]
  1644. return await generate_chat_completions(form_data=payload, user=user)
  1645. @app.post("/api/task/emoji/completions")
  1646. async def generate_emoji(form_data: dict, user=Depends(get_verified_user)):
  1647. model_list = await get_all_models()
  1648. models = {model["id"]: model for model in model_list}
  1649. model_id = form_data["model"]
  1650. if model_id not in models:
  1651. raise HTTPException(
  1652. status_code=status.HTTP_404_NOT_FOUND,
  1653. detail="Model not found",
  1654. )
  1655. # Check if the user has a custom task model
  1656. # If the user has a custom task model, use that model
  1657. task_model_id = get_task_model_id(
  1658. model_id,
  1659. app.state.config.TASK_MODEL,
  1660. app.state.config.TASK_MODEL_EXTERNAL,
  1661. models,
  1662. )
  1663. log.debug(f"generating emoji using model {task_model_id} for user {user.email} ")
  1664. template = '''
  1665. Your task is to reflect the speaker's likely facial expression through a fitting emoji. Interpret emotions from the message and reflect their facial expression using fitting, diverse emojis (e.g., 😊, 😢, 😡, 😱).
  1666. Message: """{{prompt}}"""
  1667. '''
  1668. content = emoji_generation_template(
  1669. template,
  1670. form_data["prompt"],
  1671. {
  1672. "name": user.name,
  1673. "location": user.info.get("location") if user.info else None,
  1674. },
  1675. )
  1676. payload = {
  1677. "model": task_model_id,
  1678. "messages": [{"role": "user", "content": content}],
  1679. "stream": False,
  1680. **(
  1681. {"max_tokens": 4}
  1682. if models[task_model_id]["owned_by"] == "ollama"
  1683. else {
  1684. "max_completion_tokens": 4,
  1685. }
  1686. ),
  1687. "chat_id": form_data.get("chat_id", None),
  1688. "metadata": {"task": str(TASKS.EMOJI_GENERATION), "task_body": form_data},
  1689. }
  1690. # Handle pipeline filters
  1691. try:
  1692. payload = filter_pipeline(payload, user, models)
  1693. except Exception as e:
  1694. if len(e.args) > 1:
  1695. return JSONResponse(
  1696. status_code=e.args[0],
  1697. content={"detail": e.args[1]},
  1698. )
  1699. else:
  1700. return JSONResponse(
  1701. status_code=status.HTTP_400_BAD_REQUEST,
  1702. content={"detail": str(e)},
  1703. )
  1704. if "chat_id" in payload:
  1705. del payload["chat_id"]
  1706. return await generate_chat_completions(form_data=payload, user=user)
  1707. @app.post("/api/task/moa/completions")
  1708. async def generate_moa_response(form_data: dict, user=Depends(get_verified_user)):
  1709. model_list = await get_all_models()
  1710. models = {model["id"]: model for model in model_list}
  1711. model_id = form_data["model"]
  1712. if model_id not in models:
  1713. raise HTTPException(
  1714. status_code=status.HTTP_404_NOT_FOUND,
  1715. detail="Model not found",
  1716. )
  1717. # Check if the user has a custom task model
  1718. # If the user has a custom task model, use that model
  1719. task_model_id = get_task_model_id(
  1720. model_id,
  1721. app.state.config.TASK_MODEL,
  1722. app.state.config.TASK_MODEL_EXTERNAL,
  1723. models,
  1724. )
  1725. log.debug(f"generating MOA model {task_model_id} for user {user.email} ")
  1726. template = """You have been provided with a set of responses from various models to the latest user query: "{{prompt}}"
  1727. Your task is to synthesize these responses into a single, high-quality response. It is crucial to critically evaluate the information provided in these responses, recognizing that some of it may be biased or incorrect. Your response should not simply replicate the given answers but should offer a refined, accurate, and comprehensive reply to the instruction. Ensure your response is well-structured, coherent, and adheres to the highest standards of accuracy and reliability.
  1728. Responses from models: {{responses}}"""
  1729. content = moa_response_generation_template(
  1730. template,
  1731. form_data["prompt"],
  1732. form_data["responses"],
  1733. )
  1734. payload = {
  1735. "model": task_model_id,
  1736. "messages": [{"role": "user", "content": content}],
  1737. "stream": form_data.get("stream", False),
  1738. "chat_id": form_data.get("chat_id", None),
  1739. "metadata": {
  1740. "task": str(TASKS.MOA_RESPONSE_GENERATION),
  1741. "task_body": form_data,
  1742. },
  1743. }
  1744. try:
  1745. payload = filter_pipeline(payload, user, models)
  1746. except Exception as e:
  1747. if len(e.args) > 1:
  1748. return JSONResponse(
  1749. status_code=e.args[0],
  1750. content={"detail": e.args[1]},
  1751. )
  1752. else:
  1753. return JSONResponse(
  1754. status_code=status.HTTP_400_BAD_REQUEST,
  1755. content={"detail": str(e)},
  1756. )
  1757. if "chat_id" in payload:
  1758. del payload["chat_id"]
  1759. return await generate_chat_completions(form_data=payload, user=user)
  1760. ##################################
  1761. #
  1762. # Pipelines Endpoints
  1763. #
  1764. ##################################
  1765. # TODO: Refactor pipelines API endpoints below into a separate file
  1766. @app.get("/api/pipelines/list")
  1767. async def get_pipelines_list(user=Depends(get_admin_user)):
  1768. responses = await get_openai_models_responses()
  1769. log.debug(f"get_pipelines_list: get_openai_models_responses returned {responses}")
  1770. urlIdxs = [
  1771. idx
  1772. for idx, response in enumerate(responses)
  1773. if response is not None and "pipelines" in response
  1774. ]
  1775. return {
  1776. "data": [
  1777. {
  1778. "url": openai_app.state.config.OPENAI_API_BASE_URLS[urlIdx],
  1779. "idx": urlIdx,
  1780. }
  1781. for urlIdx in urlIdxs
  1782. ]
  1783. }
  1784. @app.post("/api/pipelines/upload")
  1785. async def upload_pipeline(
  1786. urlIdx: int = Form(...), file: UploadFile = File(...), user=Depends(get_admin_user)
  1787. ):
  1788. print("upload_pipeline", urlIdx, file.filename)
  1789. # Check if the uploaded file is a python file
  1790. if not (file.filename and file.filename.endswith(".py")):
  1791. raise HTTPException(
  1792. status_code=status.HTTP_400_BAD_REQUEST,
  1793. detail="Only Python (.py) files are allowed.",
  1794. )
  1795. upload_folder = f"{CACHE_DIR}/pipelines"
  1796. os.makedirs(upload_folder, exist_ok=True)
  1797. file_path = os.path.join(upload_folder, file.filename)
  1798. r = None
  1799. try:
  1800. # Save the uploaded file
  1801. with open(file_path, "wb") as buffer:
  1802. shutil.copyfileobj(file.file, buffer)
  1803. url = openai_app.state.config.OPENAI_API_BASE_URLS[urlIdx]
  1804. key = openai_app.state.config.OPENAI_API_KEYS[urlIdx]
  1805. headers = {"Authorization": f"Bearer {key}"}
  1806. with open(file_path, "rb") as f:
  1807. files = {"file": f}
  1808. r = requests.post(f"{url}/pipelines/upload", headers=headers, files=files)
  1809. r.raise_for_status()
  1810. data = r.json()
  1811. return {**data}
  1812. except Exception as e:
  1813. # Handle connection error here
  1814. print(f"Connection error: {e}")
  1815. detail = "Pipeline not found"
  1816. status_code = status.HTTP_404_NOT_FOUND
  1817. if r is not None:
  1818. status_code = r.status_code
  1819. try:
  1820. res = r.json()
  1821. if "detail" in res:
  1822. detail = res["detail"]
  1823. except Exception:
  1824. pass
  1825. raise HTTPException(
  1826. status_code=status_code,
  1827. detail=detail,
  1828. )
  1829. finally:
  1830. # Ensure the file is deleted after the upload is completed or on failure
  1831. if os.path.exists(file_path):
  1832. os.remove(file_path)
  1833. class AddPipelineForm(BaseModel):
  1834. url: str
  1835. urlIdx: int
  1836. @app.post("/api/pipelines/add")
  1837. async def add_pipeline(form_data: AddPipelineForm, user=Depends(get_admin_user)):
  1838. r = None
  1839. try:
  1840. urlIdx = form_data.urlIdx
  1841. url = openai_app.state.config.OPENAI_API_BASE_URLS[urlIdx]
  1842. key = openai_app.state.config.OPENAI_API_KEYS[urlIdx]
  1843. headers = {"Authorization": f"Bearer {key}"}
  1844. r = requests.post(
  1845. f"{url}/pipelines/add", headers=headers, json={"url": form_data.url}
  1846. )
  1847. r.raise_for_status()
  1848. data = r.json()
  1849. return {**data}
  1850. except Exception as e:
  1851. # Handle connection error here
  1852. print(f"Connection error: {e}")
  1853. detail = "Pipeline not found"
  1854. if r is not None:
  1855. try:
  1856. res = r.json()
  1857. if "detail" in res:
  1858. detail = res["detail"]
  1859. except Exception:
  1860. pass
  1861. raise HTTPException(
  1862. status_code=(r.status_code if r is not None else status.HTTP_404_NOT_FOUND),
  1863. detail=detail,
  1864. )
  1865. class DeletePipelineForm(BaseModel):
  1866. id: str
  1867. urlIdx: int
  1868. @app.delete("/api/pipelines/delete")
  1869. async def delete_pipeline(form_data: DeletePipelineForm, user=Depends(get_admin_user)):
  1870. r = None
  1871. try:
  1872. urlIdx = form_data.urlIdx
  1873. url = openai_app.state.config.OPENAI_API_BASE_URLS[urlIdx]
  1874. key = openai_app.state.config.OPENAI_API_KEYS[urlIdx]
  1875. headers = {"Authorization": f"Bearer {key}"}
  1876. r = requests.delete(
  1877. f"{url}/pipelines/delete", headers=headers, json={"id": form_data.id}
  1878. )
  1879. r.raise_for_status()
  1880. data = r.json()
  1881. return {**data}
  1882. except Exception as e:
  1883. # Handle connection error here
  1884. print(f"Connection error: {e}")
  1885. detail = "Pipeline not found"
  1886. if r is not None:
  1887. try:
  1888. res = r.json()
  1889. if "detail" in res:
  1890. detail = res["detail"]
  1891. except Exception:
  1892. pass
  1893. raise HTTPException(
  1894. status_code=(r.status_code if r is not None else status.HTTP_404_NOT_FOUND),
  1895. detail=detail,
  1896. )
  1897. @app.get("/api/pipelines")
  1898. async def get_pipelines(urlIdx: Optional[int] = None, user=Depends(get_admin_user)):
  1899. r = None
  1900. try:
  1901. url = openai_app.state.config.OPENAI_API_BASE_URLS[urlIdx]
  1902. key = openai_app.state.config.OPENAI_API_KEYS[urlIdx]
  1903. headers = {"Authorization": f"Bearer {key}"}
  1904. r = requests.get(f"{url}/pipelines", headers=headers)
  1905. r.raise_for_status()
  1906. data = r.json()
  1907. return {**data}
  1908. except Exception as e:
  1909. # Handle connection error here
  1910. print(f"Connection error: {e}")
  1911. detail = "Pipeline not found"
  1912. if r is not None:
  1913. try:
  1914. res = r.json()
  1915. if "detail" in res:
  1916. detail = res["detail"]
  1917. except Exception:
  1918. pass
  1919. raise HTTPException(
  1920. status_code=(r.status_code if r is not None else status.HTTP_404_NOT_FOUND),
  1921. detail=detail,
  1922. )
  1923. @app.get("/api/pipelines/{pipeline_id}/valves")
  1924. async def get_pipeline_valves(
  1925. urlIdx: Optional[int],
  1926. pipeline_id: str,
  1927. user=Depends(get_admin_user),
  1928. ):
  1929. r = None
  1930. try:
  1931. url = openai_app.state.config.OPENAI_API_BASE_URLS[urlIdx]
  1932. key = openai_app.state.config.OPENAI_API_KEYS[urlIdx]
  1933. headers = {"Authorization": f"Bearer {key}"}
  1934. r = requests.get(f"{url}/{pipeline_id}/valves", headers=headers)
  1935. r.raise_for_status()
  1936. data = r.json()
  1937. return {**data}
  1938. except Exception as e:
  1939. # Handle connection error here
  1940. print(f"Connection error: {e}")
  1941. detail = "Pipeline not found"
  1942. if r is not None:
  1943. try:
  1944. res = r.json()
  1945. if "detail" in res:
  1946. detail = res["detail"]
  1947. except Exception:
  1948. pass
  1949. raise HTTPException(
  1950. status_code=(r.status_code if r is not None else status.HTTP_404_NOT_FOUND),
  1951. detail=detail,
  1952. )
  1953. @app.get("/api/pipelines/{pipeline_id}/valves/spec")
  1954. async def get_pipeline_valves_spec(
  1955. urlIdx: Optional[int],
  1956. pipeline_id: str,
  1957. user=Depends(get_admin_user),
  1958. ):
  1959. r = None
  1960. try:
  1961. url = openai_app.state.config.OPENAI_API_BASE_URLS[urlIdx]
  1962. key = openai_app.state.config.OPENAI_API_KEYS[urlIdx]
  1963. headers = {"Authorization": f"Bearer {key}"}
  1964. r = requests.get(f"{url}/{pipeline_id}/valves/spec", headers=headers)
  1965. r.raise_for_status()
  1966. data = r.json()
  1967. return {**data}
  1968. except Exception as e:
  1969. # Handle connection error here
  1970. print(f"Connection error: {e}")
  1971. detail = "Pipeline not found"
  1972. if r is not None:
  1973. try:
  1974. res = r.json()
  1975. if "detail" in res:
  1976. detail = res["detail"]
  1977. except Exception:
  1978. pass
  1979. raise HTTPException(
  1980. status_code=(r.status_code if r is not None else status.HTTP_404_NOT_FOUND),
  1981. detail=detail,
  1982. )
  1983. @app.post("/api/pipelines/{pipeline_id}/valves/update")
  1984. async def update_pipeline_valves(
  1985. urlIdx: Optional[int],
  1986. pipeline_id: str,
  1987. form_data: dict,
  1988. user=Depends(get_admin_user),
  1989. ):
  1990. r = None
  1991. try:
  1992. url = openai_app.state.config.OPENAI_API_BASE_URLS[urlIdx]
  1993. key = openai_app.state.config.OPENAI_API_KEYS[urlIdx]
  1994. headers = {"Authorization": f"Bearer {key}"}
  1995. r = requests.post(
  1996. f"{url}/{pipeline_id}/valves/update",
  1997. headers=headers,
  1998. json={**form_data},
  1999. )
  2000. r.raise_for_status()
  2001. data = r.json()
  2002. return {**data}
  2003. except Exception as e:
  2004. # Handle connection error here
  2005. print(f"Connection error: {e}")
  2006. detail = "Pipeline not found"
  2007. if r is not None:
  2008. try:
  2009. res = r.json()
  2010. if "detail" in res:
  2011. detail = res["detail"]
  2012. except Exception:
  2013. pass
  2014. raise HTTPException(
  2015. status_code=(r.status_code if r is not None else status.HTTP_404_NOT_FOUND),
  2016. detail=detail,
  2017. )
  2018. ##################################
  2019. #
  2020. # Config Endpoints
  2021. #
  2022. ##################################
  2023. @app.get("/api/config")
  2024. async def get_app_config(request: Request):
  2025. user = None
  2026. if "token" in request.cookies:
  2027. token = request.cookies.get("token")
  2028. try:
  2029. data = decode_token(token)
  2030. except Exception as e:
  2031. log.debug(e)
  2032. raise HTTPException(
  2033. status_code=status.HTTP_401_UNAUTHORIZED,
  2034. detail="Invalid token",
  2035. )
  2036. if data is not None and "id" in data:
  2037. user = Users.get_user_by_id(data["id"])
  2038. onboarding = False
  2039. if user is None:
  2040. user_count = Users.get_num_users()
  2041. onboarding = user_count == 0
  2042. return {
  2043. **({"onboarding": True} if onboarding else {}),
  2044. "status": True,
  2045. "name": WEBUI_NAME,
  2046. "version": VERSION,
  2047. "default_locale": str(DEFAULT_LOCALE),
  2048. "oauth": {
  2049. "providers": {
  2050. name: config.get("name", name)
  2051. for name, config in OAUTH_PROVIDERS.items()
  2052. }
  2053. },
  2054. "features": {
  2055. "auth": WEBUI_AUTH,
  2056. "auth_trusted_header": bool(webui_app.state.AUTH_TRUSTED_EMAIL_HEADER),
  2057. "enable_ldap": webui_app.state.config.ENABLE_LDAP,
  2058. "enable_api_key": webui_app.state.config.ENABLE_API_KEY,
  2059. "enable_signup": webui_app.state.config.ENABLE_SIGNUP,
  2060. "enable_login_form": webui_app.state.config.ENABLE_LOGIN_FORM,
  2061. **(
  2062. {
  2063. "enable_web_search": retrieval_app.state.config.ENABLE_RAG_WEB_SEARCH,
  2064. "enable_image_generation": images_app.state.config.ENABLED,
  2065. "enable_community_sharing": webui_app.state.config.ENABLE_COMMUNITY_SHARING,
  2066. "enable_message_rating": webui_app.state.config.ENABLE_MESSAGE_RATING,
  2067. "enable_admin_export": ENABLE_ADMIN_EXPORT,
  2068. "enable_admin_chat_access": ENABLE_ADMIN_CHAT_ACCESS,
  2069. }
  2070. if user is not None
  2071. else {}
  2072. ),
  2073. },
  2074. **(
  2075. {
  2076. "default_models": webui_app.state.config.DEFAULT_MODELS,
  2077. "default_prompt_suggestions": webui_app.state.config.DEFAULT_PROMPT_SUGGESTIONS,
  2078. "audio": {
  2079. "tts": {
  2080. "engine": audio_app.state.config.TTS_ENGINE,
  2081. "voice": audio_app.state.config.TTS_VOICE,
  2082. "split_on": audio_app.state.config.TTS_SPLIT_ON,
  2083. },
  2084. "stt": {
  2085. "engine": audio_app.state.config.STT_ENGINE,
  2086. },
  2087. },
  2088. "file": {
  2089. "max_size": retrieval_app.state.config.FILE_MAX_SIZE,
  2090. "max_count": retrieval_app.state.config.FILE_MAX_COUNT,
  2091. },
  2092. "permissions": {**webui_app.state.config.USER_PERMISSIONS},
  2093. }
  2094. if user is not None
  2095. else {}
  2096. ),
  2097. }
  2098. # TODO: webhook endpoint should be under config endpoints
  2099. @app.get("/api/webhook")
  2100. async def get_webhook_url(user=Depends(get_admin_user)):
  2101. return {
  2102. "url": app.state.config.WEBHOOK_URL,
  2103. }
  2104. class UrlForm(BaseModel):
  2105. url: str
  2106. @app.post("/api/webhook")
  2107. async def update_webhook_url(form_data: UrlForm, user=Depends(get_admin_user)):
  2108. app.state.config.WEBHOOK_URL = form_data.url
  2109. webui_app.state.WEBHOOK_URL = app.state.config.WEBHOOK_URL
  2110. return {"url": app.state.config.WEBHOOK_URL}
  2111. @app.get("/api/version")
  2112. async def get_app_version():
  2113. return {
  2114. "version": VERSION,
  2115. }
  2116. @app.get("/api/changelog")
  2117. async def get_app_changelog():
  2118. return {key: CHANGELOG[key] for idx, key in enumerate(CHANGELOG) if idx < 5}
  2119. @app.get("/api/version/updates")
  2120. async def get_app_latest_release_version():
  2121. if OFFLINE_MODE:
  2122. log.debug(
  2123. f"Offline mode is enabled, returning current version as latest version"
  2124. )
  2125. return {"current": VERSION, "latest": VERSION}
  2126. try:
  2127. timeout = aiohttp.ClientTimeout(total=1)
  2128. async with aiohttp.ClientSession(timeout=timeout, trust_env=True) as session:
  2129. async with session.get(
  2130. "https://api.github.com/repos/open-webui/open-webui/releases/latest"
  2131. ) as response:
  2132. response.raise_for_status()
  2133. data = await response.json()
  2134. latest_version = data["tag_name"]
  2135. return {"current": VERSION, "latest": latest_version[1:]}
  2136. except Exception as e:
  2137. log.debug(e)
  2138. return {"current": VERSION, "latest": VERSION}
  2139. ############################
  2140. # OAuth Login & Callback
  2141. ############################
  2142. # SessionMiddleware is used by authlib for oauth
  2143. if len(OAUTH_PROVIDERS) > 0:
  2144. app.add_middleware(
  2145. SessionMiddleware,
  2146. secret_key=WEBUI_SECRET_KEY,
  2147. session_cookie="oui-session",
  2148. same_site=WEBUI_SESSION_COOKIE_SAME_SITE,
  2149. https_only=WEBUI_SESSION_COOKIE_SECURE,
  2150. )
  2151. @app.get("/oauth/{provider}/login")
  2152. async def oauth_login(provider: str, request: Request):
  2153. return await oauth_manager.handle_login(provider, request)
  2154. # OAuth login logic is as follows:
  2155. # 1. Attempt to find a user with matching subject ID, tied to the provider
  2156. # 2. If OAUTH_MERGE_ACCOUNTS_BY_EMAIL is true, find a user with the email address provided via OAuth
  2157. # - This is considered insecure in general, as OAuth providers do not always verify email addresses
  2158. # 3. If there is no user, and ENABLE_OAUTH_SIGNUP is true, create a user
  2159. # - Email addresses are considered unique, so we fail registration if the email address is already taken
  2160. @app.get("/oauth/{provider}/callback")
  2161. async def oauth_callback(provider: str, request: Request, response: Response):
  2162. return await oauth_manager.handle_callback(provider, request, response)
  2163. @app.get("/manifest.json")
  2164. async def get_manifest_json():
  2165. return {
  2166. "name": WEBUI_NAME,
  2167. "short_name": WEBUI_NAME,
  2168. "description": "Open WebUI is an open, extensible, user-friendly interface for AI that adapts to your workflow.",
  2169. "start_url": "/",
  2170. "display": "standalone",
  2171. "background_color": "#343541",
  2172. "orientation": "natural",
  2173. "icons": [
  2174. {
  2175. "src": "/static/logo.png",
  2176. "type": "image/png",
  2177. "sizes": "500x500",
  2178. "purpose": "any",
  2179. },
  2180. {
  2181. "src": "/static/logo.png",
  2182. "type": "image/png",
  2183. "sizes": "500x500",
  2184. "purpose": "maskable",
  2185. },
  2186. ],
  2187. }
  2188. @app.get("/opensearch.xml")
  2189. async def get_opensearch_xml():
  2190. xml_content = rf"""
  2191. <OpenSearchDescription xmlns="http://a9.com/-/spec/opensearch/1.1/" xmlns:moz="http://www.mozilla.org/2006/browser/search/">
  2192. <ShortName>{WEBUI_NAME}</ShortName>
  2193. <Description>Search {WEBUI_NAME}</Description>
  2194. <InputEncoding>UTF-8</InputEncoding>
  2195. <Image width="16" height="16" type="image/x-icon">{WEBUI_URL}/static/favicon.png</Image>
  2196. <Url type="text/html" method="get" template="{WEBUI_URL}/?q={"{searchTerms}"}"/>
  2197. <moz:SearchForm>{WEBUI_URL}</moz:SearchForm>
  2198. </OpenSearchDescription>
  2199. """
  2200. return Response(content=xml_content, media_type="application/xml")
  2201. @app.get("/health")
  2202. async def healthcheck():
  2203. return {"status": True}
  2204. @app.get("/health/db")
  2205. async def healthcheck_with_db():
  2206. Session.execute(text("SELECT 1;")).all()
  2207. return {"status": True}
  2208. app.mount("/static", StaticFiles(directory=STATIC_DIR), name="static")
  2209. app.mount("/cache", StaticFiles(directory=CACHE_DIR), name="cache")
  2210. if os.path.exists(FRONTEND_BUILD_DIR):
  2211. mimetypes.add_type("text/javascript", ".js")
  2212. app.mount(
  2213. "/",
  2214. SPAStaticFiles(directory=FRONTEND_BUILD_DIR, html=True),
  2215. name="spa-static-files",
  2216. )
  2217. else:
  2218. log.warning(
  2219. f"Frontend build directory not found at '{FRONTEND_BUILD_DIR}'. Serving API only."
  2220. )