import asyncio
import inspect
import json
import logging
import mimetypes
import os
import shutil
import sys
import time
from contextlib import asynccontextmanager
from typing import Optional

import aiohttp
import requests
from fastapi import (
    Depends,
    FastAPI,
    File,
    Form,
    HTTPException,
    Request,
    UploadFile,
    status,
)
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import JSONResponse, RedirectResponse  # RedirectResponse: used by RedirectMiddleware below
from fastapi.staticfiles import StaticFiles
from pydantic import BaseModel
from sqlalchemy import text
from starlette.exceptions import HTTPException as StarletteHTTPException
from starlette.middleware.base import BaseHTTPMiddleware
from starlette.middleware.sessions import SessionMiddleware
from starlette.responses import Response, StreamingResponse

from open_webui.apps.audio.main import app as audio_app
from open_webui.apps.images.main import app as images_app
from open_webui.apps.ollama.main import (
    app as ollama_app,
    get_all_models as get_ollama_models,
    generate_chat_completion as generate_ollama_chat_completion,
    GenerateChatCompletionForm,
)
from open_webui.apps.openai.main import (
    app as openai_app,
    generate_chat_completion as generate_openai_chat_completion,
    get_all_models as get_openai_models,
)
from open_webui.apps.retrieval.main import app as retrieval_app
from open_webui.apps.retrieval.utils import get_rag_context, rag_template
from open_webui.apps.socket.main import (
    app as socket_app,
    periodic_usage_pool_cleanup,
    get_event_call,
    get_event_emitter,
)
from open_webui.apps.webui.internal.db import Session
from open_webui.apps.webui.main import (
    app as webui_app,
    generate_function_chat_completion,
    get_pipe_models,
)
from open_webui.apps.webui.models.functions import Functions
from open_webui.apps.webui.models.models import Models
from open_webui.apps.webui.models.users import UserModel, Users
from open_webui.apps.webui.utils import load_function_module_by_id
from open_webui.config import (
    CACHE_DIR,
    CORS_ALLOW_ORIGIN,
    DEFAULT_LOCALE,
    ENABLE_ADMIN_CHAT_ACCESS,
    ENABLE_ADMIN_EXPORT,
    ENABLE_MODEL_FILTER,
    ENABLE_OLLAMA_API,
    ENABLE_OPENAI_API,
    ENV,
    FRONTEND_BUILD_DIR,
    MODEL_FILTER_LIST,
    OAUTH_PROVIDERS,
    ENABLE_SEARCH_QUERY,
    SEARCH_QUERY_GENERATION_PROMPT_TEMPLATE,
    STATIC_DIR,
    TASK_MODEL,
    TASK_MODEL_EXTERNAL,
    TITLE_GENERATION_PROMPT_TEMPLATE,
    TAGS_GENERATION_PROMPT_TEMPLATE,
    TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE,
    WEBHOOK_URL,
    WEBUI_AUTH,
    WEBUI_NAME,
    AppConfig,
    reset_config,
)
from open_webui.constants import TASKS
from open_webui.env import (
    CHANGELOG,
    GLOBAL_LOG_LEVEL,
    SAFE_MODE,
    SRC_LOG_LEVELS,
    VERSION,
    WEBUI_BUILD_HASH,
    WEBUI_SECRET_KEY,
    WEBUI_SESSION_COOKIE_SAME_SITE,
    WEBUI_SESSION_COOKIE_SECURE,
    WEBUI_URL,
    RESET_CONFIG_ON_START,
    OFFLINE_MODE,
)
from open_webui.utils.misc import (
    add_or_update_system_message,
    get_last_user_message,
    prepend_to_first_user_message_content,
)
from open_webui.utils.oauth import oauth_manager
from open_webui.utils.payload import convert_payload_openai_to_ollama
from open_webui.utils.response import (
    convert_response_ollama_to_openai,
    convert_streaming_response_ollama_to_openai,
)
from open_webui.utils.security_headers import SecurityHeadersMiddleware
from open_webui.utils.task import (
    moa_response_generation_template,
    tags_generation_template,
    search_query_generation_template,
    title_generation_template,
    tools_function_calling_generation_template,
)
from open_webui.utils.tools import get_tools
from open_webui.utils.utils import (
    decode_token,
    get_admin_user,
    get_current_user,
    get_http_authorization_cred,
    get_verified_user,
)

if SAFE_MODE:
    print("SAFE MODE ENABLED")
    Functions.deactivate_all_functions()

logging.basicConfig(stream=sys.stdout, level=GLOBAL_LOG_LEVEL)
log = logging.getLogger(__name__)
log.setLevel(SRC_LOG_LEVELS["MAIN"])
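

# Serve the compiled single-page frontend: any path the static mount cannot
# resolve (e.g. a client-side route like /c/<chat_id>) falls back to
# index.html so the browser router can take over instead of returning a 404.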
class SPAStaticFiles(StaticFiles):
    async def get_response(self, path: str, scope):
        try:
            return await super().get_response(path, scope)
        except (HTTPException, StarletteHTTPException) as ex:
            if ex.status_code == 404:
                return await super().get_response("index.html", scope)
            else:
                raise ex

print(
    rf"""
  ___                    __        __   _     _   _ ___
 / _ \ _ __   ___ _ __   \ \      / /__| |__ | | | |_ _|
| | | | '_ \ / _ \ '_ \   \ \ /\ / / _ \ '_ \| | | || |
| |_| | |_) |  __/ | | |   \ V  V /  __/ |_) | |_| || |
 \___/| .__/ \___|_| |_|    \_/\_/ \___|_.__/ \___/|___|
      |_|

v{VERSION} - building the best open-source AI user interface.
{f"Commit: {WEBUI_BUILD_HASH}" if WEBUI_BUILD_HASH != "dev-build" else ""}
https://github.com/open-webui/open-webui
"""
)


@asynccontextmanager
async def lifespan(app: FastAPI):
    if RESET_CONFIG_ON_START:
        reset_config()

    asyncio.create_task(periodic_usage_pool_cleanup())
    yield


app = FastAPI(
    docs_url="/docs" if ENV == "dev" else None, redoc_url=None, lifespan=lifespan
)

app.state.config = AppConfig()

app.state.config.ENABLE_OPENAI_API = ENABLE_OPENAI_API
app.state.config.ENABLE_OLLAMA_API = ENABLE_OLLAMA_API

app.state.config.ENABLE_MODEL_FILTER = ENABLE_MODEL_FILTER
app.state.config.MODEL_FILTER_LIST = MODEL_FILTER_LIST

app.state.config.WEBHOOK_URL = WEBHOOK_URL

app.state.config.TASK_MODEL = TASK_MODEL
app.state.config.TASK_MODEL_EXTERNAL = TASK_MODEL_EXTERNAL
app.state.config.TITLE_GENERATION_PROMPT_TEMPLATE = TITLE_GENERATION_PROMPT_TEMPLATE
app.state.config.TAGS_GENERATION_PROMPT_TEMPLATE = TAGS_GENERATION_PROMPT_TEMPLATE
app.state.config.SEARCH_QUERY_GENERATION_PROMPT_TEMPLATE = (
    SEARCH_QUERY_GENERATION_PROMPT_TEMPLATE
)
app.state.config.ENABLE_SEARCH_QUERY = ENABLE_SEARCH_QUERY
app.state.config.TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE = (
    TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE
)

app.state.MODELS = {}


##################################
#
# ChatCompletion Middleware
#
##################################


def get_task_model_id(default_model_id):
    # Set the task model
    task_model_id = default_model_id
    # Check if the user has a custom task model and use that model
    if app.state.MODELS[task_model_id]["owned_by"] == "ollama":
        if (
            app.state.config.TASK_MODEL
            and app.state.config.TASK_MODEL in app.state.MODELS
        ):
            task_model_id = app.state.config.TASK_MODEL
    else:
        if (
            app.state.config.TASK_MODEL_EXTERNAL
            and app.state.config.TASK_MODEL_EXTERNAL in app.state.MODELS
        ):
            task_model_id = app.state.config.TASK_MODEL_EXTERNAL

    return task_model_id


def get_filter_function_ids(model):
    def get_priority(function_id):
        function = Functions.get_function_by_id(function_id)
        if function is not None and hasattr(function, "valves"):
            # TODO: Fix FunctionModel
            return (function.valves if function.valves else {}).get("priority", 0)
        return 0

    filter_ids = [function.id for function in Functions.get_global_filter_functions()]
    if "info" in model and "meta" in model["info"]:
        filter_ids.extend(model["info"]["meta"].get("filterIds", []))
        filter_ids = list(set(filter_ids))

    enabled_filter_ids = [
        function.id
        for function in Functions.get_functions_by_type("filter", active_only=True)
    ]

    filter_ids = [
        filter_id for filter_id in filter_ids if filter_id in enabled_filter_ids
    ]

    filter_ids.sort(key=get_priority)
    return filter_ids
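

# For orientation: a filter module loaded by load_function_module_by_id is
# expected to expose some of the hooks used below. The hook names (inlet,
# outlet, file_handler, Valves, UserValves) come from this file; the example
# body is an illustrative sketch, not a prescribed implementation:
#
#   class Filter:
#       class Valves(BaseModel):
#           priority: int = 0
#
#       def inlet(self, body: dict, __user__: dict) -> dict:
#           # inspect/rewrite the request payload before it reaches the model
#           return body
#
#       def outlet(self, body: dict) -> dict:
#           # post-process the completed chat payload
#           return body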
async def chat_completion_filter_functions_handler(body, model, extra_params):
    skip_files = None

    filter_ids = get_filter_function_ids(model)
    for filter_id in filter_ids:
        filter = Functions.get_function_by_id(filter_id)
        if not filter:
            continue

        if filter_id in webui_app.state.FUNCTIONS:
            function_module = webui_app.state.FUNCTIONS[filter_id]
        else:
            function_module, _, _ = load_function_module_by_id(filter_id)
            webui_app.state.FUNCTIONS[filter_id] = function_module

        # Check if the function has a file_handler variable
        if hasattr(function_module, "file_handler"):
            skip_files = function_module.file_handler

        if hasattr(function_module, "valves") and hasattr(function_module, "Valves"):
            valves = Functions.get_function_valves_by_id(filter_id)
            function_module.valves = function_module.Valves(
                **(valves if valves else {})
            )

        if not hasattr(function_module, "inlet"):
            continue

        try:
            inlet = function_module.inlet

            # Get the signature of the function
            sig = inspect.signature(inlet)
            params = {"body": body} | {
                k: v
                for k, v in {
                    **extra_params,
                    "__model__": model,
                    "__id__": filter_id,
                }.items()
                if k in sig.parameters
            }

            if "__user__" in params and hasattr(function_module, "UserValves"):
                try:
                    params["__user__"]["valves"] = function_module.UserValves(
                        **Functions.get_user_valves_by_id_and_user_id(
                            filter_id, params["__user__"]["id"]
                        )
                    )
                except Exception as e:
                    print(e)

            if inspect.iscoroutinefunction(inlet):
                body = await inlet(**params)
            else:
                body = inlet(**params)

        except Exception as e:
            print(f"Error: {e}")
            raise e

    if skip_files and "files" in body.get("metadata", {}):
        del body["metadata"]["files"]

    return body, {}


def get_tools_function_calling_payload(messages, task_model_id, content):
    user_message = get_last_user_message(messages)
    history = "\n".join(
        f"{message['role'].upper()}: \"\"\"{message['content']}\"\"\""
        for message in messages[::-1][:4]
    )

    prompt = f"History:\n{history}\nQuery: {user_message}"

    return {
        "model": task_model_id,
        "messages": [
            {"role": "system", "content": content},
            {"role": "user", "content": f"Query: {prompt}"},
        ],
        "stream": False,
        "metadata": {"task": str(TASKS.FUNCTION_CALLING)},
    }


async def get_content_from_response(response) -> Optional[str]:
    content = None
    if hasattr(response, "body_iterator"):
        async for chunk in response.body_iterator:
            data = json.loads(chunk.decode("utf-8"))
            content = data["choices"][0]["message"]["content"]

        # Cleanup any remaining background tasks if necessary
        if response.background is not None:
            await response.background()
    else:
        content = response["choices"][0]["message"]["content"]
    return content
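

# The handler below delegates tool selection to a task model: the model is
# prompted with the tool specs and is expected to reply with either an empty
# string or a single JSON object, e.g.
#   {"name": "get_weather", "parameters": {"city": "Berlin"}}
# (tool name and parameters illustrative), which is extracted from the
# response text and dispatched to the matching callable.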
async def chat_completion_tools_handler(
    body: dict, user: UserModel, extra_params: dict
) -> tuple[dict, dict]:
    # If tool_ids field is present, call the functions
    metadata = body.get("metadata", {})

    tool_ids = metadata.get("tool_ids", None)
    log.debug(f"{tool_ids=}")
    if not tool_ids:
        return body, {}

    skip_files = False
    contexts = []
    citations = []

    task_model_id = get_task_model_id(body["model"])
    tools = get_tools(
        webui_app,
        tool_ids,
        user,
        {
            **extra_params,
            "__model__": app.state.MODELS[task_model_id],
            "__messages__": body["messages"],
            "__files__": metadata.get("files", []),
        },
    )
    log.info(f"{tools=}")

    specs = [tool["spec"] for tool in tools.values()]
    tools_specs = json.dumps(specs)

    if app.state.config.TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE != "":
        template = app.state.config.TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE
    else:
        template = """Available Tools: {{TOOLS}}\nReturn an empty string if no tools match the query. If a function tool matches, construct and return a JSON object in the format {\"name\": \"functionName\", \"parameters\": {\"requiredFunctionParamKey\": \"requiredFunctionParamValue\"}} using the appropriate tool and its parameters. Only return the object and limit the response to the JSON object without additional text."""

    tools_function_calling_prompt = tools_function_calling_generation_template(
        template, tools_specs
    )
    log.info(f"{tools_function_calling_prompt=}")
    payload = get_tools_function_calling_payload(
        body["messages"], task_model_id, tools_function_calling_prompt
    )

    try:
        payload = filter_pipeline(payload, user)
    except Exception as e:
        raise e

    try:
        response = await generate_chat_completions(form_data=payload, user=user)
        log.debug(f"{response=}")
        content = await get_content_from_response(response)
        log.debug(f"{content=}")

        if not content:
            return body, {}

        try:
            content = content[content.find("{") : content.rfind("}") + 1]
            if not content:
                raise Exception("No JSON object found in the response")

            result = json.loads(content)

            tool_function_name = result.get("name", None)
            if tool_function_name not in tools:
                return body, {}

            tool_function_params = result.get("parameters", {})

            try:
                tool_output = await tools[tool_function_name]["callable"](
                    **tool_function_params
                )
            except Exception as e:
                tool_output = str(e)

            if tools[tool_function_name]["citation"]:
                citations.append(
                    {
                        "source": {
                            "name": f"TOOL:{tools[tool_function_name]['toolkit_id']}/{tool_function_name}"
                        },
                        "document": [tool_output],
                        "metadata": [{"source": tool_function_name}],
                    }
                )
            if tools[tool_function_name]["file_handler"]:
                skip_files = True

            if isinstance(tool_output, str):
                contexts.append(tool_output)
        except Exception as e:
            log.exception(f"Error: {e}")
            content = None
    except Exception as e:
        log.exception(f"Error: {e}")
        content = None

    log.debug(f"tool_contexts: {contexts}")

    if skip_files and "files" in body.get("metadata", {}):
        del body["metadata"]["files"]

    return body, {"contexts": contexts, "citations": citations}


async def chat_completion_files_handler(body) -> tuple[dict, dict[str, list]]:
    contexts = []
    citations = []

    if files := body.get("metadata", {}).get("files", None):
        contexts, citations = get_rag_context(
            files=files,
            messages=body["messages"],
            embedding_function=retrieval_app.state.EMBEDDING_FUNCTION,
            k=retrieval_app.state.config.TOP_K,
            reranking_function=retrieval_app.state.sentence_transformer_rf,
            r=retrieval_app.state.config.RELEVANCE_THRESHOLD,
            hybrid_search=retrieval_app.state.config.ENABLE_RAG_HYBRID_SEARCH,
        )

        log.debug(f"rag_contexts: {contexts}, citations: {citations}")

    return body, {"contexts": contexts, "citations": citations}


def is_chat_completion_request(request):
    return request.method == "POST" and any(
        endpoint in request.url.path
        for endpoint in ["/ollama/api/chat", "/chat/completions"]
    )


async def get_body_and_model_and_user(request):
    # Read the original request body
    body = await request.body()
    body_str = body.decode("utf-8")
    body = json.loads(body_str) if body_str else {}

    model_id = body["model"]
    if model_id not in app.state.MODELS:
        raise Exception("Model not found")
    model = app.state.MODELS[model_id]

    user = get_current_user(
        request,
        get_http_authorization_cred(request.headers.get("Authorization")),
    )

    return body, model, user


class ChatCompletionMiddleware(BaseHTTPMiddleware):
    async def dispatch(self, request: Request, call_next):
        if not is_chat_completion_request(request):
            return await call_next(request)
        log.debug(f"request.url.path: {request.url.path}")

        try:
            body, model, user = await get_body_and_model_and_user(request)
        except Exception as e:
            return JSONResponse(
                status_code=status.HTTP_400_BAD_REQUEST,
                content={"detail": str(e)},
            )

        metadata = {
            "chat_id": body.pop("chat_id", None),
            "message_id": body.pop("id", None),
            "session_id": body.pop("session_id", None),
            "tool_ids": body.get("tool_ids", None),
            "files": body.get("files", None),
        }
        body["metadata"] = metadata

        extra_params = {
            "__event_emitter__": get_event_emitter(metadata),
            "__event_call__": get_event_call(metadata),
            "__user__": {
                "id": user.id,
                "email": user.email,
                "name": user.name,
                "role": user.role,
            },
        }

        # Initialize data_items to store additional data to be sent to the client
        # Initialize contexts and citations
        data_items = []
        contexts = []
        citations = []

        try:
            body, flags = await chat_completion_filter_functions_handler(
                body, model, extra_params
            )
        except Exception as e:
            return JSONResponse(
                status_code=status.HTTP_400_BAD_REQUEST,
                content={"detail": str(e)},
            )

        metadata = {
            **metadata,
            "tool_ids": body.pop("tool_ids", None),
            "files": body.pop("files", None),
        }
        body["metadata"] = metadata

        try:
            body, flags = await chat_completion_tools_handler(body, user, extra_params)
            contexts.extend(flags.get("contexts", []))
            citations.extend(flags.get("citations", []))
        except Exception as e:
            log.exception(e)

        try:
            body, flags = await chat_completion_files_handler(body)
            contexts.extend(flags.get("contexts", []))
            citations.extend(flags.get("citations", []))
        except Exception as e:
            log.exception(e)
        # If context is not empty, insert it into the messages
        if len(contexts) > 0:
            context_string = "\n".join(contexts).strip()

            prompt = get_last_user_message(body["messages"])
            if prompt is None:
                raise Exception("No user message found")
            if (
                retrieval_app.state.config.RELEVANCE_THRESHOLD == 0
                and context_string.strip() == ""
            ):
                log.debug(
                    "With a 0 relevancy threshold for RAG, the context cannot be empty"
                )

            # Workaround for Ollama 2.0+ system prompt issue
            # TODO: replace with add_or_update_system_message
            if model["owned_by"] == "ollama":
                body["messages"] = prepend_to_first_user_message_content(
                    rag_template(
                        retrieval_app.state.config.RAG_TEMPLATE, context_string, prompt
                    ),
                    body["messages"],
                )
            else:
                body["messages"] = add_or_update_system_message(
                    rag_template(
                        retrieval_app.state.config.RAG_TEMPLATE, context_string, prompt
                    ),
                    body["messages"],
                )
        # If there are citations, add them to the data_items
        if len(citations) > 0:
            data_items.append({"citations": citations})

        modified_body_bytes = json.dumps(body).encode("utf-8")
        # Replace the request body with the modified one
        request._body = modified_body_bytes
        # Set custom header to ensure content-length matches new body length
        request.headers.__dict__["_list"] = [
            (b"content-length", str(len(modified_body_bytes)).encode("utf-8")),
            *[(k, v) for k, v in request.headers.raw if k.lower() != b"content-length"],
        ]

        response = await call_next(request)
        if not isinstance(response, StreamingResponse):
            return response

        content_type = response.headers["Content-Type"]
        is_openai = "text/event-stream" in content_type
        is_ollama = "application/x-ndjson" in content_type
        if not is_openai and not is_ollama:
            return response

        def wrap_item(item):
            return f"data: {item}\n\n" if is_openai else f"{item}\n"

        async def stream_wrapper(original_generator, data_items):
            for item in data_items:
                yield wrap_item(json.dumps(item))

            async for data in original_generator:
                yield data

        return StreamingResponse(
            stream_wrapper(response.body_iterator, data_items),
            headers=dict(response.headers),
        )

    async def _receive(self, body: bytes):
        return {"type": "http.request", "body": body, "more_body": False}


app.add_middleware(ChatCompletionMiddleware)
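
# Note on ordering: Starlette runs the most recently added middleware first,
# so PipelineMiddleware and the CORS/security-header middleware registered
# below wrap this one. ChatCompletionMiddleware therefore sees request bodies
# that the pipeline inlet filters have already rewritten.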


##################################
#
# Pipeline Middleware
#
##################################


def get_sorted_filters(model_id):
    filters = [
        model
        for model in app.state.MODELS.values()
        if "pipeline" in model
        and "type" in model["pipeline"]
        and model["pipeline"]["type"] == "filter"
        and (
            model["pipeline"]["pipelines"] == ["*"]
            or any(
                model_id == target_model_id
                for target_model_id in model["pipeline"]["pipelines"]
            )
        )
    ]
    sorted_filters = sorted(filters, key=lambda x: x["pipeline"]["priority"])
    return sorted_filters


def filter_pipeline(payload, user):
    user = {"id": user.id, "email": user.email, "name": user.name, "role": user.role}
    model_id = payload["model"]
    sorted_filters = get_sorted_filters(model_id)

    model = app.state.MODELS[model_id]
    if "pipeline" in model:
        sorted_filters.append(model)

    for filter in sorted_filters:
        r = None
        try:
            urlIdx = filter["urlIdx"]

            url = openai_app.state.config.OPENAI_API_BASE_URLS[urlIdx]
            key = openai_app.state.config.OPENAI_API_KEYS[urlIdx]

            if key == "":
                continue

            headers = {"Authorization": f"Bearer {key}"}
            r = requests.post(
                f"{url}/{filter['id']}/filter/inlet",
                headers=headers,
                json={
                    "user": user,
                    "body": payload,
                },
            )

            r.raise_for_status()
            payload = r.json()
        except Exception as e:
            # Handle connection error here
            print(f"Connection error: {e}")

            if r is not None:
                res = r.json()
                if "detail" in res:
                    raise Exception(r.status_code, res["detail"])

    return payload
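

# Each remote filter hop in filter_pipeline amounts to a request of the form
# (values illustrative):
#
#   POST {base_url}/{filter_id}/filter/inlet
#   Authorization: Bearer <key>
#   {"user": {"id": "...", "email": "...", "name": "...", "role": "user"},
#    "body": <chat completion payload>}
#
# whose JSON response replaces the payload for the next filter in the chain.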


class PipelineMiddleware(BaseHTTPMiddleware):
    async def dispatch(self, request: Request, call_next):
        if not is_chat_completion_request(request):
            return await call_next(request)

        log.debug(f"request.url.path: {request.url.path}")

        # Read the original request body
        body = await request.body()
        # Decode body to string
        body_str = body.decode("utf-8")
        # Parse string to JSON
        data = json.loads(body_str) if body_str else {}

        try:
            user = get_current_user(
                request,
                get_http_authorization_cred(request.headers["Authorization"]),
            )
        except KeyError as e:
            if len(e.args) > 1:
                return JSONResponse(
                    status_code=e.args[0],
                    content={"detail": e.args[1]},
                )
            else:
                return JSONResponse(
                    status_code=status.HTTP_401_UNAUTHORIZED,
                    content={"detail": "Not authenticated"},
                )

        try:
            data = filter_pipeline(data, user)
        except Exception as e:
            if len(e.args) > 1:
                return JSONResponse(
                    status_code=e.args[0],
                    content={"detail": e.args[1]},
                )
            else:
                return JSONResponse(
                    status_code=status.HTTP_400_BAD_REQUEST,
                    content={"detail": str(e)},
                )

        modified_body_bytes = json.dumps(data).encode("utf-8")
        # Replace the request body with the modified one
        request._body = modified_body_bytes
        # Set custom header to ensure content-length matches new body length
        request.headers.__dict__["_list"] = [
            (b"content-length", str(len(modified_body_bytes)).encode("utf-8")),
            *[(k, v) for k, v in request.headers.raw if k.lower() != b"content-length"],
        ]

        response = await call_next(request)
        return response

    async def _receive(self, body: bytes):
        return {"type": "http.request", "body": body, "more_body": False}


app.add_middleware(PipelineMiddleware)


from urllib.parse import urlencode, parse_qs, urlparse


class RedirectMiddleware(BaseHTTPMiddleware):
    async def dispatch(self, request: Request, call_next):
        # Check if the request is a GET request
        if request.method == "GET":
            path = request.url.path
            query_params = dict(parse_qs(urlparse(str(request.url)).query))

            # Check for the specific watch path and the presence of 'v' parameter
            if path.endswith("/watch") and "v" in query_params:
                video_id = query_params["v"][0]  # Extract the first 'v' parameter
                encoded_video_id = urlencode({"youtube": video_id})
                redirect_url = f"/?{encoded_video_id}"
                return RedirectResponse(url=redirect_url)

        # Proceed with the normal flow of other requests
        response = await call_next(request)
        return response


# Add the middleware to the app
app.add_middleware(RedirectMiddleware)
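
# Example: GET /watch?v=dQw4w9WgXcQ is redirected to /?youtube=dQw4w9WgXcQ
# (video id illustrative), leaving the frontend to act on the "youtube" query
# parameter.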


app.add_middleware(
    CORSMiddleware,
    allow_origins=CORS_ALLOW_ORIGIN,
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

app.add_middleware(SecurityHeadersMiddleware)


@app.middleware("http")
async def commit_session_after_request(request: Request, call_next):
    response = await call_next(request)
    log.debug("Commit session after request")
    Session.commit()
    return response


@app.middleware("http")
async def check_url(request: Request, call_next):
    if len(app.state.MODELS) == 0:
        await get_all_models()

    start_time = int(time.time())
    response = await call_next(request)
    process_time = int(time.time()) - start_time
    response.headers["X-Process-Time"] = str(process_time)

    return response


@app.middleware("http")
async def update_embedding_function(request: Request, call_next):
    response = await call_next(request)
    if "/embedding/update" in request.url.path:
        webui_app.state.EMBEDDING_FUNCTION = retrieval_app.state.EMBEDDING_FUNCTION
    return response


@app.middleware("http")
async def inspect_websocket(request: Request, call_next):
    if (
        "/ws/socket.io" in request.url.path
        and request.query_params.get("transport") == "websocket"
    ):
        upgrade = (request.headers.get("Upgrade") or "").lower()
        connection = (request.headers.get("Connection") or "").lower().split(",")
        # Check that the correct headers are present for an upgrade; otherwise reject the connection.
        # This is to work around this upstream issue: https://github.com/miguelgrinberg/python-engineio/issues/367
        if upgrade != "websocket" or "upgrade" not in connection:
            return JSONResponse(
                status_code=status.HTTP_400_BAD_REQUEST,
                content={"detail": "Invalid WebSocket upgrade request"},
            )
    return await call_next(request)


app.mount("/ws", socket_app)

app.mount("/ollama", ollama_app)
app.mount("/openai", openai_app)

app.mount("/images/api/v1", images_app)
app.mount("/audio/api/v1", audio_app)
app.mount("/retrieval/api/v1", retrieval_app)

app.mount("/api/v1", webui_app)

webui_app.state.EMBEDDING_FUNCTION = retrieval_app.state.EMBEDDING_FUNCTION
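
# Each mount above is a self-contained FastAPI/ASGI app with its own routes
# and state; e.g. a POST to /ollama/api/chat is served by ollama_app once the
# middleware stack registered on this root app has run.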


async def get_all_models():
    # TODO: Optimize this function
    pipe_models = []
    openai_models = []
    ollama_models = []

    pipe_models = await get_pipe_models()

    if app.state.config.ENABLE_OPENAI_API:
        openai_models = await get_openai_models()
        openai_models = openai_models["data"]

    if app.state.config.ENABLE_OLLAMA_API:
        ollama_models = await get_ollama_models()
        ollama_models = [
            {
                "id": model["model"],
                "name": model["name"],
                "object": "model",
                "created": int(time.time()),
                "owned_by": "ollama",
                "ollama": model,
            }
            for model in ollama_models["models"]
        ]

    models = pipe_models + openai_models + ollama_models

    global_action_ids = [
        function.id for function in Functions.get_global_action_functions()
    ]
    enabled_action_ids = [
        function.id
        for function in Functions.get_functions_by_type("action", active_only=True)
    ]

    custom_models = Models.get_all_models()
    for custom_model in custom_models:
        if custom_model.base_model_id is None:
            for model in models:
                if (
                    custom_model.id == model["id"]
                    or custom_model.id == model["id"].split(":")[0]
                ):
                    model["name"] = custom_model.name
                    model["info"] = custom_model.model_dump()

                    action_ids = []
                    if "info" in model and "meta" in model["info"]:
                        action_ids.extend(model["info"]["meta"].get("actionIds", []))

                    model["action_ids"] = action_ids
        else:
            owned_by = "openai"
            pipe = None
            action_ids = []

            for model in models:
                if (
                    custom_model.base_model_id == model["id"]
                    or custom_model.base_model_id == model["id"].split(":")[0]
                ):
                    owned_by = model["owned_by"]
                    if "pipe" in model:
                        pipe = model["pipe"]
                    break

            if custom_model.meta:
                meta = custom_model.meta.model_dump()
                if "actionIds" in meta:
                    action_ids.extend(meta["actionIds"])

            models.append(
                {
                    "id": custom_model.id,
                    "name": custom_model.name,
                    "object": "model",
                    "created": custom_model.created_at,
                    "owned_by": owned_by,
                    "info": custom_model.model_dump(),
                    "preset": True,
                    **({"pipe": pipe} if pipe is not None else {}),
                    "action_ids": action_ids,
                }
            )

    for model in models:
        action_ids = []
        if "action_ids" in model:
            action_ids = model["action_ids"]
            del model["action_ids"]

        action_ids = action_ids + global_action_ids
        action_ids = list(set(action_ids))
        action_ids = [
            action_id for action_id in action_ids if action_id in enabled_action_ids
        ]

        model["actions"] = []
        for action_id in action_ids:
            action = Functions.get_function_by_id(action_id)
            if action is None:
                raise Exception(f"Action not found: {action_id}")

            if action_id in webui_app.state.FUNCTIONS:
                function_module = webui_app.state.FUNCTIONS[action_id]
            else:
                function_module, _, _ = load_function_module_by_id(action_id)
                webui_app.state.FUNCTIONS[action_id] = function_module

            __webui__ = False
            if hasattr(function_module, "__webui__"):
                __webui__ = function_module.__webui__

            if hasattr(function_module, "actions"):
                actions = function_module.actions
                model["actions"].extend(
                    [
                        {
                            "id": f"{action_id}.{_action['id']}",
                            "name": _action.get(
                                "name", f"{action.name} ({_action['id']})"
                            ),
                            "description": action.meta.description,
                            "icon_url": _action.get(
                                "icon_url", action.meta.manifest.get("icon_url", None)
                            ),
                            **({"__webui__": __webui__} if __webui__ else {}),
                        }
                        for _action in actions
                    ]
                )
            else:
                model["actions"].append(
                    {
                        "id": action_id,
                        "name": action.name,
                        "description": action.meta.description,
                        "icon_url": action.meta.manifest.get("icon_url", None),
                        **({"__webui__": __webui__} if __webui__ else {}),
                    }
                )

    app.state.MODELS = {model["id"]: model for model in models}
    webui_app.state.MODELS = app.state.MODELS

    return models
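

# After get_all_models() runs, app.state.MODELS maps model id -> model dict,
# shaped roughly like this (keys vary by provider; example values illustrative):
#
#   {"id": "llama3:latest", "name": "Llama 3", "object": "model",
#    "created": 1700000000, "owned_by": "ollama", "ollama": {...},
#    "info": {...}, "actions": [...]}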


@app.get("/api/models")
async def get_models(user=Depends(get_verified_user)):
    models = await get_all_models()

    # Filter out filter pipelines
    models = [
        model
        for model in models
        if "pipeline" not in model or model["pipeline"].get("type", None) != "filter"
    ]

    if app.state.config.ENABLE_MODEL_FILTER:
        if user.role == "user":
            models = list(
                filter(
                    lambda model: model["id"] in app.state.config.MODEL_FILTER_LIST,
                    models,
                )
            )
            return {"data": models}

    return {"data": models}


@app.post("/api/chat/completions")
async def generate_chat_completions(form_data: dict, user=Depends(get_verified_user)):
    model_id = form_data["model"]

    if model_id not in app.state.MODELS:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail="Model not found",
        )

    if app.state.config.ENABLE_MODEL_FILTER:
        if user.role == "user" and model_id not in app.state.config.MODEL_FILTER_LIST:
            raise HTTPException(
                status_code=status.HTTP_403_FORBIDDEN,
                detail="Model not found",
            )

    model = app.state.MODELS[model_id]

    if model.get("pipe"):
        return await generate_function_chat_completion(form_data, user=user)
    if model["owned_by"] == "ollama":
        # Using /ollama/api/chat endpoint
        form_data = convert_payload_openai_to_ollama(form_data)
        form_data = GenerateChatCompletionForm(**form_data)
        response = await generate_ollama_chat_completion(form_data=form_data, user=user)
        if form_data.stream:
            response.headers["content-type"] = "text/event-stream"
            return StreamingResponse(
                convert_streaming_response_ollama_to_openai(response),
                headers=dict(response.headers),
            )
        else:
            return convert_response_ollama_to_openai(response)
    else:
        return await generate_openai_chat_completion(form_data, user=user)
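

# Example request against this unified endpoint (model id illustrative):
#
#   POST /api/chat/completions
#   Authorization: Bearer <token>
#   {"model": "llama3:latest",
#    "messages": [{"role": "user", "content": "Hello!"}],
#    "stream": true}
#
# The body is OpenAI-shaped for every backend; Ollama-owned models have the
# payload converted on the way in and the response converted back on the way out.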


@app.post("/api/chat/completed")
async def chat_completed(form_data: dict, user=Depends(get_verified_user)):
    data = form_data
    model_id = data["model"]
    if model_id not in app.state.MODELS:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail="Model not found",
        )

    model = app.state.MODELS[model_id]

    sorted_filters = get_sorted_filters(model_id)
    if "pipeline" in model:
        sorted_filters = [model] + sorted_filters

    for filter in sorted_filters:
        r = None
        try:
            urlIdx = filter["urlIdx"]

            url = openai_app.state.config.OPENAI_API_BASE_URLS[urlIdx]
            key = openai_app.state.config.OPENAI_API_KEYS[urlIdx]

            if key != "":
                headers = {"Authorization": f"Bearer {key}"}
                r = requests.post(
                    f"{url}/{filter['id']}/filter/outlet",
                    headers=headers,
                    json={
                        "user": {
                            "id": user.id,
                            "name": user.name,
                            "email": user.email,
                            "role": user.role,
                        },
                        "body": data,
                    },
                )

                r.raise_for_status()
                data = r.json()
        except Exception as e:
            # Handle connection error here
            print(f"Connection error: {e}")

            if r is not None:
                try:
                    res = r.json()
                    if "detail" in res:
                        return JSONResponse(
                            status_code=r.status_code,
                            content=res,
                        )
                except Exception:
                    pass

    __event_emitter__ = get_event_emitter(
        {
            "chat_id": data["chat_id"],
            "message_id": data["id"],
            "session_id": data["session_id"],
        }
    )

    __event_call__ = get_event_call(
        {
            "chat_id": data["chat_id"],
            "message_id": data["id"],
            "session_id": data["session_id"],
        }
    )
    def get_priority(function_id):
        function = Functions.get_function_by_id(function_id)
        if function is not None and hasattr(function, "valves"):
            # TODO: Fix FunctionModel to include valves
            return (function.valves if function.valves else {}).get("priority", 0)
        return 0

    filter_ids = [function.id for function in Functions.get_global_filter_functions()]
    if "info" in model and "meta" in model["info"]:
        filter_ids.extend(model["info"]["meta"].get("filterIds", []))
        filter_ids = list(set(filter_ids))

    enabled_filter_ids = [
        function.id
        for function in Functions.get_functions_by_type("filter", active_only=True)
    ]
    filter_ids = [
        filter_id for filter_id in filter_ids if filter_id in enabled_filter_ids
    ]

    # Sort filter_ids by priority, using the get_priority function
    filter_ids.sort(key=get_priority)

    for filter_id in filter_ids:
        filter = Functions.get_function_by_id(filter_id)
        if not filter:
            continue

        if filter_id in webui_app.state.FUNCTIONS:
            function_module = webui_app.state.FUNCTIONS[filter_id]
        else:
            function_module, _, _ = load_function_module_by_id(filter_id)
            webui_app.state.FUNCTIONS[filter_id] = function_module

        if hasattr(function_module, "valves") and hasattr(function_module, "Valves"):
            valves = Functions.get_function_valves_by_id(filter_id)
            function_module.valves = function_module.Valves(
                **(valves if valves else {})
            )

        if not hasattr(function_module, "outlet"):
            continue
        try:
            outlet = function_module.outlet

            # Get the signature of the function
            sig = inspect.signature(outlet)
            params = {"body": data}

            # Extra parameters to be passed to the function
            extra_params = {
                "__model__": model,
                "__id__": filter_id,
                "__event_emitter__": __event_emitter__,
                "__event_call__": __event_call__,
            }

            # Add the extra params that are contained in the function signature
            for key, value in extra_params.items():
                if key in sig.parameters:
                    params[key] = value

            if "__user__" in sig.parameters:
                __user__ = {
                    "id": user.id,
                    "email": user.email,
                    "name": user.name,
                    "role": user.role,
                }

                try:
                    if hasattr(function_module, "UserValves"):
                        __user__["valves"] = function_module.UserValves(
                            **Functions.get_user_valves_by_id_and_user_id(
                                filter_id, user.id
                            )
                        )
                except Exception as e:
                    print(e)

                params = {**params, "__user__": __user__}

            if inspect.iscoroutinefunction(outlet):
                data = await outlet(**params)
            else:
                data = outlet(**params)

        except Exception as e:
            print(f"Error: {e}")
            return JSONResponse(
                status_code=status.HTTP_400_BAD_REQUEST,
                content={"detail": str(e)},
            )

    return data


@app.post("/api/chat/actions/{action_id}")
async def chat_action(action_id: str, form_data: dict, user=Depends(get_verified_user)):
    if "." in action_id:
        action_id, sub_action_id = action_id.split(".")
    else:
        sub_action_id = None

    action = Functions.get_function_by_id(action_id)
    if not action:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail="Action not found",
        )

    data = form_data
    model_id = data["model"]
    if model_id not in app.state.MODELS:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail="Model not found",
        )
    model = app.state.MODELS[model_id]

    __event_emitter__ = get_event_emitter(
        {
            "chat_id": data["chat_id"],
            "message_id": data["id"],
            "session_id": data["session_id"],
        }
    )
    __event_call__ = get_event_call(
        {
            "chat_id": data["chat_id"],
            "message_id": data["id"],
            "session_id": data["session_id"],
        }
    )

    if action_id in webui_app.state.FUNCTIONS:
        function_module = webui_app.state.FUNCTIONS[action_id]
    else:
        function_module, _, _ = load_function_module_by_id(action_id)
        webui_app.state.FUNCTIONS[action_id] = function_module

    if hasattr(function_module, "valves") and hasattr(function_module, "Valves"):
        valves = Functions.get_function_valves_by_id(action_id)
        function_module.valves = function_module.Valves(**(valves if valves else {}))

    if hasattr(function_module, "action"):
        try:
            action = function_module.action

            # Get the signature of the function
            sig = inspect.signature(action)
            params = {"body": data}

            # Extra parameters to be passed to the function
            extra_params = {
                "__model__": model,
                "__id__": sub_action_id if sub_action_id is not None else action_id,
                "__event_emitter__": __event_emitter__,
                "__event_call__": __event_call__,
            }

            # Add the extra params that are contained in the function signature
            for key, value in extra_params.items():
                if key in sig.parameters:
                    params[key] = value

            if "__user__" in sig.parameters:
                __user__ = {
                    "id": user.id,
                    "email": user.email,
                    "name": user.name,
                    "role": user.role,
                }

                try:
                    if hasattr(function_module, "UserValves"):
                        __user__["valves"] = function_module.UserValves(
                            **Functions.get_user_valves_by_id_and_user_id(
                                action_id, user.id
                            )
                        )
                except Exception as e:
                    print(e)

                params = {**params, "__user__": __user__}

            if inspect.iscoroutinefunction(action):
                data = await action(**params)
            else:
                data = action(**params)

        except Exception as e:
            print(f"Error: {e}")
            return JSONResponse(
                status_code=status.HTTP_400_BAD_REQUEST,
                content={"detail": str(e)},
            )

    return data
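

# Example: POST /api/chat/actions/my_toolkit.visualize resolves the module
# registered as "my_toolkit" and passes "visualize" as __id__, so one action
# module can expose several sub-actions (ids illustrative).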


##################################
#
# Task Endpoints
#
##################################


# TODO: Refactor task API endpoints below into a separate file


@app.get("/api/task/config")
async def get_task_config(user=Depends(get_verified_user)):
    return {
        "TASK_MODEL": app.state.config.TASK_MODEL,
        "TASK_MODEL_EXTERNAL": app.state.config.TASK_MODEL_EXTERNAL,
        "TITLE_GENERATION_PROMPT_TEMPLATE": app.state.config.TITLE_GENERATION_PROMPT_TEMPLATE,
        "TAGS_GENERATION_PROMPT_TEMPLATE": app.state.config.TAGS_GENERATION_PROMPT_TEMPLATE,
        "ENABLE_SEARCH_QUERY": app.state.config.ENABLE_SEARCH_QUERY,
        "SEARCH_QUERY_GENERATION_PROMPT_TEMPLATE": app.state.config.SEARCH_QUERY_GENERATION_PROMPT_TEMPLATE,
        "TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE": app.state.config.TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE,
    }


class TaskConfigForm(BaseModel):
    TASK_MODEL: Optional[str]
    TASK_MODEL_EXTERNAL: Optional[str]
    TITLE_GENERATION_PROMPT_TEMPLATE: str
    TAGS_GENERATION_PROMPT_TEMPLATE: str
    SEARCH_QUERY_GENERATION_PROMPT_TEMPLATE: str
    ENABLE_SEARCH_QUERY: bool
    TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE: str


@app.post("/api/task/config/update")
async def update_task_config(form_data: TaskConfigForm, user=Depends(get_admin_user)):
    app.state.config.TASK_MODEL = form_data.TASK_MODEL
    app.state.config.TASK_MODEL_EXTERNAL = form_data.TASK_MODEL_EXTERNAL
    app.state.config.TITLE_GENERATION_PROMPT_TEMPLATE = (
        form_data.TITLE_GENERATION_PROMPT_TEMPLATE
    )
    app.state.config.TAGS_GENERATION_PROMPT_TEMPLATE = (
        form_data.TAGS_GENERATION_PROMPT_TEMPLATE
    )
    app.state.config.SEARCH_QUERY_GENERATION_PROMPT_TEMPLATE = (
        form_data.SEARCH_QUERY_GENERATION_PROMPT_TEMPLATE
    )
    app.state.config.ENABLE_SEARCH_QUERY = form_data.ENABLE_SEARCH_QUERY
    app.state.config.TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE = (
        form_data.TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE
    )

    return {
        "TASK_MODEL": app.state.config.TASK_MODEL,
        "TASK_MODEL_EXTERNAL": app.state.config.TASK_MODEL_EXTERNAL,
        "TITLE_GENERATION_PROMPT_TEMPLATE": app.state.config.TITLE_GENERATION_PROMPT_TEMPLATE,
        "TAGS_GENERATION_PROMPT_TEMPLATE": app.state.config.TAGS_GENERATION_PROMPT_TEMPLATE,
        "SEARCH_QUERY_GENERATION_PROMPT_TEMPLATE": app.state.config.SEARCH_QUERY_GENERATION_PROMPT_TEMPLATE,
        "ENABLE_SEARCH_QUERY": app.state.config.ENABLE_SEARCH_QUERY,
        "TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE": app.state.config.TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE,
    }


@app.post("/api/task/title/completions")
async def generate_title(form_data: dict, user=Depends(get_verified_user)):
    print("generate_title")

    model_id = form_data["model"]
    if model_id not in app.state.MODELS:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail="Model not found",
        )

    # Check if the user has a custom task model
    # If the user has a custom task model, use that model
    task_model_id = get_task_model_id(model_id)
    print(task_model_id)

    model = app.state.MODELS[task_model_id]

    if app.state.config.TITLE_GENERATION_PROMPT_TEMPLATE != "":
        template = app.state.config.TITLE_GENERATION_PROMPT_TEMPLATE
    else:
        template = """Create a concise, 3-5 word title with an emoji as a title for the prompt in the given language. Suitable Emojis for the summary can be used to enhance understanding but avoid quotation marks or special formatting. RESPOND ONLY WITH THE TITLE TEXT.

Examples of titles:
📉 Stock Market Trends
🍪 Perfect Chocolate Chip Recipe
Evolution of Music Streaming
Remote Work Productivity Tips
Artificial Intelligence in Healthcare
🎮 Video Game Development Insights

Prompt: {{prompt:middletruncate:8000}}"""

    content = title_generation_template(
        template,
        form_data["prompt"],
        {
            "name": user.name,
            "location": user.info.get("location") if user.info else None,
        },
    )

    payload = {
        "model": task_model_id,
        "messages": [{"role": "user", "content": content}],
        "stream": False,
        **(
            {"max_tokens": 50}
            if app.state.MODELS[task_model_id]["owned_by"] == "ollama"
            else {
                "max_completion_tokens": 50,
            }
        ),
        "chat_id": form_data.get("chat_id", None),
        "metadata": {"task": str(TASKS.TITLE_GENERATION), "task_body": form_data},
    }

    log.debug(payload)

    # Handle pipeline filters
    try:
        payload = filter_pipeline(payload, user)
    except Exception as e:
        if len(e.args) > 1:
            return JSONResponse(
                status_code=e.args[0],
                content={"detail": e.args[1]},
            )
        else:
            return JSONResponse(
                status_code=status.HTTP_400_BAD_REQUEST,
                content={"detail": str(e)},
            )
    if "chat_id" in payload:
        del payload["chat_id"]

    return await generate_chat_completions(form_data=payload, user=user)
@app.post("/api/task/tags/completions")
async def generate_chat_tags(form_data: dict, user=Depends(get_verified_user)):
    print("generate_chat_tags")

    model_id = form_data["model"]
    if model_id not in app.state.MODELS:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail="Model not found",
        )

    # Use the user's custom task model if one is configured
    task_model_id = get_task_model_id(model_id)
    print(task_model_id)

    if app.state.config.TAGS_GENERATION_PROMPT_TEMPLATE != "":
        template = app.state.config.TAGS_GENERATION_PROMPT_TEMPLATE
    else:
        template = """### Task:
Generate 1-3 broad tags categorizing the main themes of the chat history, along with 1-3 more specific subtopic tags.

### Guidelines:
- Start with high-level domains (e.g. Science, Technology, Philosophy, Arts, Politics, Business, Health, Sports, Entertainment, Education)
- Consider including relevant subfields/subdomains if they are strongly represented throughout the conversation
- If content is too short (less than 3 messages) or too diverse, use only ["General"]
- Use the chat's primary language; default to English if multilingual
- Prioritize accuracy over specificity

### Output:
JSON format: { "tags": ["tag1", "tag2", "tag3"] }

### Chat History:
<chat_history>
{{MESSAGES:END:6}}
</chat_history>"""

    content = tags_generation_template(
        template, form_data["messages"], {"name": user.name}
    )
    print("content", content)

    payload = {
        "model": task_model_id,
        "messages": [{"role": "user", "content": content}],
        "stream": False,
        "metadata": {"task": str(TASKS.TAGS_GENERATION), "task_body": form_data},
    }
    log.debug(payload)

    # Handle pipeline filters
    try:
        payload = filter_pipeline(payload, user)
    except Exception as e:
        if len(e.args) > 1:
            return JSONResponse(
                status_code=e.args[0],
                content={"detail": e.args[1]},
            )
        else:
            return JSONResponse(
                status_code=status.HTTP_400_BAD_REQUEST,
                content={"detail": str(e)},
            )
    if "chat_id" in payload:
        del payload["chat_id"]

    return await generate_chat_completions(form_data=payload, user=user)
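

# Task endpoint: decide whether a web search is warranted and, if so, emit a
# short query (completion capped at 30 tokens; an empty string means no
# search). Returns 400 when ENABLE_SEARCH_QUERY is disabled in the config.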
@app.post("/api/task/query/completions")
async def generate_search_query(form_data: dict, user=Depends(get_verified_user)):
    print("generate_search_query")

    if not app.state.config.ENABLE_SEARCH_QUERY:
        raise HTTPException(
            status_code=status.HTTP_400_BAD_REQUEST,
            detail="Search query generation is disabled",
        )

    model_id = form_data["model"]
    if model_id not in app.state.MODELS:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail="Model not found",
        )

    # Use the user's custom task model if one is configured
    task_model_id = get_task_model_id(model_id)
    print(task_model_id)

    model = app.state.MODELS[task_model_id]

    if app.state.config.SEARCH_QUERY_GENERATION_PROMPT_TEMPLATE != "":
        template = app.state.config.SEARCH_QUERY_GENERATION_PROMPT_TEMPLATE
    else:
        template = """Given the user's message and interaction history, decide if a web search is necessary. You must be concise and exclusively provide a search query if one is necessary. Refrain from verbose responses or any additional commentary. Prefer suggesting a search if uncertain to provide comprehensive or updated information. If a search isn't needed at all, respond with an empty string. Default to a search query when in doubt. Today's date is {{CURRENT_DATE}}.

User Message:
{{prompt:end:4000}}

Interaction History:
{{MESSAGES:END:6}}

Search Query:"""

    content = search_query_generation_template(
        template, form_data["messages"], {"name": user.name}
    )
    print("content", content)

    payload = {
        "model": task_model_id,
        "messages": [{"role": "user", "content": content}],
        "stream": False,
        **(
            {"max_tokens": 30}
            if app.state.MODELS[task_model_id]["owned_by"] == "ollama"
            else {"max_completion_tokens": 30}
        ),
        "metadata": {"task": str(TASKS.QUERY_GENERATION), "task_body": form_data},
    }
    log.debug(payload)

    # Handle pipeline filters
    try:
        payload = filter_pipeline(payload, user)
    except Exception as e:
        if len(e.args) > 1:
            return JSONResponse(
                status_code=e.args[0],
                content={"detail": e.args[1]},
            )
        else:
            return JSONResponse(
                status_code=status.HTTP_400_BAD_REQUEST,
                content={"detail": str(e)},
            )
    if "chat_id" in payload:
        del payload["chat_id"]

    return await generate_chat_completions(form_data=payload, user=user)
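

# Task endpoint: pick a single emoji reflecting the emotional tone of the
# given prompt. It reuses title_generation_template for prompt interpolation,
# and the tiny 4-token completion cap keeps the response to roughly one emoji.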
@app.post("/api/task/emoji/completions")
async def generate_emoji(form_data: dict, user=Depends(get_verified_user)):
    print("generate_emoji")

    model_id = form_data["model"]
    if model_id not in app.state.MODELS:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail="Model not found",
        )

    # Use the user's custom task model if one is configured
    task_model_id = get_task_model_id(model_id)
    print(task_model_id)

    model = app.state.MODELS[task_model_id]

    template = '''
Your task is to reflect the speaker's likely facial expression through a fitting emoji. Interpret emotions from the message and reflect their facial expression using fitting, diverse emojis (e.g., 😊, 😢, 😡, 😱).

Message: """{{prompt}}"""
'''
    content = title_generation_template(
        template,
        form_data["prompt"],
        {
            "name": user.name,
            "location": user.info.get("location") if user.info else None,
        },
    )

    payload = {
        "model": task_model_id,
        "messages": [{"role": "user", "content": content}],
        "stream": False,
        **(
            {"max_tokens": 4}
            if app.state.MODELS[task_model_id]["owned_by"] == "ollama"
            else {"max_completion_tokens": 4}
        ),
        "chat_id": form_data.get("chat_id", None),
        "metadata": {"task": str(TASKS.EMOJI_GENERATION), "task_body": form_data},
    }
    log.debug(payload)

    # Handle pipeline filters
    try:
        payload = filter_pipeline(payload, user)
    except Exception as e:
        if len(e.args) > 1:
            return JSONResponse(
                status_code=e.args[0],
                content={"detail": e.args[1]},
            )
        else:
            return JSONResponse(
                status_code=status.HTTP_400_BAD_REQUEST,
                content={"detail": str(e)},
            )
    if "chat_id" in payload:
        del payload["chat_id"]

    return await generate_chat_completions(form_data=payload, user=user)
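

# Task endpoint: mixture-of-agents (MoA) synthesis. Merges several candidate
# model responses into one refined answer; unlike the other task endpoints,
# streaming is honored when the caller requests it.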
@app.post("/api/task/moa/completions")
async def generate_moa_response(form_data: dict, user=Depends(get_verified_user)):
    print("generate_moa_response")

    model_id = form_data["model"]
    if model_id not in app.state.MODELS:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail="Model not found",
        )

    # Use the user's custom task model if one is configured
    task_model_id = get_task_model_id(model_id)
    print(task_model_id)

    model = app.state.MODELS[task_model_id]

    template = """You have been provided with a set of responses from various models to the latest user query: "{{prompt}}"

Your task is to synthesize these responses into a single, high-quality response. It is crucial to critically evaluate the information provided in these responses, recognizing that some of it may be biased or incorrect. Your response should not simply replicate the given answers but should offer a refined, accurate, and comprehensive reply to the instruction. Ensure your response is well-structured, coherent, and adheres to the highest standards of accuracy and reliability.

Responses from models: {{responses}}"""

    content = moa_response_generation_template(
        template,
        form_data["prompt"],
        form_data["responses"],
    )

    payload = {
        "model": task_model_id,
        "messages": [{"role": "user", "content": content}],
        "stream": form_data.get("stream", False),
        "chat_id": form_data.get("chat_id", None),
        "metadata": {
            "task": str(TASKS.MOA_RESPONSE_GENERATION),
            "task_body": form_data,
        },
    }
    log.debug(payload)

    try:
        payload = filter_pipeline(payload, user)
    except Exception as e:
        if len(e.args) > 1:
            return JSONResponse(
                status_code=e.args[0],
                content={"detail": e.args[1]},
            )
        else:
            return JSONResponse(
                status_code=status.HTTP_400_BAD_REQUEST,
                content={"detail": str(e)},
            )
    if "chat_id" in payload:
        del payload["chat_id"]

    return await generate_chat_completions(form_data=payload, user=user)


##################################
#
# Pipelines Endpoints
#
##################################

# TODO: Refactor pipelines API endpoints below into a separate file
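
# Lists the OpenAI-compatible connections that report themselves as pipeline
# servers (their raw model listing includes a "pipelines" key), returning
# each server's base URL and index.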
@app.get("/api/pipelines/list")
async def get_pipelines_list(user=Depends(get_admin_user)):
    responses = await get_openai_models(raw=True)
    print(responses)

    urlIdxs = [
        idx
        for idx, response in enumerate(responses)
        if response is not None and "pipelines" in response
    ]

    return {
        "data": [
            {
                "url": openai_app.state.config.OPENAI_API_BASE_URLS[urlIdx],
                "idx": urlIdx,
            }
            for urlIdx in urlIdxs
        ]
    }
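

# Uploads a local .py pipeline file to the selected pipeline server. The file
# is staged under CACHE_DIR/pipelines, forwarded via multipart POST, and
# always deleted afterwards (see the finally block).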
@app.post("/api/pipelines/upload")
async def upload_pipeline(
    urlIdx: int = Form(...), file: UploadFile = File(...), user=Depends(get_admin_user)
):
    print("upload_pipeline", urlIdx, file.filename)

    # Check if the uploaded file is a Python file
    if not (file.filename and file.filename.endswith(".py")):
        raise HTTPException(
            status_code=status.HTTP_400_BAD_REQUEST,
            detail="Only Python (.py) files are allowed.",
        )

    upload_folder = f"{CACHE_DIR}/pipelines"
    os.makedirs(upload_folder, exist_ok=True)
    file_path = os.path.join(upload_folder, file.filename)

    r = None
    try:
        # Save the uploaded file
        with open(file_path, "wb") as buffer:
            shutil.copyfileobj(file.file, buffer)

        url = openai_app.state.config.OPENAI_API_BASE_URLS[urlIdx]
        key = openai_app.state.config.OPENAI_API_KEYS[urlIdx]

        headers = {"Authorization": f"Bearer {key}"}
        with open(file_path, "rb") as f:
            files = {"file": f}
            r = requests.post(f"{url}/pipelines/upload", headers=headers, files=files)

        r.raise_for_status()
        data = r.json()
        return {**data}
    except Exception as e:
        # Handle connection error here
        print(f"Connection error: {e}")

        detail = "Pipeline not found"
        status_code = status.HTTP_404_NOT_FOUND
        if r is not None:
            status_code = r.status_code
            try:
                res = r.json()
                if "detail" in res:
                    detail = res["detail"]
            except Exception:
                pass

        raise HTTPException(
            status_code=status_code,
            detail=detail,
        )
    finally:
        # Ensure the file is deleted after the upload is completed or on failure
        if os.path.exists(file_path):
            os.remove(file_path)


class AddPipelineForm(BaseModel):
    url: str
    urlIdx: int
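

# Registers a pipeline on the selected server by URL; the server fetches and
# installs it, and its JSON response is passed through to the client.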
@app.post("/api/pipelines/add")
async def add_pipeline(form_data: AddPipelineForm, user=Depends(get_admin_user)):
    r = None
    try:
        urlIdx = form_data.urlIdx

        url = openai_app.state.config.OPENAI_API_BASE_URLS[urlIdx]
        key = openai_app.state.config.OPENAI_API_KEYS[urlIdx]

        headers = {"Authorization": f"Bearer {key}"}
        r = requests.post(
            f"{url}/pipelines/add", headers=headers, json={"url": form_data.url}
        )

        r.raise_for_status()
        data = r.json()
        return {**data}
    except Exception as e:
        # Handle connection error here
        print(f"Connection error: {e}")

        detail = "Pipeline not found"
        if r is not None:
            try:
                res = r.json()
                if "detail" in res:
                    detail = res["detail"]
            except Exception:
                pass

        raise HTTPException(
            status_code=(r.status_code if r is not None else status.HTTP_404_NOT_FOUND),
            detail=detail,
        )


class DeletePipelineForm(BaseModel):
    id: str
    urlIdx: int
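

# Removes an installed pipeline, identified by id, from the selected server.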
@app.delete("/api/pipelines/delete")
async def delete_pipeline(form_data: DeletePipelineForm, user=Depends(get_admin_user)):
    r = None
    try:
        urlIdx = form_data.urlIdx

        url = openai_app.state.config.OPENAI_API_BASE_URLS[urlIdx]
        key = openai_app.state.config.OPENAI_API_KEYS[urlIdx]

        headers = {"Authorization": f"Bearer {key}"}
        r = requests.delete(
            f"{url}/pipelines/delete", headers=headers, json={"id": form_data.id}
        )

        r.raise_for_status()
        data = r.json()
        return {**data}
    except Exception as e:
        # Handle connection error here
        print(f"Connection error: {e}")

        detail = "Pipeline not found"
        if r is not None:
            try:
                res = r.json()
                if "detail" in res:
                    detail = res["detail"]
            except Exception:
                pass

        raise HTTPException(
            status_code=(r.status_code if r is not None else status.HTTP_404_NOT_FOUND),
            detail=detail,
        )
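

# Proxies the selected server's /pipelines listing. Note that urlIdx defaults
# to None, so a request without it falls into the generic error path (indexing
# with None raises, r stays None) and surfaces as a 404.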
@app.get("/api/pipelines")
async def get_pipelines(urlIdx: Optional[int] = None, user=Depends(get_admin_user)):
    r = None
    try:
        url = openai_app.state.config.OPENAI_API_BASE_URLS[urlIdx]
        key = openai_app.state.config.OPENAI_API_KEYS[urlIdx]

        headers = {"Authorization": f"Bearer {key}"}
        r = requests.get(f"{url}/pipelines", headers=headers)

        r.raise_for_status()
        data = r.json()
        return {**data}
    except Exception as e:
        # Handle connection error here
        print(f"Connection error: {e}")

        detail = "Pipeline not found"
        if r is not None:
            try:
                res = r.json()
                if "detail" in res:
                    detail = res["detail"]
            except Exception:
                pass

        raise HTTPException(
            status_code=(r.status_code if r is not None else status.HTTP_404_NOT_FOUND),
            detail=detail,
        )
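

# Fetches the current valve (configuration) values for a single pipeline
# from the selected server.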
@app.get("/api/pipelines/{pipeline_id}/valves")
async def get_pipeline_valves(
    urlIdx: Optional[int],
    pipeline_id: str,
    user=Depends(get_admin_user),
):
    r = None
    try:
        url = openai_app.state.config.OPENAI_API_BASE_URLS[urlIdx]
        key = openai_app.state.config.OPENAI_API_KEYS[urlIdx]

        headers = {"Authorization": f"Bearer {key}"}
        r = requests.get(f"{url}/{pipeline_id}/valves", headers=headers)

        r.raise_for_status()
        data = r.json()
        return {**data}
    except Exception as e:
        # Handle connection error here
        print(f"Connection error: {e}")

        detail = "Pipeline not found"
        if r is not None:
            try:
                res = r.json()
                if "detail" in res:
                    detail = res["detail"]
            except Exception:
                pass

        raise HTTPException(
            status_code=(r.status_code if r is not None else status.HTTP_404_NOT_FOUND),
            detail=detail,
        )
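

# Fetches the spec describing a pipeline's valves from the selected server,
# presumably consumed by a client to render a settings form.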
@app.get("/api/pipelines/{pipeline_id}/valves/spec")
async def get_pipeline_valves_spec(
    urlIdx: Optional[int],
    pipeline_id: str,
    user=Depends(get_admin_user),
):
    r = None
    try:
        url = openai_app.state.config.OPENAI_API_BASE_URLS[urlIdx]
        key = openai_app.state.config.OPENAI_API_KEYS[urlIdx]

        headers = {"Authorization": f"Bearer {key}"}
        r = requests.get(f"{url}/{pipeline_id}/valves/spec", headers=headers)

        r.raise_for_status()
        data = r.json()
        return {**data}
    except Exception as e:
        # Handle connection error here
        print(f"Connection error: {e}")

        detail = "Pipeline not found"
        if r is not None:
            try:
                res = r.json()
                if "detail" in res:
                    detail = res["detail"]
            except Exception:
                pass

        raise HTTPException(
            status_code=(r.status_code if r is not None else status.HTTP_404_NOT_FOUND),
            detail=detail,
        )
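

# Pushes updated valve values for a pipeline to the selected server and
# returns the server's response verbatim.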
@app.post("/api/pipelines/{pipeline_id}/valves/update")
async def update_pipeline_valves(
    urlIdx: Optional[int],
    pipeline_id: str,
    form_data: dict,
    user=Depends(get_admin_user),
):
    r = None
    try:
        url = openai_app.state.config.OPENAI_API_BASE_URLS[urlIdx]
        key = openai_app.state.config.OPENAI_API_KEYS[urlIdx]

        headers = {"Authorization": f"Bearer {key}"}
        r = requests.post(
            f"{url}/{pipeline_id}/valves/update",
            headers=headers,
            json={**form_data},
        )

        r.raise_for_status()
        data = r.json()
        return {**data}
    except Exception as e:
        # Handle connection error here
        print(f"Connection error: {e}")

        detail = "Pipeline not found"
        if r is not None:
            try:
                res = r.json()
                if "detail" in res:
                    detail = res["detail"]
            except Exception:
                pass

        raise HTTPException(
            status_code=(r.status_code if r is not None else status.HTTP_404_NOT_FOUND),
            detail=detail,
        )


##################################
#
# Config Endpoints
#
##################################
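
# Public application config. Unauthenticated callers get only the basic
# status/name/version/auth fields; a valid token cookie unlocks the extended
# feature flags, defaults, audio, file, and permission sections below.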
@app.get("/api/config")
async def get_app_config(request: Request):
    user = None
    if "token" in request.cookies:
        token = request.cookies.get("token")
        data = decode_token(token)
        if data is not None and "id" in data:
            user = Users.get_user_by_id(data["id"])

    return {
        "status": True,
        "name": WEBUI_NAME,
        "version": VERSION,
        "default_locale": str(DEFAULT_LOCALE),
        "oauth": {
            "providers": {
                name: config.get("name", name)
                for name, config in OAUTH_PROVIDERS.items()
            }
        },
        "features": {
            "auth": WEBUI_AUTH,
            "auth_trusted_header": bool(webui_app.state.AUTH_TRUSTED_EMAIL_HEADER),
            "enable_signup": webui_app.state.config.ENABLE_SIGNUP,
            "enable_login_form": webui_app.state.config.ENABLE_LOGIN_FORM,
            **(
                {
                    "enable_web_search": retrieval_app.state.config.ENABLE_RAG_WEB_SEARCH,
                    "enable_image_generation": images_app.state.config.ENABLED,
                    "enable_community_sharing": webui_app.state.config.ENABLE_COMMUNITY_SHARING,
                    "enable_message_rating": webui_app.state.config.ENABLE_MESSAGE_RATING,
                    "enable_admin_export": ENABLE_ADMIN_EXPORT,
                    "enable_admin_chat_access": ENABLE_ADMIN_CHAT_ACCESS,
                }
                if user is not None
                else {}
            ),
        },
        **(
            {
                "default_models": webui_app.state.config.DEFAULT_MODELS,
                "default_prompt_suggestions": webui_app.state.config.DEFAULT_PROMPT_SUGGESTIONS,
                "audio": {
                    "tts": {
                        "engine": audio_app.state.config.TTS_ENGINE,
                        "voice": audio_app.state.config.TTS_VOICE,
                        "split_on": audio_app.state.config.TTS_SPLIT_ON,
                    },
                    "stt": {
                        "engine": audio_app.state.config.STT_ENGINE,
                    },
                },
                "file": {
                    "max_size": retrieval_app.state.config.FILE_MAX_SIZE,
                    "max_count": retrieval_app.state.config.FILE_MAX_COUNT,
                },
                "permissions": {**webui_app.state.config.USER_PERMISSIONS},
            }
            if user is not None
            else {}
        ),
    }
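

# Admin-only getter/setter pair for the model filter: when enabled, only the
# models in MODEL_FILTER_LIST are served to users.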
@app.get("/api/config/model/filter")
async def get_model_filter_config(user=Depends(get_admin_user)):
    return {
        "enabled": app.state.config.ENABLE_MODEL_FILTER,
        "models": app.state.config.MODEL_FILTER_LIST,
    }


class ModelFilterConfigForm(BaseModel):
    enabled: bool
    models: list[str]


@app.post("/api/config/model/filter")
async def update_model_filter_config(
    form_data: ModelFilterConfigForm, user=Depends(get_admin_user)
):
    app.state.config.ENABLE_MODEL_FILTER = form_data.enabled
    app.state.config.MODEL_FILTER_LIST = form_data.models

    return {
        "enabled": app.state.config.ENABLE_MODEL_FILTER,
        "models": app.state.config.MODEL_FILTER_LIST,
    }


# TODO: webhook endpoint should be under config endpoints
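# Admin-only getter/setter pair for the outgoing webhook URL; updates are
# mirrored onto webui_app's state so the sub-app sees the new value.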
@app.get("/api/webhook")
async def get_webhook_url(user=Depends(get_admin_user)):
    return {
        "url": app.state.config.WEBHOOK_URL,
    }


class UrlForm(BaseModel):
    url: str


@app.post("/api/webhook")
async def update_webhook_url(form_data: UrlForm, user=Depends(get_admin_user)):
    app.state.config.WEBHOOK_URL = form_data.url
    webui_app.state.WEBHOOK_URL = app.state.config.WEBHOOK_URL
    return {"url": app.state.config.WEBHOOK_URL}


@app.get("/api/version")
async def get_app_version():
    return {
        "version": VERSION,
    }


@app.get("/api/changelog")
async def get_app_changelog():
    return {key: CHANGELOG[key] for idx, key in enumerate(CHANGELOG) if idx < 5}
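

# Checks GitHub for the latest release tag (1 s timeout); any failure, or
# OFFLINE_MODE, falls back to reporting the current version as latest.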
@app.get("/api/version/updates")
async def get_app_latest_release_version():
    if OFFLINE_MODE:
        log.debug(
            "Offline mode is enabled, returning current version as latest version"
        )
        return {"current": VERSION, "latest": VERSION}

    try:
        timeout = aiohttp.ClientTimeout(total=1)
        async with aiohttp.ClientSession(timeout=timeout, trust_env=True) as session:
            async with session.get(
                "https://api.github.com/repos/open-webui/open-webui/releases/latest"
            ) as response:
                response.raise_for_status()
                data = await response.json()
                latest_version = data["tag_name"]

                return {"current": VERSION, "latest": latest_version[1:]}
    except Exception as e:
        log.debug(e)
        return {"current": VERSION, "latest": VERSION}


############################
# OAuth Login & Callback
############################

# SessionMiddleware is used by authlib for OAuth
if len(OAUTH_PROVIDERS) > 0:
    app.add_middleware(
        SessionMiddleware,
        secret_key=WEBUI_SECRET_KEY,
        session_cookie="oui-session",
        same_site=WEBUI_SESSION_COOKIE_SAME_SITE,
        https_only=WEBUI_SESSION_COOKIE_SECURE,
    )


@app.get("/oauth/{provider}/login")
async def oauth_login(provider: str, request: Request):
    return await oauth_manager.handle_login(provider, request)


# OAuth login logic is as follows:
# 1. Attempt to find a user with a matching subject ID, tied to the provider
# 2. If OAUTH_MERGE_ACCOUNTS_BY_EMAIL is true, find a user with the email address provided via OAuth
#    - This is considered insecure in general, as OAuth providers do not always verify email addresses
# 3. If there is no user, and ENABLE_OAUTH_SIGNUP is true, create a user
#    - Email addresses are considered unique, so registration fails if the email address is already taken
@app.get("/oauth/{provider}/callback")
async def oauth_callback(provider: str, request: Request, response: Response):
    return await oauth_manager.handle_callback(provider, request, response)


@app.get("/manifest.json")
async def get_manifest_json():
    return {
        "name": WEBUI_NAME,
        "short_name": WEBUI_NAME,
        "description": "Open WebUI is an open, extensible, user-friendly interface for AI that adapts to your workflow.",
        "start_url": "/",
        "display": "standalone",
        "background_color": "#343541",
        "orientation": "any",
        "icons": [
            {
                "src": "/static/logo.png",
                "type": "image/png",
                "sizes": "500x500",
                "purpose": "any",
            },
            {
                "src": "/static/logo.png",
                "type": "image/png",
                "sizes": "500x500",
                "purpose": "maskable",
            },
        ],
    }
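

# Serves an OpenSearch description document so browsers can register this
# instance as a search engine; the {searchTerms} token is left literal in the
# template attribute for the browser to substitute.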
@app.get("/opensearch.xml")
async def get_opensearch_xml():
    xml_content = rf"""
    <OpenSearchDescription xmlns="http://a9.com/-/spec/opensearch/1.1/" xmlns:moz="http://www.mozilla.org/2006/browser/search/">
    <ShortName>{WEBUI_NAME}</ShortName>
    <Description>Search {WEBUI_NAME}</Description>
    <InputEncoding>UTF-8</InputEncoding>
    <Image width="16" height="16" type="image/x-icon">{WEBUI_URL}/static/favicon.png</Image>
    <Url type="text/html" method="get" template="{WEBUI_URL}/?q={"{searchTerms}"}"/>
    <moz:SearchForm>{WEBUI_URL}</moz:SearchForm>
    </OpenSearchDescription>
    """
    return Response(content=xml_content, media_type="application/xml")


@app.get("/health")
async def healthcheck():
    return {"status": True}


@app.get("/health/db")
async def healthcheck_with_db():
    Session.execute(text("SELECT 1;")).all()
    return {"status": True}
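

# Static assets and the built frontend. The SPA mount is registered last:
# it claims "/" and would otherwise shadow the API routes defined above.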
app.mount("/static", StaticFiles(directory=STATIC_DIR), name="static")
app.mount("/cache", StaticFiles(directory=CACHE_DIR), name="cache")

if os.path.exists(FRONTEND_BUILD_DIR):
    mimetypes.add_type("text/javascript", ".js")
    app.mount(
        "/",
        SPAStaticFiles(directory=FRONTEND_BUILD_DIR, html=True),
        name="spa-static-files",
    )
else:
    log.warning(
        f"Frontend build directory not found at '{FRONTEND_BUILD_DIR}'. Serving API only."
    )