import base64
import inspect
import json
import logging
import mimetypes
import os
import shutil
import sys
import time
import uuid
from contextlib import asynccontextmanager
from typing import Optional

import aiohttp
import requests

from open_webui.apps.audio.main import app as audio_app
from open_webui.apps.images.main import app as images_app
from open_webui.apps.ollama.main import app as ollama_app
from open_webui.apps.ollama.main import (
    GenerateChatCompletionForm,
    generate_chat_completion as generate_ollama_chat_completion,
    generate_openai_chat_completion as generate_ollama_openai_chat_completion,
)
from open_webui.apps.ollama.main import get_all_models as get_ollama_models
from open_webui.apps.openai.main import app as openai_app
from open_webui.apps.openai.main import (
    generate_chat_completion as generate_openai_chat_completion,
)
from open_webui.apps.openai.main import get_all_models as get_openai_models
from open_webui.apps.rag.main import app as rag_app
from open_webui.apps.rag.utils import get_rag_context, rag_template
from open_webui.apps.socket.main import app as socket_app
from open_webui.apps.socket.main import get_event_call, get_event_emitter
from open_webui.apps.webui.internal.db import Session
from open_webui.apps.webui.main import app as webui_app
from open_webui.apps.webui.main import (
    generate_function_chat_completion,
    get_pipe_models,
)
from open_webui.apps.webui.models.auths import Auths
from open_webui.apps.webui.models.functions import Functions
from open_webui.apps.webui.models.models import Models
from open_webui.apps.webui.models.users import UserModel, Users
from open_webui.apps.webui.utils import load_function_module_by_id
from authlib.integrations.starlette_client import OAuth
from authlib.oidc.core import UserInfo
from open_webui.config import (
    CACHE_DIR,
    CORS_ALLOW_ORIGIN,
    DEFAULT_LOCALE,
    ENABLE_ADMIN_CHAT_ACCESS,
    ENABLE_ADMIN_EXPORT,
    ENABLE_MODEL_FILTER,
    ENABLE_OAUTH_SIGNUP,
    ENABLE_OLLAMA_API,
    ENABLE_OPENAI_API,
    ENV,
    FRONTEND_BUILD_DIR,
    MODEL_FILTER_LIST,
    OAUTH_MERGE_ACCOUNTS_BY_EMAIL,
    OAUTH_PROVIDERS,
    ENABLE_SEARCH_QUERY,
    SEARCH_QUERY_GENERATION_PROMPT_TEMPLATE,
    STATIC_DIR,
    TASK_MODEL,
    TASK_MODEL_EXTERNAL,
    TITLE_GENERATION_PROMPT_TEMPLATE,
    TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE,
    WEBHOOK_URL,
    WEBUI_AUTH,
    WEBUI_NAME,
    AppConfig,
    run_migrations,
)
from open_webui.constants import ERROR_MESSAGES, TASKS, WEBHOOK_MESSAGES
from open_webui.env import (
    CHANGELOG,
    GLOBAL_LOG_LEVEL,
    SAFE_MODE,
    SRC_LOG_LEVELS,
    VERSION,
    WEBUI_BUILD_HASH,
    WEBUI_SECRET_KEY,
    WEBUI_SESSION_COOKIE_SAME_SITE,
    WEBUI_SESSION_COOKIE_SECURE,
    WEBUI_URL,
)
from fastapi import (
    Depends,
    FastAPI,
    File,
    Form,
    HTTPException,
    Request,
    UploadFile,
    status,
)
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import JSONResponse
from fastapi.staticfiles import StaticFiles
from pydantic import BaseModel
from sqlalchemy import text
from starlette.exceptions import HTTPException as StarletteHTTPException
from starlette.middleware.base import BaseHTTPMiddleware
from starlette.middleware.sessions import SessionMiddleware
from starlette.responses import RedirectResponse, Response, StreamingResponse

from open_webui.utils.security_headers import SecurityHeadersMiddleware
from open_webui.utils.misc import (
    add_or_update_system_message,
    get_last_user_message,
    parse_duration,
    prepend_to_first_user_message_content,
)
from open_webui.utils.task import (
    moa_response_generation_template,
    search_query_generation_template,
    title_generation_template,
    tools_function_calling_generation_template,
)
from open_webui.utils.tools import get_tools
from open_webui.utils.utils import (
    create_token,
    decode_token,
    get_admin_user,
    get_current_user,
    get_http_authorization_cred,
    get_password_hash,
    get_verified_user,
)
from open_webui.utils.webhook import post_webhook
from open_webui.utils.payload import convert_payload_openai_to_ollama
from open_webui.utils.response import (
    convert_response_ollama_to_openai,
    convert_streaming_response_ollama_to_openai,
)


if SAFE_MODE:
    print("SAFE MODE ENABLED")
    Functions.deactivate_all_functions()

logging.basicConfig(stream=sys.stdout, level=GLOBAL_LOG_LEVEL)
log = logging.getLogger(__name__)
log.setLevel(SRC_LOG_LEVELS["MAIN"])
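

# SPAStaticFiles wraps StaticFiles so unknown paths fall back to index.html,
# letting the single-page frontend handle client-side routing.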
class SPAStaticFiles(StaticFiles):
    async def get_response(self, path: str, scope):
        try:
            return await super().get_response(path, scope)
        except (HTTPException, StarletteHTTPException) as ex:
            if ex.status_code == 404:
                return await super().get_response("index.html", scope)
            else:
                raise ex


print(
    rf"""
  ___                    __        __   _     _   _ ___
 / _ \ _ __   ___ _ __   \ \      / /__| |__ | | | |_ _|
| | | | '_ \ / _ \ '_ \   \ \ /\ / / _ \ '_ \| | | || |
| |_| | |_) |  __/ | | |   \ V  V /  __/ |_) | |_| || |
 \___/| .__/ \___|_| |_|    \_/\_/ \___|_.__/ \___/|___|
      |_|

v{VERSION} - building the best open-source AI user interface.
{f"Commit: {WEBUI_BUILD_HASH}" if WEBUI_BUILD_HASH != "dev-build" else ""}
https://github.com/open-webui/open-webui
"""
)


@asynccontextmanager
async def lifespan(app: FastAPI):
    run_migrations()
    yield


app = FastAPI(
    docs_url="/docs" if ENV == "dev" else None, redoc_url=None, lifespan=lifespan
)

app.state.config = AppConfig()

app.state.config.ENABLE_OPENAI_API = ENABLE_OPENAI_API
app.state.config.ENABLE_OLLAMA_API = ENABLE_OLLAMA_API

app.state.config.ENABLE_MODEL_FILTER = ENABLE_MODEL_FILTER
app.state.config.MODEL_FILTER_LIST = MODEL_FILTER_LIST

app.state.config.WEBHOOK_URL = WEBHOOK_URL

app.state.config.TASK_MODEL = TASK_MODEL
app.state.config.TASK_MODEL_EXTERNAL = TASK_MODEL_EXTERNAL
app.state.config.TITLE_GENERATION_PROMPT_TEMPLATE = TITLE_GENERATION_PROMPT_TEMPLATE
app.state.config.SEARCH_QUERY_GENERATION_PROMPT_TEMPLATE = (
    SEARCH_QUERY_GENERATION_PROMPT_TEMPLATE
)
app.state.config.ENABLE_SEARCH_QUERY = ENABLE_SEARCH_QUERY
app.state.config.TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE = (
    TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE
)

app.state.MODELS = {}


##################################
#
# ChatCompletion Middleware
#
##################################


def get_task_model_id(default_model_id):
    # Set the task model
    task_model_id = default_model_id

    # Check if the user has a custom task model and use that model
    if app.state.MODELS[task_model_id]["owned_by"] == "ollama":
        if (
            app.state.config.TASK_MODEL
            and app.state.config.TASK_MODEL in app.state.MODELS
        ):
            task_model_id = app.state.config.TASK_MODEL
    else:
        if (
            app.state.config.TASK_MODEL_EXTERNAL
            and app.state.config.TASK_MODEL_EXTERNAL in app.state.MODELS
        ):
            task_model_id = app.state.config.TASK_MODEL_EXTERNAL

    return task_model_id


def get_filter_function_ids(model):
    def get_priority(function_id):
        function = Functions.get_function_by_id(function_id)
        if function is not None and hasattr(function, "valves"):
            # TODO: Fix FunctionModel
            return (function.valves if function.valves else {}).get("priority", 0)
        return 0

    filter_ids = [function.id for function in Functions.get_global_filter_functions()]
    if "info" in model and "meta" in model["info"]:
        filter_ids.extend(model["info"]["meta"].get("filterIds", []))
        filter_ids = list(set(filter_ids))

    enabled_filter_ids = [
        function.id
        for function in Functions.get_functions_by_type("filter", active_only=True)
    ]

    filter_ids = [
        filter_id for filter_id in filter_ids if filter_id in enabled_filter_ids
    ]

    filter_ids.sort(key=get_priority)
    return filter_ids
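

# Runs each enabled filter function's inlet hook over the request body in
# priority order, injecting only the extra parameters each hook's signature
# actually accepts.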
async def chat_completion_filter_functions_handler(body, model, extra_params):
    skip_files = None

    filter_ids = get_filter_function_ids(model)
    for filter_id in filter_ids:
        filter = Functions.get_function_by_id(filter_id)
        if not filter:
            continue

        if filter_id in webui_app.state.FUNCTIONS:
            function_module = webui_app.state.FUNCTIONS[filter_id]
        else:
            function_module, _, _ = load_function_module_by_id(filter_id)
            webui_app.state.FUNCTIONS[filter_id] = function_module

        # Check if the function has a file_handler variable
        if hasattr(function_module, "file_handler"):
            skip_files = function_module.file_handler

        if hasattr(function_module, "valves") and hasattr(function_module, "Valves"):
            valves = Functions.get_function_valves_by_id(filter_id)
            function_module.valves = function_module.Valves(
                **(valves if valves else {})
            )

        if not hasattr(function_module, "inlet"):
            continue

        try:
            inlet = function_module.inlet

            # Get the signature of the function
            sig = inspect.signature(inlet)
            params = {"body": body} | {
                k: v
                for k, v in {
                    **extra_params,
                    "__model__": model,
                    "__id__": filter_id,
                }.items()
                if k in sig.parameters
            }

            if "__user__" in params and hasattr(function_module, "UserValves"):
                try:
                    params["__user__"]["valves"] = function_module.UserValves(
                        **Functions.get_user_valves_by_id_and_user_id(
                            filter_id, params["__user__"]["id"]
                        )
                    )
                except Exception as e:
                    print(e)

            if inspect.iscoroutinefunction(inlet):
                body = await inlet(**params)
            else:
                body = inlet(**params)

        except Exception as e:
            print(f"Error: {e}")
            raise e

    if skip_files and "files" in body.get("metadata", {}):
        del body["metadata"]["files"]

    return body, {}


def get_tools_function_calling_payload(messages, task_model_id, content):
    user_message = get_last_user_message(messages)
    history = "\n".join(
        f"{message['role'].upper()}: \"\"\"{message['content']}\"\"\""
        for message in messages[::-1][:4]
    )

    prompt = f"History:\n{history}\nQuery: {user_message}"

    return {
        "model": task_model_id,
        "messages": [
            {"role": "system", "content": content},
            {"role": "user", "content": f"Query: {prompt}"},
        ],
        "stream": False,
        "metadata": {"task": str(TASKS.FUNCTION_CALLING)},
    }


async def get_content_from_response(response) -> Optional[str]:
    content = None
    if hasattr(response, "body_iterator"):
        async for chunk in response.body_iterator:
            data = json.loads(chunk.decode("utf-8"))
            content = data["choices"][0]["message"]["content"]

        # Cleanup any remaining background tasks if necessary
        if response.background is not None:
            await response.background()
    else:
        content = response["choices"][0]["message"]["content"]
    return content
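

# Asks the task model to pick a tool (returned as a JSON object), invokes the
# selected tool, and collects its output as extra context and citations for
# the chat request.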
async def chat_completion_tools_handler(
    body: dict, user: UserModel, extra_params: dict
) -> tuple[dict, dict]:
    # If tool_ids field is present, call the functions
    metadata = body.get("metadata", {})

    tool_ids = metadata.get("tool_ids", None)
    log.debug(f"{tool_ids=}")
    if not tool_ids:
        return body, {}

    skip_files = False
    contexts = []
    citations = []

    task_model_id = get_task_model_id(body["model"])
    tools = get_tools(
        webui_app,
        tool_ids,
        user,
        {
            **extra_params,
            "__model__": app.state.MODELS[task_model_id],
            "__messages__": body["messages"],
            "__files__": metadata.get("files", []),
        },
    )
    log.info(f"{tools=}")

    specs = [tool["spec"] for tool in tools.values()]
    tools_specs = json.dumps(specs)

    if app.state.config.TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE != "":
        template = app.state.config.TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE
    else:
        template = """Available Tools: {{TOOLS}}\nReturn an empty string if no tools match the query. If a function tool matches, construct and return a JSON object in the format {\"name\": \"functionName\", \"parameters\": {\"requiredFunctionParamKey\": \"requiredFunctionParamValue\"}} using the appropriate tool and its parameters. Only return the object and limit the response to the JSON object without additional text."""

    tools_function_calling_prompt = tools_function_calling_generation_template(
        template, tools_specs
    )
    log.info(f"{tools_function_calling_prompt=}")
    payload = get_tools_function_calling_payload(
        body["messages"], task_model_id, tools_function_calling_prompt
    )

    try:
        payload = filter_pipeline(payload, user)
    except Exception as e:
        raise e

    try:
        response = await generate_chat_completions(form_data=payload, user=user)
        log.debug(f"{response=}")
        content = await get_content_from_response(response)
        log.debug(f"{content=}")

        if not content:
            return body, {}

        result = json.loads(content)

        tool_function_name = result.get("name", None)
        if tool_function_name not in tools:
            return body, {}

        tool_function_params = result.get("parameters", {})

        try:
            tool_output = await tools[tool_function_name]["callable"](
                **tool_function_params
            )
        except Exception as e:
            tool_output = str(e)

        if tools[tool_function_name]["citation"]:
            citations.append(
                {
                    "source": {
                        "name": f"TOOL:{tools[tool_function_name]['toolkit_id']}/{tool_function_name}"
                    },
                    "document": [tool_output],
                    "metadata": [{"source": tool_function_name}],
                }
            )
        if tools[tool_function_name]["file_handler"]:
            skip_files = True

        if isinstance(tool_output, str):
            contexts.append(tool_output)

    except Exception as e:
        log.exception(f"Error: {e}")
        content = None

    log.debug(f"tool_contexts: {contexts}")

    if skip_files and "files" in body.get("metadata", {}):
        del body["metadata"]["files"]

    return body, {"contexts": contexts, "citations": citations}


async def chat_completion_files_handler(body) -> tuple[dict, dict[str, list]]:
    contexts = []
    citations = []

    if files := body.get("metadata", {}).get("files", None):
        contexts, citations = get_rag_context(
            files=files,
            messages=body["messages"],
            embedding_function=rag_app.state.EMBEDDING_FUNCTION,
            k=rag_app.state.config.TOP_K,
            reranking_function=rag_app.state.sentence_transformer_rf,
            r=rag_app.state.config.RELEVANCE_THRESHOLD,
            hybrid_search=rag_app.state.config.ENABLE_RAG_HYBRID_SEARCH,
        )

        log.debug(f"rag_contexts: {contexts}, citations: {citations}")

    return body, {"contexts": contexts, "citations": citations}


def is_chat_completion_request(request):
    return request.method == "POST" and any(
        endpoint in request.url.path
        for endpoint in ["/ollama/api/chat", "/chat/completions"]
    )


async def get_body_and_model_and_user(request):
    # Read the original request body
    body = await request.body()
    body_str = body.decode("utf-8")
    body = json.loads(body_str) if body_str else {}

    model_id = body["model"]
    if model_id not in app.state.MODELS:
        raise Exception("Model not found")
    model = app.state.MODELS[model_id]

    user = get_current_user(
        request,
        get_http_authorization_cred(request.headers.get("Authorization")),
    )

    return body, model, user
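

# ChatCompletionMiddleware intercepts chat completion requests, applies filter
# functions, tool calling, and RAG context injection, then rewrites the request
# body (and its Content-Length) before passing it downstream.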
class ChatCompletionMiddleware(BaseHTTPMiddleware):
    async def dispatch(self, request: Request, call_next):
        if not is_chat_completion_request(request):
            return await call_next(request)
        log.debug(f"request.url.path: {request.url.path}")

        try:
            body, model, user = await get_body_and_model_and_user(request)
        except Exception as e:
            return JSONResponse(
                status_code=status.HTTP_400_BAD_REQUEST,
                content={"detail": str(e)},
            )

        metadata = {
            "chat_id": body.pop("chat_id", None),
            "message_id": body.pop("id", None),
            "session_id": body.pop("session_id", None),
            "tool_ids": body.get("tool_ids", None),
            "files": body.get("files", None),
        }
        body["metadata"] = metadata

        extra_params = {
            "__event_emitter__": get_event_emitter(metadata),
            "__event_call__": get_event_call(metadata),
            "__user__": {
                "id": user.id,
                "email": user.email,
                "name": user.name,
                "role": user.role,
            },
        }

        # Initialize data_items to store additional data to be sent to the client
        # Initialize contexts and citations
        data_items = []
        contexts = []
        citations = []

        try:
            body, flags = await chat_completion_filter_functions_handler(
                body, model, extra_params
            )
        except Exception as e:
            return JSONResponse(
                status_code=status.HTTP_400_BAD_REQUEST,
                content={"detail": str(e)},
            )

        metadata = {
            **metadata,
            "tool_ids": body.pop("tool_ids", None),
            "files": body.pop("files", None),
        }
        body["metadata"] = metadata

        try:
            body, flags = await chat_completion_tools_handler(body, user, extra_params)
            contexts.extend(flags.get("contexts", []))
            citations.extend(flags.get("citations", []))
        except Exception as e:
            log.exception(e)

        try:
            body, flags = await chat_completion_files_handler(body)
            contexts.extend(flags.get("contexts", []))
            citations.extend(flags.get("citations", []))
        except Exception as e:
            log.exception(e)

        # If context is not empty, insert it into the messages
        if len(contexts) > 0:
            context_string = "\n".join(contexts).strip()
            prompt = get_last_user_message(body["messages"])

            if prompt is None:
                raise Exception("No user message found")
            if (
                rag_app.state.config.RELEVANCE_THRESHOLD == 0
                and context_string.strip() == ""
            ):
                log.debug(
                    "With a 0 relevance threshold for RAG, the context cannot be empty"
                )

            # Workaround for Ollama 2.0+ system prompt issue
            # TODO: replace with add_or_update_system_message
            if model["owned_by"] == "ollama":
                body["messages"] = prepend_to_first_user_message_content(
                    rag_template(
                        rag_app.state.config.RAG_TEMPLATE, context_string, prompt
                    ),
                    body["messages"],
                )
            else:
                body["messages"] = add_or_update_system_message(
                    rag_template(
                        rag_app.state.config.RAG_TEMPLATE, context_string, prompt
                    ),
                    body["messages"],
                )

        # If there are citations, add them to the data_items
        if len(citations) > 0:
            data_items.append({"citations": citations})

        modified_body_bytes = json.dumps(body).encode("utf-8")
        # Replace the request body with the modified one
        request._body = modified_body_bytes
        # Set custom header to ensure content-length matches new body length
        request.headers.__dict__["_list"] = [
            (b"content-length", str(len(modified_body_bytes)).encode("utf-8")),
            *[(k, v) for k, v in request.headers.raw if k.lower() != b"content-length"],
        ]

        response = await call_next(request)
        if not isinstance(response, StreamingResponse):
            return response

        content_type = response.headers["Content-Type"]
        is_openai = "text/event-stream" in content_type
        is_ollama = "application/x-ndjson" in content_type
        if not is_openai and not is_ollama:
            return response

        def wrap_item(item):
            return f"data: {item}\n\n" if is_openai else f"{item}\n"

        async def stream_wrapper(original_generator, data_items):
            for item in data_items:
                yield wrap_item(json.dumps(item))

            async for data in original_generator:
                yield data

        return StreamingResponse(
            stream_wrapper(response.body_iterator, data_items),
            headers=dict(response.headers),
        )

    async def _receive(self, body: bytes):
        return {"type": "http.request", "body": body, "more_body": False}


app.add_middleware(ChatCompletionMiddleware)


##################################
#
# Pipeline Middleware
#
##################################
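

# Pipeline filters are models of type "filter"; requests are forwarded to each
# matching filter's /filter/inlet endpoint in priority order.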
def get_sorted_filters(model_id):
    filters = [
        model
        for model in app.state.MODELS.values()
        if "pipeline" in model
        and "type" in model["pipeline"]
        and model["pipeline"]["type"] == "filter"
        and (
            model["pipeline"]["pipelines"] == ["*"]
            or any(
                model_id == target_model_id
                for target_model_id in model["pipeline"]["pipelines"]
            )
        )
    ]
    sorted_filters = sorted(filters, key=lambda x: x["pipeline"]["priority"])
    return sorted_filters


def filter_pipeline(payload, user):
    user = {"id": user.id, "email": user.email, "name": user.name, "role": user.role}
    model_id = payload["model"]
    sorted_filters = get_sorted_filters(model_id)

    model = app.state.MODELS[model_id]

    if "pipeline" in model:
        sorted_filters.append(model)

    for filter in sorted_filters:
        r = None
        try:
            urlIdx = filter["urlIdx"]

            url = openai_app.state.config.OPENAI_API_BASE_URLS[urlIdx]
            key = openai_app.state.config.OPENAI_API_KEYS[urlIdx]

            if key == "":
                continue

            headers = {"Authorization": f"Bearer {key}"}
            r = requests.post(
                f"{url}/{filter['id']}/filter/inlet",
                headers=headers,
                json={
                    "user": user,
                    "body": payload,
                },
            )

            r.raise_for_status()
            payload = r.json()
        except Exception as e:
            # Handle connection error here
            print(f"Connection error: {e}")

            if r is not None:
                res = r.json()
                if "detail" in res:
                    raise Exception(r.status_code, res["detail"])

    return payload


class PipelineMiddleware(BaseHTTPMiddleware):
    async def dispatch(self, request: Request, call_next):
        if not is_chat_completion_request(request):
            return await call_next(request)

        log.debug(f"request.url.path: {request.url.path}")

        # Read the original request body
        body = await request.body()
        # Decode body to string
        body_str = body.decode("utf-8")
        # Parse string to JSON
        data = json.loads(body_str) if body_str else {}

        user = get_current_user(
            request,
            get_http_authorization_cred(request.headers["Authorization"]),
        )

        try:
            data = filter_pipeline(data, user)
        except Exception as e:
            if len(e.args) > 1:
                return JSONResponse(
                    status_code=e.args[0],
                    content={"detail": e.args[1]},
                )
            else:
                return JSONResponse(
                    status_code=status.HTTP_400_BAD_REQUEST,
                    content={"detail": str(e)},
                )

        modified_body_bytes = json.dumps(data).encode("utf-8")
        # Replace the request body with the modified one
        request._body = modified_body_bytes
        # Set custom header to ensure content-length matches new body length
        request.headers.__dict__["_list"] = [
            (b"content-length", str(len(modified_body_bytes)).encode("utf-8")),
            *[(k, v) for k, v in request.headers.raw if k.lower() != b"content-length"],
        ]

        response = await call_next(request)
        return response

    async def _receive(self, body: bytes):
        return {"type": "http.request", "body": body, "more_body": False}


app.add_middleware(PipelineMiddleware)
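

# Global middleware: CORS, security headers, per-request DB session commit,
# request timing, and model-list warm-up.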
app.add_middleware(
    CORSMiddleware,
    allow_origins=CORS_ALLOW_ORIGIN,
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

app.add_middleware(SecurityHeadersMiddleware)


@app.middleware("http")
async def commit_session_after_request(request: Request, call_next):
    response = await call_next(request)
    log.debug("Commit session after request")
    Session.commit()
    return response


@app.middleware("http")
async def check_url(request: Request, call_next):
    if len(app.state.MODELS) == 0:
        await get_all_models()

    start_time = int(time.time())
    response = await call_next(request)
    process_time = int(time.time()) - start_time
    response.headers["X-Process-Time"] = str(process_time)

    return response


@app.middleware("http")
async def update_embedding_function(request: Request, call_next):
    response = await call_next(request)
    if "/embedding/update" in request.url.path:
        webui_app.state.EMBEDDING_FUNCTION = rag_app.state.EMBEDDING_FUNCTION
    return response


@app.middleware("http")
async def inspect_websocket(request: Request, call_next):
    if (
        "/ws/socket.io" in request.url.path
        and request.query_params.get("transport") == "websocket"
    ):
        upgrade = (request.headers.get("Upgrade") or "").lower()
        connection = (request.headers.get("Connection") or "").lower().split(",")
        # Check that the correct headers are present for an upgrade, else reject the connection
        # This is to work around this upstream issue: https://github.com/miguelgrinberg/python-engineio/issues/367
        if upgrade != "websocket" or "upgrade" not in connection:
            return JSONResponse(
                status_code=status.HTTP_400_BAD_REQUEST,
                content={"detail": "Invalid WebSocket upgrade request"},
            )
    return await call_next(request)


app.mount("/ws", socket_app)
app.mount("/ollama", ollama_app)
app.mount("/openai", openai_app)

app.mount("/images/api/v1", images_app)
app.mount("/audio/api/v1", audio_app)
app.mount("/rag/api/v1", rag_app)

app.mount("/api/v1", webui_app)

webui_app.state.EMBEDDING_FUNCTION = rag_app.state.EMBEDDING_FUNCTION
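

# Aggregates pipe, OpenAI, and Ollama models, overlays custom model presets
# from the database, and attaches enabled action metadata to each entry before
# caching everything in app.state.MODELS.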
async def get_all_models():
    # TODO: Optimize this function
    pipe_models = []
    openai_models = []
    ollama_models = []

    pipe_models = await get_pipe_models()

    if app.state.config.ENABLE_OPENAI_API:
        openai_models = await get_openai_models()
        openai_models = openai_models["data"]

    if app.state.config.ENABLE_OLLAMA_API:
        ollama_models = await get_ollama_models()
        ollama_models = [
            {
                "id": model["model"],
                "name": model["name"],
                "object": "model",
                "created": int(time.time()),
                "owned_by": "ollama",
                "ollama": model,
            }
            for model in ollama_models["models"]
        ]

    models = pipe_models + openai_models + ollama_models

    global_action_ids = [
        function.id for function in Functions.get_global_action_functions()
    ]
    enabled_action_ids = [
        function.id
        for function in Functions.get_functions_by_type("action", active_only=True)
    ]

    custom_models = Models.get_all_models()
    for custom_model in custom_models:
        if custom_model.base_model_id is None:
            for model in models:
                if (
                    custom_model.id == model["id"]
                    or custom_model.id == model["id"].split(":")[0]
                ):
                    model["name"] = custom_model.name
                    model["info"] = custom_model.model_dump()

                    action_ids = []
                    if "info" in model and "meta" in model["info"]:
                        action_ids.extend(model["info"]["meta"].get("actionIds", []))

                    model["action_ids"] = action_ids
        else:
            owned_by = "openai"
            pipe = None
            action_ids = []

            for model in models:
                if (
                    custom_model.base_model_id == model["id"]
                    or custom_model.base_model_id == model["id"].split(":")[0]
                ):
                    owned_by = model["owned_by"]
                    if "pipe" in model:
                        pipe = model["pipe"]
                    if "info" in model and "meta" in model["info"]:
                        action_ids.extend(model["info"]["meta"].get("actionIds", []))
                    break

            models.append(
                {
                    "id": custom_model.id,
                    "name": custom_model.name,
                    "object": "model",
                    "created": custom_model.created_at,
                    "owned_by": owned_by,
                    "info": custom_model.model_dump(),
                    "preset": True,
                    **({"pipe": pipe} if pipe is not None else {}),
                    "action_ids": action_ids,
                }
            )

    for model in models:
        action_ids = []
        if "action_ids" in model:
            action_ids = model["action_ids"]
            del model["action_ids"]

        action_ids = action_ids + global_action_ids
        action_ids = list(set(action_ids))
        action_ids = [
            action_id for action_id in action_ids if action_id in enabled_action_ids
        ]

        model["actions"] = []
        for action_id in action_ids:
            action = Functions.get_function_by_id(action_id)
            if action is None:
                raise Exception(f"Action not found: {action_id}")

            if action_id in webui_app.state.FUNCTIONS:
                function_module = webui_app.state.FUNCTIONS[action_id]
            else:
                function_module, _, _ = load_function_module_by_id(action_id)
                webui_app.state.FUNCTIONS[action_id] = function_module

            __webui__ = False
            if hasattr(function_module, "__webui__"):
                __webui__ = function_module.__webui__

            if hasattr(function_module, "actions"):
                actions = function_module.actions
                model["actions"].extend(
                    [
                        {
                            "id": f"{action_id}.{_action['id']}",
                            "name": _action.get(
                                "name", f"{action.name} ({_action['id']})"
                            ),
                            "description": action.meta.description,
                            "icon_url": _action.get(
                                "icon_url", action.meta.manifest.get("icon_url", None)
                            ),
                            **({"__webui__": __webui__} if __webui__ else {}),
                        }
                        for _action in actions
                    ]
                )
            else:
                model["actions"].append(
                    {
                        "id": action_id,
                        "name": action.name,
                        "description": action.meta.description,
                        "icon_url": action.meta.manifest.get("icon_url", None),
                        **({"__webui__": __webui__} if __webui__ else {}),
                    }
                )

    app.state.MODELS = {model["id"]: model for model in models}
    webui_app.state.MODELS = app.state.MODELS

    return models


@app.get("/api/models")
async def get_models(user=Depends(get_verified_user)):
    models = await get_all_models()

    # Filter out filter pipelines
    models = [
        model
        for model in models
        if "pipeline" not in model or model["pipeline"].get("type", None) != "filter"
    ]

    if app.state.config.ENABLE_MODEL_FILTER:
        if user.role == "user":
            models = list(
                filter(
                    lambda model: model["id"] in app.state.config.MODEL_FILTER_LIST,
                    models,
                )
            )
            return {"data": models}

    return {"data": models}


@app.post("/api/chat/completions")
async def generate_chat_completions(form_data: dict, user=Depends(get_verified_user)):
    model_id = form_data["model"]

    if model_id not in app.state.MODELS:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail="Model not found",
        )

    if app.state.config.ENABLE_MODEL_FILTER:
        if user.role == "user" and model_id not in app.state.config.MODEL_FILTER_LIST:
            raise HTTPException(
                status_code=status.HTTP_403_FORBIDDEN,
                detail="Model not found",
            )

    model = app.state.MODELS[model_id]

    if model.get("pipe"):
        return await generate_function_chat_completion(form_data, user=user)
    if model["owned_by"] == "ollama":
        return await generate_ollama_openai_chat_completion(form_data, user=user)
    else:
        return await generate_openai_chat_completion(form_data, user=user)
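

# Runs outlet-side processing on a finished chat: external pipeline filters
# first, then local filter functions' outlet hooks in priority order.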
@app.post("/api/chat/completed")
async def chat_completed(form_data: dict, user=Depends(get_verified_user)):
    data = form_data
    model_id = data["model"]
    if model_id not in app.state.MODELS:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail="Model not found",
        )

    model = app.state.MODELS[model_id]

    sorted_filters = get_sorted_filters(model_id)
    if "pipeline" in model:
        sorted_filters = [model] + sorted_filters

    for filter in sorted_filters:
        r = None
        try:
            urlIdx = filter["urlIdx"]

            url = openai_app.state.config.OPENAI_API_BASE_URLS[urlIdx]
            key = openai_app.state.config.OPENAI_API_KEYS[urlIdx]

            if key != "":
                headers = {"Authorization": f"Bearer {key}"}
                r = requests.post(
                    f"{url}/{filter['id']}/filter/outlet",
                    headers=headers,
                    json={
                        "user": {
                            "id": user.id,
                            "name": user.name,
                            "email": user.email,
                            "role": user.role,
                        },
                        "body": data,
                    },
                )

                r.raise_for_status()
                data = r.json()
        except Exception as e:
            # Handle connection error here
            print(f"Connection error: {e}")

            if r is not None:
                try:
                    res = r.json()
                    if "detail" in res:
                        return JSONResponse(
                            status_code=r.status_code,
                            content=res,
                        )
                except Exception:
                    pass

    __event_emitter__ = get_event_emitter(
        {
            "chat_id": data["chat_id"],
            "message_id": data["id"],
            "session_id": data["session_id"],
        }
    )

    __event_call__ = get_event_call(
        {
            "chat_id": data["chat_id"],
            "message_id": data["id"],
            "session_id": data["session_id"],
        }
    )

    def get_priority(function_id):
        function = Functions.get_function_by_id(function_id)
        if function is not None and hasattr(function, "valves"):
            # TODO: Fix FunctionModel to include valves
            return (function.valves if function.valves else {}).get("priority", 0)
        return 0

    filter_ids = [function.id for function in Functions.get_global_filter_functions()]
    if "info" in model and "meta" in model["info"]:
        filter_ids.extend(model["info"]["meta"].get("filterIds", []))
        filter_ids = list(set(filter_ids))

    enabled_filter_ids = [
        function.id
        for function in Functions.get_functions_by_type("filter", active_only=True)
    ]
    filter_ids = [
        filter_id for filter_id in filter_ids if filter_id in enabled_filter_ids
    ]

    # Sort filter_ids by priority, using the get_priority function
    filter_ids.sort(key=get_priority)

    for filter_id in filter_ids:
        filter = Functions.get_function_by_id(filter_id)
        if not filter:
            continue

        if filter_id in webui_app.state.FUNCTIONS:
            function_module = webui_app.state.FUNCTIONS[filter_id]
        else:
            function_module, _, _ = load_function_module_by_id(filter_id)
            webui_app.state.FUNCTIONS[filter_id] = function_module

        if hasattr(function_module, "valves") and hasattr(function_module, "Valves"):
            valves = Functions.get_function_valves_by_id(filter_id)
            function_module.valves = function_module.Valves(
                **(valves if valves else {})
            )

        if not hasattr(function_module, "outlet"):
            continue
        try:
            outlet = function_module.outlet

            # Get the signature of the function
            sig = inspect.signature(outlet)
            params = {"body": data}

            # Extra parameters to be passed to the function
            extra_params = {
                "__model__": model,
                "__id__": filter_id,
                "__event_emitter__": __event_emitter__,
                "__event_call__": __event_call__,
            }

            # Add extra params that are contained in the function signature
            for key, value in extra_params.items():
                if key in sig.parameters:
                    params[key] = value

            if "__user__" in sig.parameters:
                __user__ = {
                    "id": user.id,
                    "email": user.email,
                    "name": user.name,
                    "role": user.role,
                }

                try:
                    if hasattr(function_module, "UserValves"):
                        __user__["valves"] = function_module.UserValves(
                            **Functions.get_user_valves_by_id_and_user_id(
                                filter_id, user.id
                            )
                        )
                except Exception as e:
                    print(e)

                params = {**params, "__user__": __user__}

            if inspect.iscoroutinefunction(outlet):
                data = await outlet(**params)
            else:
                data = outlet(**params)

        except Exception as e:
            print(f"Error: {e}")
            return JSONResponse(
                status_code=status.HTTP_400_BAD_REQUEST,
                content={"detail": str(e)},
            )

    return data
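

# Resolves an action function (optionally a sub-action addressed as
# "action_id.sub_action_id") and invokes it with the chat payload, event
# emitters, and per-user valves.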
@app.post("/api/chat/actions/{action_id}")
async def chat_action(action_id: str, form_data: dict, user=Depends(get_verified_user)):
    if "." in action_id:
        action_id, sub_action_id = action_id.split(".")
    else:
        sub_action_id = None

    action = Functions.get_function_by_id(action_id)
    if not action:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail="Action not found",
        )

    data = form_data
    model_id = data["model"]
    if model_id not in app.state.MODELS:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail="Model not found",
        )
    model = app.state.MODELS[model_id]

    __event_emitter__ = get_event_emitter(
        {
            "chat_id": data["chat_id"],
            "message_id": data["id"],
            "session_id": data["session_id"],
        }
    )
    __event_call__ = get_event_call(
        {
            "chat_id": data["chat_id"],
            "message_id": data["id"],
            "session_id": data["session_id"],
        }
    )

    if action_id in webui_app.state.FUNCTIONS:
        function_module = webui_app.state.FUNCTIONS[action_id]
    else:
        function_module, _, _ = load_function_module_by_id(action_id)
        webui_app.state.FUNCTIONS[action_id] = function_module

    if hasattr(function_module, "valves") and hasattr(function_module, "Valves"):
        valves = Functions.get_function_valves_by_id(action_id)
        function_module.valves = function_module.Valves(**(valves if valves else {}))

    if hasattr(function_module, "action"):
        try:
            action = function_module.action

            # Get the signature of the function
            sig = inspect.signature(action)
            params = {"body": data}

            # Extra parameters to be passed to the function
            extra_params = {
                "__model__": model,
                "__id__": sub_action_id if sub_action_id is not None else action_id,
                "__event_emitter__": __event_emitter__,
                "__event_call__": __event_call__,
            }

            # Add extra params that are contained in the function signature
            for key, value in extra_params.items():
                if key in sig.parameters:
                    params[key] = value

            if "__user__" in sig.parameters:
                __user__ = {
                    "id": user.id,
                    "email": user.email,
                    "name": user.name,
                    "role": user.role,
                }

                try:
                    if hasattr(function_module, "UserValves"):
                        __user__["valves"] = function_module.UserValves(
                            **Functions.get_user_valves_by_id_and_user_id(
                                action_id, user.id
                            )
                        )
                except Exception as e:
                    print(e)

                params = {**params, "__user__": __user__}

            if inspect.iscoroutinefunction(action):
                data = await action(**params)
            else:
                data = action(**params)

        except Exception as e:
            print(f"Error: {e}")
            return JSONResponse(
                status_code=status.HTTP_400_BAD_REQUEST,
                content={"detail": str(e)},
            )

    return data


##################################
#
# Task Endpoints
#
##################################


# TODO: Refactor task API endpoints below into a separate file


@app.get("/api/task/config")
async def get_task_config(user=Depends(get_verified_user)):
    return {
        "TASK_MODEL": app.state.config.TASK_MODEL,
        "TASK_MODEL_EXTERNAL": app.state.config.TASK_MODEL_EXTERNAL,
        "TITLE_GENERATION_PROMPT_TEMPLATE": app.state.config.TITLE_GENERATION_PROMPT_TEMPLATE,
        "ENABLE_SEARCH_QUERY": app.state.config.ENABLE_SEARCH_QUERY,
        "SEARCH_QUERY_GENERATION_PROMPT_TEMPLATE": app.state.config.SEARCH_QUERY_GENERATION_PROMPT_TEMPLATE,
        "TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE": app.state.config.TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE,
    }


class TaskConfigForm(BaseModel):
    TASK_MODEL: Optional[str]
    TASK_MODEL_EXTERNAL: Optional[str]
    TITLE_GENERATION_PROMPT_TEMPLATE: str
    SEARCH_QUERY_GENERATION_PROMPT_TEMPLATE: str
    ENABLE_SEARCH_QUERY: bool
    TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE: str


@app.post("/api/task/config/update")
async def update_task_config(form_data: TaskConfigForm, user=Depends(get_admin_user)):
    app.state.config.TASK_MODEL = form_data.TASK_MODEL
    app.state.config.TASK_MODEL_EXTERNAL = form_data.TASK_MODEL_EXTERNAL
    app.state.config.TITLE_GENERATION_PROMPT_TEMPLATE = (
        form_data.TITLE_GENERATION_PROMPT_TEMPLATE
    )
    app.state.config.SEARCH_QUERY_GENERATION_PROMPT_TEMPLATE = (
        form_data.SEARCH_QUERY_GENERATION_PROMPT_TEMPLATE
    )
    app.state.config.ENABLE_SEARCH_QUERY = form_data.ENABLE_SEARCH_QUERY
    app.state.config.TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE = (
        form_data.TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE
    )

    return {
        "TASK_MODEL": app.state.config.TASK_MODEL,
        "TASK_MODEL_EXTERNAL": app.state.config.TASK_MODEL_EXTERNAL,
        "TITLE_GENERATION_PROMPT_TEMPLATE": app.state.config.TITLE_GENERATION_PROMPT_TEMPLATE,
        "SEARCH_QUERY_GENERATION_PROMPT_TEMPLATE": app.state.config.SEARCH_QUERY_GENERATION_PROMPT_TEMPLATE,
        "ENABLE_SEARCH_QUERY": app.state.config.ENABLE_SEARCH_QUERY,
        "TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE": app.state.config.TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE,
    }
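

# Each task endpoint below builds a small single-message payload for the task
# model, routes it through pipeline filters, and dispatches to an Ollama or
# OpenAI-compatible backend.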
@app.post("/api/task/title/completions")
async def generate_title(form_data: dict, user=Depends(get_verified_user)):
    print("generate_title")

    model_id = form_data["model"]
    if model_id not in app.state.MODELS:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail="Model not found",
        )

    # Check if the user has a custom task model
    # If the user has a custom task model, use that model
    task_model_id = get_task_model_id(model_id)
    print(task_model_id)

    model = app.state.MODELS[task_model_id]

    if app.state.config.TITLE_GENERATION_PROMPT_TEMPLATE != "":
        template = app.state.config.TITLE_GENERATION_PROMPT_TEMPLATE
    else:
        template = """Create a concise, 3-5 word title with an emoji as a title for the prompt in the given language. Suitable Emojis for the summary can be used to enhance understanding but avoid quotation marks or special formatting. RESPOND ONLY WITH THE TITLE TEXT.

Examples of titles:
📉 Stock Market Trends
🍪 Perfect Chocolate Chip Recipe
Evolution of Music Streaming
Remote Work Productivity Tips
Artificial Intelligence in Healthcare
🎮 Video Game Development Insights

Prompt: {{prompt:middletruncate:8000}}"""

    content = title_generation_template(
        template,
        form_data["prompt"],
        {
            "name": user.name,
            "location": user.info.get("location") if user.info else None,
        },
    )

    payload = {
        "model": task_model_id,
        "messages": [{"role": "user", "content": content}],
        "stream": False,
        **(
            {"max_tokens": 50}
            if app.state.MODELS[task_model_id]["owned_by"] == "ollama"
            else {
                "max_completion_tokens": 50,
            }
        ),
        "chat_id": form_data.get("chat_id", None),
        "metadata": {"task": str(TASKS.TITLE_GENERATION)},
    }

    log.debug(payload)

    # Handle pipeline filters
    try:
        payload = filter_pipeline(payload, user)
    except Exception as e:
        if len(e.args) > 1:
            return JSONResponse(
                status_code=e.args[0],
                content={"detail": e.args[1]},
            )
        else:
            return JSONResponse(
                status_code=status.HTTP_400_BAD_REQUEST,
                content={"detail": str(e)},
            )
    if "chat_id" in payload:
        del payload["chat_id"]

    # Check if task model is ollama model
    if model["owned_by"] == "ollama":
        payload = convert_payload_openai_to_ollama(payload)
        form_data = GenerateChatCompletionForm(**payload)
        response = await generate_ollama_chat_completion(form_data=form_data, user=user)
        if form_data.stream:
            response.headers["content-type"] = "text/event-stream"
            return StreamingResponse(
                convert_streaming_response_ollama_to_openai(response),
                headers=dict(response.headers),
            )
        else:
            return convert_response_ollama_to_openai(response)
    else:
        return await generate_chat_completions(form_data=payload, user=user)


@app.post("/api/task/query/completions")
async def generate_search_query(form_data: dict, user=Depends(get_verified_user)):
    print("generate_search_query")
    if not app.state.config.ENABLE_SEARCH_QUERY:
        raise HTTPException(
            status_code=status.HTTP_400_BAD_REQUEST,
            detail="Search query generation is disabled",
        )

    model_id = form_data["model"]
    if model_id not in app.state.MODELS:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail="Model not found",
        )

    # Check if the user has a custom task model
    # If the user has a custom task model, use that model
    task_model_id = get_task_model_id(model_id)
    print(task_model_id)

    model = app.state.MODELS[task_model_id]

    if app.state.config.SEARCH_QUERY_GENERATION_PROMPT_TEMPLATE != "":
        template = app.state.config.SEARCH_QUERY_GENERATION_PROMPT_TEMPLATE
    else:
        template = """Given the user's message and interaction history, decide if a web search is necessary. You must be concise and exclusively provide a search query if one is necessary. Refrain from verbose responses or any additional commentary. Prefer suggesting a search if uncertain to provide comprehensive or updated information. If a search isn't needed at all, respond with an empty string. Default to a search query when in doubt. Today's date is {{CURRENT_DATE}}.

User Message:
{{prompt:end:4000}}

Interaction History:
{{MESSAGES:END:6}}

Search Query:"""

    content = search_query_generation_template(
        template, form_data["messages"], {"name": user.name}
    )

    print("content", content)

    payload = {
        "model": task_model_id,
        "messages": [{"role": "user", "content": content}],
        "stream": False,
        **(
            {"max_tokens": 30}
            if app.state.MODELS[task_model_id]["owned_by"] == "ollama"
            else {
                "max_completion_tokens": 30,
            }
        ),
        "metadata": {"task": str(TASKS.QUERY_GENERATION)},
    }

    log.debug(payload)

    # Handle pipeline filters
    try:
        payload = filter_pipeline(payload, user)
    except Exception as e:
        if len(e.args) > 1:
            return JSONResponse(
                status_code=e.args[0],
                content={"detail": e.args[1]},
            )
        else:
            return JSONResponse(
                status_code=status.HTTP_400_BAD_REQUEST,
                content={"detail": str(e)},
            )
    if "chat_id" in payload:
        del payload["chat_id"]

    # Check if task model is ollama model
    if model["owned_by"] == "ollama":
        payload = convert_payload_openai_to_ollama(payload)
        form_data = GenerateChatCompletionForm(**payload)
        response = await generate_ollama_chat_completion(form_data=form_data, user=user)

        if form_data.stream:
            response.headers["content-type"] = "text/event-stream"
            return StreamingResponse(
                convert_streaming_response_ollama_to_openai(response),
                headers=dict(response.headers),
            )
        else:
            return convert_response_ollama_to_openai(response)
    else:
        return await generate_chat_completions(form_data=payload, user=user)
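
# Example request to the endpoint above (illustrative values):
#   POST /api/task/query/completions
#   {"model": "llama3", "messages": [{"role": "user", "content": "latest python release?"}]}
# The task model replies with a bare search query, or an empty string when no
# web search is needed.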


@app.post("/api/task/emoji/completions")
async def generate_emoji(form_data: dict, user=Depends(get_verified_user)):
    print("generate_emoji")

    model_id = form_data["model"]
    if model_id not in app.state.MODELS:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail="Model not found",
        )

    # Check if the user has a custom task model
    # If the user has a custom task model, use that model
    task_model_id = get_task_model_id(model_id)
    print(task_model_id)

    model = app.state.MODELS[task_model_id]

    template = '''
Your task is to reflect the speaker's likely facial expression through a fitting emoji. Interpret emotions from the message and reflect their facial expression using fitting, diverse emojis (e.g., 😊, 😢, 😡, 😱).

Message: """{{prompt}}"""
'''
    content = title_generation_template(
        template,
        form_data["prompt"],
        {
            "name": user.name,
            "location": user.info.get("location") if user.info else None,
        },
    )

    payload = {
        "model": task_model_id,
        "messages": [{"role": "user", "content": content}],
        "stream": False,
        **(
            {"max_tokens": 4}
            if app.state.MODELS[task_model_id]["owned_by"] == "ollama"
            else {
                "max_completion_tokens": 4,
            }
        ),
        "chat_id": form_data.get("chat_id", None),
        "metadata": {"task": str(TASKS.EMOJI_GENERATION)},
    }

    log.debug(payload)

    # Handle pipeline filters
    try:
        payload = filter_pipeline(payload, user)
    except Exception as e:
        if len(e.args) > 1:
            return JSONResponse(
                status_code=e.args[0],
                content={"detail": e.args[1]},
            )
        else:
            return JSONResponse(
                status_code=status.HTTP_400_BAD_REQUEST,
                content={"detail": str(e)},
            )
    if "chat_id" in payload:
        del payload["chat_id"]

    # Check if task model is ollama model
    if model["owned_by"] == "ollama":
        payload = convert_payload_openai_to_ollama(payload)
        form_data = GenerateChatCompletionForm(**payload)
        response = await generate_ollama_chat_completion(form_data=form_data, user=user)

        if form_data.stream:
            response.headers["content-type"] = "text/event-stream"
            return StreamingResponse(
                convert_streaming_response_ollama_to_openai(response),
                headers=dict(response.headers),
            )
        else:
            return convert_response_ollama_to_openai(response)
    else:
        return await generate_chat_completions(form_data=payload, user=user)
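
# The 4-token cap above keeps the completion to roughly a single emoji; the
# emoji prompt reuses title_generation_template since both tasks only
# substitute {{prompt}} plus the user variables into the template.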


@app.post("/api/task/moa/completions")
async def generate_moa_response(form_data: dict, user=Depends(get_verified_user)):
    print("generate_moa_response")

    model_id = form_data["model"]
    if model_id not in app.state.MODELS:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail="Model not found",
        )

    # Check if the user has a custom task model
    # If the user has a custom task model, use that model
    task_model_id = get_task_model_id(model_id)
    print(task_model_id)

    model = app.state.MODELS[task_model_id]

    template = """You have been provided with a set of responses from various models to the latest user query: "{{prompt}}"

Your task is to synthesize these responses into a single, high-quality response. It is crucial to critically evaluate the information provided in these responses, recognizing that some of it may be biased or incorrect. Your response should not simply replicate the given answers but should offer a refined, accurate, and comprehensive reply to the instruction. Ensure your response is well-structured, coherent, and adheres to the highest standards of accuracy and reliability.

Responses from models: {{responses}}"""

    content = moa_response_generation_template(
        template,
        form_data["prompt"],
        form_data["responses"],
    )

    payload = {
        "model": task_model_id,
        "messages": [{"role": "user", "content": content}],
        "stream": form_data.get("stream", False),
        "chat_id": form_data.get("chat_id", None),
        "metadata": {"task": str(TASKS.MOA_RESPONSE_GENERATION)},
    }

    log.debug(payload)

    try:
        payload = filter_pipeline(payload, user)
    except Exception as e:
        if len(e.args) > 1:
            return JSONResponse(
                status_code=e.args[0],
                content={"detail": e.args[1]},
            )
        else:
            return JSONResponse(
                status_code=status.HTTP_400_BAD_REQUEST,
                content={"detail": str(e)},
            )
    if "chat_id" in payload:
        del payload["chat_id"]

    # Check if task model is ollama model
    if model["owned_by"] == "ollama":
        payload = convert_payload_openai_to_ollama(payload)
        form_data = GenerateChatCompletionForm(**payload)
        response = await generate_ollama_chat_completion(form_data=form_data, user=user)

        if form_data.stream:
            response.headers["content-type"] = "text/event-stream"
            return StreamingResponse(
                convert_streaming_response_ollama_to_openai(response),
                headers=dict(response.headers),
            )
        else:
            return convert_response_ollama_to_openai(response)
    else:
        return await generate_chat_completions(form_data=payload, user=user)
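
# Example request body for the mixture-of-agents endpoint above (illustrative):
#   {
#       "model": "llama3",
#       "prompt": "Explain CRDTs briefly",
#       "responses": ["answer from model A", "answer from model B"],
#       "stream": false
#   }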


##################################
#
# Pipelines Endpoints
#
##################################


# TODO: Refactor pipelines API endpoints below into a separate file


@app.get("/api/pipelines/list")
async def get_pipelines_list(user=Depends(get_admin_user)):
    responses = await get_openai_models(raw=True)
    print(responses)

    urlIdxs = [
        idx
        for idx, response in enumerate(responses)
        if response is not None and "pipelines" in response
    ]

    return {
        "data": [
            {
                "url": openai_app.state.config.OPENAI_API_BASE_URLS[urlIdx],
                "idx": urlIdx,
            }
            for urlIdx in urlIdxs
        ]
    }
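
# Backends that include a "pipelines" key in their raw models response are
# treated as pipeline servers; the list above maps each one to its configured
# base URL and index.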


@app.post("/api/pipelines/upload")
async def upload_pipeline(
    urlIdx: int = Form(...), file: UploadFile = File(...), user=Depends(get_admin_user)
):
    print("upload_pipeline", urlIdx, file.filename)

    # Check if the uploaded file is a python file
    if not (file.filename and file.filename.endswith(".py")):
        raise HTTPException(
            status_code=status.HTTP_400_BAD_REQUEST,
            detail="Only Python (.py) files are allowed.",
        )

    upload_folder = f"{CACHE_DIR}/pipelines"
    os.makedirs(upload_folder, exist_ok=True)
    file_path = os.path.join(upload_folder, file.filename)

    r = None
    try:
        # Save the uploaded file
        with open(file_path, "wb") as buffer:
            shutil.copyfileobj(file.file, buffer)

        url = openai_app.state.config.OPENAI_API_BASE_URLS[urlIdx]
        key = openai_app.state.config.OPENAI_API_KEYS[urlIdx]

        headers = {"Authorization": f"Bearer {key}"}

        with open(file_path, "rb") as f:
            files = {"file": f}
            r = requests.post(f"{url}/pipelines/upload", headers=headers, files=files)

        r.raise_for_status()
        data = r.json()

        return {**data}
    except Exception as e:
        # Handle connection error here
        print(f"Connection error: {e}")

        detail = "Pipeline not found"
        status_code = status.HTTP_404_NOT_FOUND

        if r is not None:
            status_code = r.status_code
            try:
                res = r.json()
                if "detail" in res:
                    detail = res["detail"]
            except Exception:
                pass

        raise HTTPException(
            status_code=status_code,
            detail=detail,
        )
    finally:
        # Ensure the file is deleted after the upload is completed or on failure
        if os.path.exists(file_path):
            os.remove(file_path)
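
# Example upload with curl (illustrative; host, port, and admin token are
# hypothetical):
#   curl -X POST http://localhost:8080/api/pipelines/upload \
#       -H "Authorization: Bearer <admin-token>" \
#       -F "urlIdx=0" -F "file=@my_pipeline.py"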


class AddPipelineForm(BaseModel):
    url: str
    urlIdx: int


@app.post("/api/pipelines/add")
async def add_pipeline(form_data: AddPipelineForm, user=Depends(get_admin_user)):
    r = None
    try:
        urlIdx = form_data.urlIdx

        url = openai_app.state.config.OPENAI_API_BASE_URLS[urlIdx]
        key = openai_app.state.config.OPENAI_API_KEYS[urlIdx]

        headers = {"Authorization": f"Bearer {key}"}
        r = requests.post(
            f"{url}/pipelines/add", headers=headers, json={"url": form_data.url}
        )

        r.raise_for_status()
        data = r.json()

        return {**data}
    except Exception as e:
        # Handle connection error here
        print(f"Connection error: {e}")

        detail = "Pipeline not found"
        if r is not None:
            try:
                res = r.json()
                if "detail" in res:
                    detail = res["detail"]
            except Exception:
                pass

        raise HTTPException(
            status_code=(r.status_code if r is not None else status.HTTP_404_NOT_FOUND),
            detail=detail,
        )
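
# Example body for POST /api/pipelines/add (URL is illustrative):
#   {"url": "https://example.com/filters/rate_limit_filter.py", "urlIdx": 0}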


class DeletePipelineForm(BaseModel):
    id: str
    urlIdx: int


@app.delete("/api/pipelines/delete")
async def delete_pipeline(form_data: DeletePipelineForm, user=Depends(get_admin_user)):
    r = None
    try:
        urlIdx = form_data.urlIdx

        url = openai_app.state.config.OPENAI_API_BASE_URLS[urlIdx]
        key = openai_app.state.config.OPENAI_API_KEYS[urlIdx]

        headers = {"Authorization": f"Bearer {key}"}
        r = requests.delete(
            f"{url}/pipelines/delete", headers=headers, json={"id": form_data.id}
        )

        r.raise_for_status()
        data = r.json()

        return {**data}
    except Exception as e:
        # Handle connection error here
        print(f"Connection error: {e}")

        detail = "Pipeline not found"
        if r is not None:
            try:
                res = r.json()
                if "detail" in res:
                    detail = res["detail"]
            except Exception:
                pass

        raise HTTPException(
            status_code=(r.status_code if r is not None else status.HTTP_404_NOT_FOUND),
            detail=detail,
        )


@app.get("/api/pipelines")
async def get_pipelines(urlIdx: Optional[int] = None, user=Depends(get_admin_user)):
    r = None
    try:
        url = openai_app.state.config.OPENAI_API_BASE_URLS[urlIdx]
        key = openai_app.state.config.OPENAI_API_KEYS[urlIdx]

        headers = {"Authorization": f"Bearer {key}"}
        r = requests.get(f"{url}/pipelines", headers=headers)

        r.raise_for_status()
        data = r.json()

        return {**data}
    except Exception as e:
        # Handle connection error here
        print(f"Connection error: {e}")

        detail = "Pipeline not found"
        if r is not None:
            try:
                res = r.json()
                if "detail" in res:
                    detail = res["detail"]
            except Exception:
                pass

        raise HTTPException(
            status_code=(r.status_code if r is not None else status.HTTP_404_NOT_FOUND),
            detail=detail,
        )


@app.get("/api/pipelines/{pipeline_id}/valves")
async def get_pipeline_valves(
    urlIdx: Optional[int],
    pipeline_id: str,
    user=Depends(get_admin_user),
):
    r = None
    try:
        url = openai_app.state.config.OPENAI_API_BASE_URLS[urlIdx]
        key = openai_app.state.config.OPENAI_API_KEYS[urlIdx]

        headers = {"Authorization": f"Bearer {key}"}
        r = requests.get(f"{url}/{pipeline_id}/valves", headers=headers)

        r.raise_for_status()
        data = r.json()

        return {**data}
    except Exception as e:
        # Handle connection error here
        print(f"Connection error: {e}")

        detail = "Pipeline not found"
        if r is not None:
            try:
                res = r.json()
                if "detail" in res:
                    detail = res["detail"]
            except Exception:
                pass

        raise HTTPException(
            status_code=(r.status_code if r is not None else status.HTTP_404_NOT_FOUND),
            detail=detail,
        )


@app.get("/api/pipelines/{pipeline_id}/valves/spec")
async def get_pipeline_valves_spec(
    urlIdx: Optional[int],
    pipeline_id: str,
    user=Depends(get_admin_user),
):
    r = None
    try:
        url = openai_app.state.config.OPENAI_API_BASE_URLS[urlIdx]
        key = openai_app.state.config.OPENAI_API_KEYS[urlIdx]

        headers = {"Authorization": f"Bearer {key}"}
        r = requests.get(f"{url}/{pipeline_id}/valves/spec", headers=headers)

        r.raise_for_status()
        data = r.json()

        return {**data}
    except Exception as e:
        # Handle connection error here
        print(f"Connection error: {e}")

        detail = "Pipeline not found"
        if r is not None:
            try:
                res = r.json()
                if "detail" in res:
                    detail = res["detail"]
            except Exception:
                pass

        raise HTTPException(
            status_code=(r.status_code if r is not None else status.HTTP_404_NOT_FOUND),
            detail=detail,
        )


@app.post("/api/pipelines/{pipeline_id}/valves/update")
async def update_pipeline_valves(
    urlIdx: Optional[int],
    pipeline_id: str,
    form_data: dict,
    user=Depends(get_admin_user),
):
    r = None
    try:
        url = openai_app.state.config.OPENAI_API_BASE_URLS[urlIdx]
        key = openai_app.state.config.OPENAI_API_KEYS[urlIdx]

        headers = {"Authorization": f"Bearer {key}"}
        r = requests.post(
            f"{url}/{pipeline_id}/valves/update",
            headers=headers,
            json={**form_data},
        )

        r.raise_for_status()
        data = r.json()

        return {**data}
    except Exception as e:
        # Handle connection error here
        print(f"Connection error: {e}")

        detail = "Pipeline not found"
        if r is not None:
            try:
                res = r.json()
                if "detail" in res:
                    detail = res["detail"]
            except Exception:
                pass

        raise HTTPException(
            status_code=(r.status_code if r is not None else status.HTTP_404_NOT_FOUND),
            detail=detail,
        )
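
# The pipeline endpoints above all proxy to the selected pipelines server and
# normalize failures the same way: surface the upstream "detail" and status
# code when a response exists, otherwise fall back to 404 "Pipeline not found".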


##################################
#
# Config Endpoints
#
##################################


@app.get("/api/config")
async def get_app_config(request: Request):
    user = None
    if "token" in request.cookies:
        token = request.cookies.get("token")
        data = decode_token(token)
        if data is not None and "id" in data:
            user = Users.get_user_by_id(data["id"])

    return {
        "status": True,
        "name": WEBUI_NAME,
        "version": VERSION,
        "default_locale": str(DEFAULT_LOCALE),
        "oauth": {
            "providers": {
                name: config.get("name", name)
                for name, config in OAUTH_PROVIDERS.items()
            }
        },
        "features": {
            "auth": WEBUI_AUTH,
            "auth_trusted_header": bool(webui_app.state.AUTH_TRUSTED_EMAIL_HEADER),
            "enable_signup": webui_app.state.config.ENABLE_SIGNUP,
            "enable_login_form": webui_app.state.config.ENABLE_LOGIN_FORM,
            **(
                {
                    "enable_web_search": rag_app.state.config.ENABLE_RAG_WEB_SEARCH,
                    "enable_image_generation": images_app.state.config.ENABLED,
                    "enable_community_sharing": webui_app.state.config.ENABLE_COMMUNITY_SHARING,
                    "enable_message_rating": webui_app.state.config.ENABLE_MESSAGE_RATING,
                    "enable_admin_export": ENABLE_ADMIN_EXPORT,
                    "enable_admin_chat_access": ENABLE_ADMIN_CHAT_ACCESS,
                }
                if user is not None
                else {}
            ),
        },
        **(
            {
                "default_models": webui_app.state.config.DEFAULT_MODELS,
                "default_prompt_suggestions": webui_app.state.config.DEFAULT_PROMPT_SUGGESTIONS,
                "audio": {
                    "tts": {
                        "engine": audio_app.state.config.TTS_ENGINE,
                        "voice": audio_app.state.config.TTS_VOICE,
                        "split_on": audio_app.state.config.TTS_SPLIT_ON,
                    },
                    "stt": {
                        "engine": audio_app.state.config.STT_ENGINE,
                    },
                },
                "file": {
                    "max_size": rag_app.state.config.FILE_MAX_SIZE,
                    "max_count": rag_app.state.config.FILE_MAX_COUNT,
                },
                "permissions": {**webui_app.state.config.USER_PERMISSIONS},
            }
            if user is not None
            else {}
        ),
    }
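
# /api/config is deliberately unauthenticated: anonymous callers get only the
# public fields (name, version, locale, OAuth providers), while a valid "token"
# cookie unlocks the feature flags and defaults gated on `user is not None`.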


@app.get("/api/config/model/filter")
async def get_model_filter_config(user=Depends(get_admin_user)):
    return {
        "enabled": app.state.config.ENABLE_MODEL_FILTER,
        "models": app.state.config.MODEL_FILTER_LIST,
    }


class ModelFilterConfigForm(BaseModel):
    enabled: bool
    models: list[str]


@app.post("/api/config/model/filter")
async def update_model_filter_config(
    form_data: ModelFilterConfigForm, user=Depends(get_admin_user)
):
    app.state.config.ENABLE_MODEL_FILTER = form_data.enabled
    app.state.config.MODEL_FILTER_LIST = form_data.models

    return {
        "enabled": app.state.config.ENABLE_MODEL_FILTER,
        "models": app.state.config.MODEL_FILTER_LIST,
    }
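
# Example body for POST /api/config/model/filter (model ids are illustrative):
#   {"enabled": true, "models": ["gpt-4o", "llama3:latest"]}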


# TODO: webhook endpoint should be under config endpoints


@app.get("/api/webhook")
async def get_webhook_url(user=Depends(get_admin_user)):
    return {
        "url": app.state.config.WEBHOOK_URL,
    }


class UrlForm(BaseModel):
    url: str


@app.post("/api/webhook")
async def update_webhook_url(form_data: UrlForm, user=Depends(get_admin_user)):
    app.state.config.WEBHOOK_URL = form_data.url
    webui_app.state.WEBHOOK_URL = app.state.config.WEBHOOK_URL
    return {"url": app.state.config.WEBHOOK_URL}


@app.get("/api/version")
async def get_app_version():
    return {
        "version": VERSION,
    }


@app.get("/api/changelog")
async def get_app_changelog():
    return {key: CHANGELOG[key] for idx, key in enumerate(CHANGELOG) if idx < 5}


@app.get("/api/version/updates")
async def get_app_latest_release_version():
    try:
        async with aiohttp.ClientSession(trust_env=True) as session:
            async with session.get(
                "https://api.github.com/repos/open-webui/open-webui/releases/latest"
            ) as response:
                response.raise_for_status()
                data = await response.json()
                latest_version = data["tag_name"]

                return {"current": VERSION, "latest": latest_version[1:]}
    except aiohttp.ClientError:
        raise HTTPException(
            status_code=status.HTTP_503_SERVICE_UNAVAILABLE,
            detail=ERROR_MESSAGES.RATE_LIMIT_EXCEEDED,
        )
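
# GitHub release tags carry a leading "v" (e.g. "v0.3.10"), which is why
# latest_version[1:] is returned above.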


############################
# OAuth Login & Callback
############################

oauth = OAuth()

for provider_name, provider_config in OAUTH_PROVIDERS.items():
    oauth.register(
        name=provider_name,
        client_id=provider_config["client_id"],
        client_secret=provider_config["client_secret"],
        server_metadata_url=provider_config["server_metadata_url"],
        client_kwargs={
            "scope": provider_config["scope"],
        },
        redirect_uri=provider_config["redirect_uri"],
    )

# SessionMiddleware is used by authlib for oauth
if len(OAUTH_PROVIDERS) > 0:
    app.add_middleware(
        SessionMiddleware,
        secret_key=WEBUI_SECRET_KEY,
        session_cookie="oui-session",
        same_site=WEBUI_SESSION_COOKIE_SAME_SITE,
        https_only=WEBUI_SESSION_COOKIE_SECURE,
    )


@app.get("/oauth/{provider}/login")
async def oauth_login(provider: str, request: Request):
    if provider not in OAUTH_PROVIDERS:
        raise HTTPException(404)
    # If the provider has a custom redirect URL, use that, otherwise automatically generate one
    redirect_uri = OAUTH_PROVIDERS[provider].get("redirect_uri") or request.url_for(
        "oauth_callback", provider=provider
    )
    client = oauth.create_client(provider)
    if client is None:
        raise HTTPException(404)
    return await client.authorize_redirect(request, redirect_uri)


# OAuth login logic is as follows:
# 1. Attempt to find a user with matching subject ID, tied to the provider
# 2. If OAUTH_MERGE_ACCOUNTS_BY_EMAIL is true, find a user with the email address provided via OAuth
#    - This is considered insecure in general, as OAuth providers do not always verify email addresses
# 3. If there is no user, and ENABLE_OAUTH_SIGNUP is true, create a user
#    - Email addresses are considered unique, so we fail registration if the email address is already taken
@app.get("/oauth/{provider}/callback")
async def oauth_callback(provider: str, request: Request, response: Response):
    if provider not in OAUTH_PROVIDERS:
        raise HTTPException(404)
    client = oauth.create_client(provider)
    try:
        token = await client.authorize_access_token(request)
    except Exception as e:
        log.warning(f"OAuth callback error: {e}")
        raise HTTPException(400, detail=ERROR_MESSAGES.INVALID_CRED)
    user_data: UserInfo = token["userinfo"]

    sub = user_data.get("sub")
    if not sub:
        log.warning(f"OAuth callback failed, sub is missing: {user_data}")
        raise HTTPException(400, detail=ERROR_MESSAGES.INVALID_CRED)
    provider_sub = f"{provider}@{sub}"
    email_claim = webui_app.state.config.OAUTH_EMAIL_CLAIM
    email = user_data.get(email_claim, "").lower()
    # We currently mandate that email addresses are provided
    if not email:
        log.warning(f"OAuth callback failed, email is missing: {user_data}")
        raise HTTPException(400, detail=ERROR_MESSAGES.INVALID_CRED)

    # Check if the user exists
    user = Users.get_user_by_oauth_sub(provider_sub)

    if not user:
        # If the user does not exist, check if merging is enabled
        if OAUTH_MERGE_ACCOUNTS_BY_EMAIL.value:
            # Check if the user exists by email
            user = Users.get_user_by_email(email)
            if user:
                # Update the user with the new oauth sub
                Users.update_user_oauth_sub_by_id(user.id, provider_sub)

    if not user:
        # If the user does not exist, check if signups are enabled
        if ENABLE_OAUTH_SIGNUP.value:
            # Check if an existing user with the same email already exists
            existing_user = Users.get_user_by_email(user_data.get("email", "").lower())
            if existing_user:
                raise HTTPException(400, detail=ERROR_MESSAGES.EMAIL_TAKEN)

            picture_claim = webui_app.state.config.OAUTH_PICTURE_CLAIM
            picture_url = user_data.get(picture_claim, "")
            if picture_url:
                # Download the profile image into a base64 string
                try:
                    async with aiohttp.ClientSession() as session:
                        async with session.get(picture_url) as resp:
                            picture = await resp.read()
                            base64_encoded_picture = base64.b64encode(picture).decode(
                                "utf-8"
                            )
                            guessed_mime_type = mimetypes.guess_type(picture_url)[0]
                            if guessed_mime_type is None:
                                # assume JPG, browsers are tolerant enough of image formats
                                guessed_mime_type = "image/jpeg"
                            picture_url = f"data:{guessed_mime_type};base64,{base64_encoded_picture}"
                except Exception as e:
                    log.error(f"Error downloading profile image '{picture_url}': {e}")
                    picture_url = ""
            if not picture_url:
                picture_url = "/user.png"
            username_claim = webui_app.state.config.OAUTH_USERNAME_CLAIM
            role = (
                "admin"
                if Users.get_num_users() == 0
                else webui_app.state.config.DEFAULT_USER_ROLE
            )
            user = Auths.insert_new_auth(
                email=email,
                password=get_password_hash(
                    str(uuid.uuid4())
                ),  # Random password, not used
                name=user_data.get(username_claim, "User"),
                profile_image_url=picture_url,
                role=role,
                oauth_sub=provider_sub,
            )

            if webui_app.state.config.WEBHOOK_URL:
                post_webhook(
                    webui_app.state.config.WEBHOOK_URL,
                    WEBHOOK_MESSAGES.USER_SIGNUP(user.name),
                    {
                        "action": "signup",
                        "message": WEBHOOK_MESSAGES.USER_SIGNUP(user.name),
                        "user": user.model_dump_json(exclude_none=True),
                    },
                )
        else:
            raise HTTPException(
                status.HTTP_403_FORBIDDEN, detail=ERROR_MESSAGES.ACCESS_PROHIBITED
            )

    jwt_token = create_token(
        data={"id": user.id},
        expires_delta=parse_duration(webui_app.state.config.JWT_EXPIRES_IN),
    )

    # Set the cookie token
    response.set_cookie(
        key="token",
        value=jwt_token,
        httponly=True,  # Ensures the cookie is not accessible via JavaScript
    )

    # Redirect back to the frontend with the JWT token
    redirect_url = f"{request.base_url}auth#token={jwt_token}"
    return RedirectResponse(url=redirect_url)
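
# Note: the JWT ends up in two places: an httponly cookie for API calls, and
# the URL fragment, which lets the SPA read it client-side after the redirect
# (fragments are never sent to the server).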


@app.get("/manifest.json")
async def get_manifest_json():
    return {
        "name": WEBUI_NAME,
        "short_name": WEBUI_NAME,
        "start_url": "/",
        "display": "standalone",
        "background_color": "#343541",
        "orientation": "portrait-primary",
        "icons": [
            {
                "src": "/static/logo.png",
                "type": "image/png",
                "sizes": "500x500",
                "purpose": "any",
            },
            {
                "src": "/static/logo.png",
                "type": "image/png",
                "sizes": "500x500",
                "purpose": "maskable",
            },
        ],
    }


@app.get("/opensearch.xml")
async def get_opensearch_xml():
    xml_content = rf"""
    <OpenSearchDescription xmlns="http://a9.com/-/spec/opensearch/1.1/" xmlns:moz="http://www.mozilla.org/2006/browser/search/">
    <ShortName>{WEBUI_NAME}</ShortName>
    <Description>Search {WEBUI_NAME}</Description>
    <InputEncoding>UTF-8</InputEncoding>
    <Image width="16" height="16" type="image/x-icon">{WEBUI_URL}/static/favicon.png</Image>
    <Url type="text/html" method="get" template="{WEBUI_URL}/?q={"{searchTerms}"}"/>
    <moz:SearchForm>{WEBUI_URL}</moz:SearchForm>
    </OpenSearchDescription>
    """
    return Response(content=xml_content, media_type="application/xml")


@app.get("/health")
async def healthcheck():
    return {"status": True}


@app.get("/health/db")
async def healthcheck_with_db():
    Session.execute(text("SELECT 1;")).all()
    return {"status": True}
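
# /health is a cheap liveness probe; /health/db additionally round-trips the
# database with SELECT 1, so it can serve as a readiness probe.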


app.mount("/static", StaticFiles(directory=STATIC_DIR), name="static")
app.mount("/cache", StaticFiles(directory=CACHE_DIR), name="cache")

if os.path.exists(FRONTEND_BUILD_DIR):
    mimetypes.add_type("text/javascript", ".js")
    app.mount(
        "/",
        SPAStaticFiles(directory=FRONTEND_BUILD_DIR, html=True),
        name="spa-static-files",
    )
else:
    log.warning(
        f"Frontend build directory not found at '{FRONTEND_BUILD_DIR}'. Serving API only."
    )