chat.py

import time
import logging
import sys

from aiocache import cached
from typing import Any, Optional
import random
import json
import inspect
import uuid
import asyncio

from fastapi import Request
from starlette.responses import Response, StreamingResponse

from open_webui.models.users import UserModel

from open_webui.socket.main import (
    sio,
    get_event_call,
    get_event_emitter,
)

from open_webui.functions import generate_function_chat_completion
from open_webui.routers.openai import (
    generate_chat_completion as generate_openai_chat_completion,
)
from open_webui.routers.ollama import (
    generate_chat_completion as generate_ollama_chat_completion,
)
from open_webui.routers.pipelines import (
    process_pipeline_inlet_filter,
    process_pipeline_outlet_filter,
)

from open_webui.models.functions import Functions
from open_webui.models.models import Models

from open_webui.utils.plugin import load_function_module_by_id
from open_webui.utils.models import get_all_models, check_model_access
from open_webui.utils.payload import convert_payload_openai_to_ollama
from open_webui.utils.response import (
    convert_response_ollama_to_openai,
    convert_streaming_response_ollama_to_openai,
)
from open_webui.utils.filter import (
    get_sorted_filter_ids,
    process_filter_functions,
)

from open_webui.env import SRC_LOG_LEVELS, GLOBAL_LOG_LEVEL, BYPASS_MODEL_ACCESS_CONTROL


logging.basicConfig(stream=sys.stdout, level=GLOBAL_LOG_LEVEL)
log = logging.getLogger(__name__)
log.setLevel(SRC_LOG_LEVELS["MAIN"])

async def generate_direct_chat_completion(
    request: Request,
    form_data: dict,
    user: Any,
    models: dict,
):
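    """
    Handle a chat completion for a "direct" connection, where the request is
    relayed to the client's own session over Socket.IO instead of being
    served by a backend provider.

    A unique per-request channel is used to stream responses back; for
    streaming requests the events are re-emitted as a server-sent event
    stream, otherwise the single result is returned directly.
    """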
    log.debug("generate_direct_chat_completion")

    metadata = form_data.pop("metadata", {})

    user_id = metadata.get("user_id")
    session_id = metadata.get("session_id")
    request_id = str(uuid.uuid4())  # Generate a unique request ID

    event_emitter = get_event_emitter(metadata)
    event_caller = get_event_call(metadata)

    channel = f"{user_id}:{session_id}:{request_id}"

    if form_data.get("stream"):
        q = asyncio.Queue()

        # Define a generator to stream responses
        async def event_generator():
            nonlocal q

            async def message_listener(sid, data):
                """
                Handle received socket messages and push them into the queue.
                """
                await q.put(data)

            # Register the listener
            sio.on(channel, message_listener)

            # Start processing chat completion in background
            await event_emitter(
                {
                    "type": "request:chat:completion",
                    "data": {
                        "form_data": form_data,
                        "model": models[form_data["model"]],
                        "channel": channel,
                        "session_id": session_id,
                    },
                }
            )

            try:
                while True:
                    data = await q.get()  # Wait for new messages
                    if isinstance(data, dict):
                        if "error" in data:
                            raise Exception(data["error"])

                        if "done" in data and data["done"]:
                            break  # Stop streaming when 'done' is received

                        yield f"data: {json.dumps(data)}\n\n"
                    elif isinstance(data, str):
                        yield data
            finally:
                del sio.handlers["/"][channel]  # Remove the listener

        # Return the streaming response
        return StreamingResponse(event_generator(), media_type="text/event-stream")
    else:
        res = await event_caller(
            {
                "type": "request:chat:completion",
                "data": {
                    "form_data": form_data,
                    "model": models[form_data["model"]],
                    "channel": channel,
                    "session_id": session_id,
                },
            }
        )

        log.debug(res)

        if "error" in res:
            raise Exception(res["error"])

        return res

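
# Illustrative sketch (not part of the module): the streaming branch above
# emits standard server-sent-event frames, so a hypothetical client could
# consume an endpoint wrapping this function roughly as follows. httpx is an
# assumed dependency, and the URL and `body` payload are placeholders:
#
#     import httpx
#
#     async with httpx.AsyncClient() as client:
#         async with client.stream(
#             "POST", "/api/chat/completions", json=body
#         ) as r:
#             async for line in r.aiter_lines():
#                 if line.startswith("data: "):
#                     print(line.removeprefix("data: "))
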
async def generate_chat_completion(
    request: Request,
    form_data: dict,
    user: Any,
    bypass_filter: bool = False,
):
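    """
    Route a chat completion request to the appropriate backend.

    Applies the pipeline inlet filter and model access checks, then
    dispatches to the direct-connection handler, an arena model (random
    sub-model selection), a pipe function, the Ollama router, or the
    OpenAI router.
    """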
    if BYPASS_MODEL_ACCESS_CONTROL:
        bypass_filter = True

    if request.state.direct and request.state.model:
        models = {
            request.state.model["id"]: request.state.model,
        }
    else:
        models = request.app.state.MODELS

    model_id = form_data["model"]
    if model_id not in models:
        raise Exception("Model not found")

    # Process the form_data through the pipeline inlet filter
    form_data = process_pipeline_inlet_filter(request, form_data, user, models)

    model = models[model_id]

    # Check if the user has access to the model
    if not bypass_filter and user.role == "user":
        check_model_access(user, model)

    if request.state.direct:
        return await generate_direct_chat_completion(
            request, form_data, user=user, models=models
        )
    else:
        if model["owned_by"] == "arena":
            model_ids = model.get("info", {}).get("meta", {}).get("model_ids")
            filter_mode = model.get("info", {}).get("meta", {}).get("filter_mode")
            if model_ids and filter_mode == "exclude":
                model_ids = [
                    model["id"]
                    for model in list(request.app.state.MODELS.values())
                    if model.get("owned_by") != "arena" and model["id"] not in model_ids
                ]

            selected_model_id = None
            if isinstance(model_ids, list) and model_ids:
                selected_model_id = random.choice(model_ids)
            else:
                model_ids = [
                    model["id"]
                    for model in list(request.app.state.MODELS.values())
                    if model.get("owned_by") != "arena"
                ]
                selected_model_id = random.choice(model_ids)

            form_data["model"] = selected_model_id

            if form_data.get("stream"):

                async def stream_wrapper(stream):
                    yield f"data: {json.dumps({'selected_model_id': selected_model_id})}\n\n"
                    async for chunk in stream:
                        yield chunk

                response = await generate_chat_completion(
                    request, form_data, user, bypass_filter=True
                )
                return StreamingResponse(
                    stream_wrapper(response.body_iterator),
                    media_type="text/event-stream",
                    background=response.background,
                )
            else:
                return {
                    **(
                        await generate_chat_completion(
                            request, form_data, user, bypass_filter=True
                        )
                    ),
                    "selected_model_id": selected_model_id,
                }

        if model.get("pipe"):
            # This does not require bypass_filter because this is the only
            # route that uses this function, and it already bypasses the filter
            return await generate_function_chat_completion(
                request, form_data, user=user, models=models
            )
        if model["owned_by"] == "ollama":
            # Using /ollama/api/chat endpoint
            form_data = convert_payload_openai_to_ollama(form_data)
            response = await generate_ollama_chat_completion(
                request=request,
                form_data=form_data,
                user=user,
                bypass_filter=bypass_filter,
            )
            if form_data.get("stream"):
                response.headers["content-type"] = "text/event-stream"
                return StreamingResponse(
                    convert_streaming_response_ollama_to_openai(response),
                    headers=dict(response.headers),
                    background=response.background,
                )
            else:
                return convert_response_ollama_to_openai(response)
        else:
            return await generate_openai_chat_completion(
                request=request,
                form_data=form_data,
                user=user,
                bypass_filter=bypass_filter,
            )

chat_completion = generate_chat_completion

async def chat_completed(request: Request, form_data: dict, user: Any):
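    """
    Run post-completion ("outlet") processing for a finished chat message:
    the pipeline outlet filter, followed by any outlet filter functions
    registered for the model.
    """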
    if not request.app.state.MODELS:
        await get_all_models(request)

    if request.state.direct and request.state.model:
        models = {
            request.state.model["id"]: request.state.model,
        }
    else:
        models = request.app.state.MODELS

    data = form_data
    model_id = data["model"]
    if model_id not in models:
        raise Exception("Model not found")

    model = models[model_id]

    try:
        data = process_pipeline_outlet_filter(request, data, user, models)
    except Exception as e:
        raise Exception(f"Error: {e}")

    metadata = {
        "chat_id": data["chat_id"],
        "message_id": data["id"],
        "session_id": data["session_id"],
        "user_id": user.id,
    }

    extra_params = {
        "__event_emitter__": get_event_emitter(metadata),
        "__event_call__": get_event_call(metadata),
        "__user__": {
            "id": user.id,
            "email": user.email,
            "name": user.name,
            "role": user.role,
        },
        "__metadata__": metadata,
        "__request__": request,
        "__model__": model,
    }

    try:
        result, _ = await process_filter_functions(
            request=request,
            filter_ids=get_sorted_filter_ids(model),
            filter_type="outlet",
            form_data=data,
            extra_params=extra_params,
        )
        return result
    except Exception as e:
        raise Exception(f"Error: {e}")

async def chat_action(request: Request, action_id: str, form_data: dict, user: Any):
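    """
    Execute an action function (optionally a sub-action, addressed as
    "action_id.sub_action_id") against a chat message, loading the function
    module and its valves on demand.
    """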
    if "." in action_id:
        action_id, sub_action_id = action_id.split(".")
    else:
        sub_action_id = None

    action = Functions.get_function_by_id(action_id)
    if not action:
        raise Exception(f"Action not found: {action_id}")

    if not request.app.state.MODELS:
        await get_all_models(request)

    if request.state.direct and request.state.model:
        models = {
            request.state.model["id"]: request.state.model,
        }
    else:
        models = request.app.state.MODELS

    data = form_data
    model_id = data["model"]

    if model_id not in models:
        raise Exception("Model not found")
    model = models[model_id]

    __event_emitter__ = get_event_emitter(
        {
            "chat_id": data["chat_id"],
            "message_id": data["id"],
            "session_id": data["session_id"],
            "user_id": user.id,
        }
    )
    __event_call__ = get_event_call(
        {
            "chat_id": data["chat_id"],
            "message_id": data["id"],
            "session_id": data["session_id"],
            "user_id": user.id,
        }
    )

    if action_id in request.app.state.FUNCTIONS:
        function_module = request.app.state.FUNCTIONS[action_id]
    else:
        function_module, _, _ = load_function_module_by_id(action_id)
        request.app.state.FUNCTIONS[action_id] = function_module

    if hasattr(function_module, "valves") and hasattr(function_module, "Valves"):
        valves = Functions.get_function_valves_by_id(action_id)
        function_module.valves = function_module.Valves(**(valves if valves else {}))

    if hasattr(function_module, "action"):
        try:
            action = function_module.action

            # Get the signature of the function
            sig = inspect.signature(action)

            params = {"body": data}

            # Extra parameters to be passed to the function
            extra_params = {
                "__model__": model,
                "__id__": sub_action_id if sub_action_id is not None else action_id,
                "__event_emitter__": __event_emitter__,
                "__event_call__": __event_call__,
                "__request__": request,
            }

            # Add extra params that are contained in the function signature
            for key, value in extra_params.items():
                if key in sig.parameters:
                    params[key] = value

            if "__user__" in sig.parameters:
                __user__ = {
                    "id": user.id,
                    "email": user.email,
                    "name": user.name,
                    "role": user.role,
                }

                try:
                    if hasattr(function_module, "UserValves"):
                        __user__["valves"] = function_module.UserValves(
                            **Functions.get_user_valves_by_id_and_user_id(
                                action_id, user.id
                            )
                        )
                except Exception as e:
                    log.exception(e)

                params = {**params, "__user__": __user__}

            if inspect.iscoroutinefunction(action):
                data = await action(**params)
            else:
                data = action(**params)

        except Exception as e:
            raise Exception(f"Error: {e}")

    return data
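

# Illustrative sketch (not part of the module): a minimal action function
# module compatible with chat_action above. Only "action", "Valves", and
# "UserValves" are names the code above actually looks for; everything else
# here is hypothetical, and the valves are assumed to be pydantic models, as
# the **valves construction above suggests:
#
#     from pydantic import BaseModel
#
#     class Action:
#         class Valves(BaseModel):
#             enabled: bool = True
#
#         async def action(self, body: dict, __user__=None, __event_emitter__=None):
#             # Inspect or mutate the message payload, then return it;
#             # chat_action only passes the dunder params that appear in
#             # this signature.
#             return body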