chat.py
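"""
Chat completion utilities for Open WebUI: route completion requests to a
direct client connection, an arena model, a pipe function, Ollama, or an
OpenAI-compatible backend, and post-process completed chats with outlet
filters and actions.
"""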

import time
import logging
import sys
from aiocache import cached
from typing import Any, Optional
import random
import json
import inspect
import uuid
import asyncio

from fastapi import Request, status
from starlette.responses import Response, StreamingResponse, JSONResponse

from open_webui.models.users import UserModel
from open_webui.socket.main import (
    sio,
    get_event_call,
    get_event_emitter,
)
from open_webui.functions import generate_function_chat_completion
from open_webui.routers.openai import (
    generate_chat_completion as generate_openai_chat_completion,
)
from open_webui.routers.ollama import (
    generate_chat_completion as generate_ollama_chat_completion,
)
from open_webui.routers.pipelines import (
    process_pipeline_inlet_filter,
    process_pipeline_outlet_filter,
)
from open_webui.models.functions import Functions
from open_webui.models.models import Models
from open_webui.utils.plugin import load_function_module_by_id
from open_webui.utils.models import get_all_models, check_model_access
from open_webui.utils.payload import convert_payload_openai_to_ollama
from open_webui.utils.response import (
    convert_response_ollama_to_openai,
    convert_streaming_response_ollama_to_openai,
)
from open_webui.utils.filter import (
    get_sorted_filter_ids,
    process_filter_functions,
)
from open_webui.env import SRC_LOG_LEVELS, GLOBAL_LOG_LEVEL, BYPASS_MODEL_ACCESS_CONTROL


logging.basicConfig(stream=sys.stdout, level=GLOBAL_LOG_LEVEL)
log = logging.getLogger(__name__)
log.setLevel(SRC_LOG_LEVELS["MAIN"])


async def generate_direct_chat_completion(
    request: Request,
    form_data: dict,
    user: Any,
    models: dict,
):
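    """
    Execute a chat completion over a user's direct (client-side) connection.

    The request is forwarded to the client's socket session via a
    "request:chat:completion" event; streamed chunks come back on a
    per-request socket channel and are re-emitted as server-sent events.
    """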
    log.debug("generate_direct_chat_completion")

    metadata = form_data.pop("metadata", {})

    user_id = metadata.get("user_id")
    session_id = metadata.get("session_id")
    request_id = str(uuid.uuid4())  # Generate a unique request ID

    event_emitter = get_event_emitter(metadata)
    event_caller = get_event_call(metadata)

    channel = f"{user_id}:{session_id}:{request_id}"
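
    # Streaming: listen on the per-request channel and relay chunks as SSE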
    if form_data.get("stream"):
        q = asyncio.Queue()

        async def message_listener(sid, data):
            """
            Handle received socket messages and push them into the queue.
            """
            await q.put(data)

        # Register the listener
        sio.on(channel, message_listener)

        # Start processing chat completion in background
        res = await event_caller(
            {
                "type": "request:chat:completion",
                "data": {
                    "form_data": form_data,
                    "model": models[form_data["model"]],
                    "channel": channel,
                    "session_id": session_id,
                },
            }
        )
        log.debug(f"res: {res}")

        if res.get("status", False):
            # Define a generator to stream responses
            async def event_generator():
                nonlocal q
                try:
                    while True:
                        data = await q.get()  # Wait for new messages
                        if isinstance(data, dict):
                            if "done" in data and data["done"]:
                                break  # Stop streaming when 'done' is received
                            yield f"data: {json.dumps(data)}\n\n"
                        elif isinstance(data, str):
                            yield data
                except Exception as e:
                    log.debug(f"Error in event generator: {e}")

            # Background task: remove the channel listener once streaming ends
            async def background():
                try:
                    del sio.handlers["/"][channel]
                except Exception:
                    pass

            # Return the streaming response
            return StreamingResponse(
                event_generator(), media_type="text/event-stream", background=background
            )
        else:
            raise Exception(str(res))
    else:
        res = await event_caller(
            {
                "type": "request:chat:completion",
                "data": {
                    "form_data": form_data,
                    "model": models[form_data["model"]],
                    "channel": channel,
                    "session_id": session_id,
                },
            }
        )

        if "error" in res:
            raise Exception(res["error"])

        return res


async def generate_chat_completion(
    request: Request,
    form_data: dict,
    user: Any,
    bypass_filter: bool = False,
):
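    """
    Route a chat completion request to the appropriate backend: a direct
    client connection, an arena model, a pipe function, Ollama, or an
    OpenAI-compatible endpoint.
    """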
    if BYPASS_MODEL_ACCESS_CONTROL:
        bypass_filter = True

    if request.state.direct and request.state.model:
        models = {
            request.state.model["id"]: request.state.model,
        }
    else:
        models = request.app.state.MODELS

    model_id = form_data["model"]
    if model_id not in models:
        raise Exception("Model not found")

    # Process the form_data through the pipeline
    try:
        form_data = process_pipeline_inlet_filter(request, form_data, user, models)
    except Exception as e:
        raise e

    model = models[model_id]

    # Check if the user has access to the model
    if not bypass_filter and user.role == "user":
        try:
            check_model_access(user, model)
        except Exception as e:
            raise e

    if request.state.direct:
        return await generate_direct_chat_completion(
            request, form_data, user=user, models=models
        )
    else:
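        # Arena models: pick a concrete model at random from the configured
        # pool (or from all non-arena models) and re-dispatch the request
        # under the selected model's id.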
        if model["owned_by"] == "arena":
            model_ids = model.get("info", {}).get("meta", {}).get("model_ids")
            filter_mode = model.get("info", {}).get("meta", {}).get("filter_mode")
            if model_ids and filter_mode == "exclude":
                model_ids = [
                    model["id"]
                    for model in list(request.app.state.MODELS.values())
                    if model.get("owned_by") != "arena" and model["id"] not in model_ids
                ]

            selected_model_id = None
            if isinstance(model_ids, list) and model_ids:
                selected_model_id = random.choice(model_ids)
            else:
                model_ids = [
                    model["id"]
                    for model in list(request.app.state.MODELS.values())
                    if model.get("owned_by") != "arena"
                ]
                selected_model_id = random.choice(model_ids)

            form_data["model"] = selected_model_id

            if form_data.get("stream") == True:

                async def stream_wrapper(stream):
                    yield f"data: {json.dumps({'selected_model_id': selected_model_id})}\n\n"
                    async for chunk in stream:
                        yield chunk

                response = await generate_chat_completion(
                    request, form_data, user, bypass_filter=True
                )
                return StreamingResponse(
                    stream_wrapper(response.body_iterator),
                    media_type="text/event-stream",
                    background=response.background,
                )
            else:
                return {
                    **(
                        await generate_chat_completion(
                            request, form_data, user, bypass_filter=True
                        )
                    ),
                    "selected_model_id": selected_model_id,
                }

        if model.get("pipe"):
            # Does not require bypass_filter: this is the only route that uses
            # this function, and it is already bypassing the filter
            return await generate_function_chat_completion(
                request, form_data, user=user, models=models
            )
        if model["owned_by"] == "ollama":
            # Using /ollama/api/chat endpoint
            form_data = convert_payload_openai_to_ollama(form_data)
            response = await generate_ollama_chat_completion(
                request=request,
                form_data=form_data,
                user=user,
                bypass_filter=bypass_filter,
            )
            if form_data.get("stream"):
                response.headers["content-type"] = "text/event-stream"
                return StreamingResponse(
                    convert_streaming_response_ollama_to_openai(response),
                    headers=dict(response.headers),
                    background=response.background,
                )
            else:
                return convert_response_ollama_to_openai(response)
        else:
            return await generate_openai_chat_completion(
                request=request,
                form_data=form_data,
                user=user,
                bypass_filter=bypass_filter,
            )
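

# Public alias for generate_chat_completion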
chat_completion = generate_chat_completion


async def chat_completed(request: Request, form_data: dict, user: Any):
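    """
    Post-process a completed chat response: run it through the pipeline
    outlet filter and any "outlet" filter functions configured for the model.
    """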
    if not request.app.state.MODELS:
        await get_all_models(request)

    if request.state.direct and request.state.model:
        models = {
            request.state.model["id"]: request.state.model,
        }
    else:
        models = request.app.state.MODELS

    data = form_data
    model_id = data["model"]
    if model_id not in models:
        raise Exception("Model not found")

    model = models[model_id]

    try:
        data = process_pipeline_outlet_filter(request, data, user, models)
    except Exception as e:
        raise Exception(f"Error: {e}")

    metadata = {
        "chat_id": data["chat_id"],
        "message_id": data["id"],
        "session_id": data["session_id"],
        "user_id": user.id,
    }

    extra_params = {
        "__event_emitter__": get_event_emitter(metadata),
        "__event_call__": get_event_call(metadata),
        "__user__": {
            "id": user.id,
            "email": user.email,
            "name": user.name,
            "role": user.role,
        },
        "__metadata__": metadata,
        "__request__": request,
        "__model__": model,
    }

    try:
        result, _ = await process_filter_functions(
            request=request,
            filter_ids=get_sorted_filter_ids(model),
            filter_type="outlet",
            form_data=data,
            extra_params=extra_params,
        )
        return result
    except Exception as e:
        raise Exception(f"Error: {e}")


async def chat_action(request: Request, action_id: str, form_data: dict, user: Any):
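    """
    Execute an action function against a chat message. An action id of the
    form "action_id.sub_action_id" addresses a named sub-action.
    """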
  282. if "." in action_id:
  283. action_id, sub_action_id = action_id.split(".")
  284. else:
  285. sub_action_id = None
  286. action = Functions.get_function_by_id(action_id)
  287. if not action:
  288. raise Exception(f"Action not found: {action_id}")
  289. if not request.app.state.MODELS:
  290. await get_all_models(request)
  291. if request.state.direct and request.state.model:
  292. models = {
  293. request.state.model["id"]: request.state.model,
  294. }
  295. else:
  296. models = request.app.state.MODELS
  297. data = form_data
  298. model_id = data["model"]
  299. if model_id not in models:
  300. raise Exception("Model not found")
  301. model = models[model_id]
  302. __event_emitter__ = get_event_emitter(
  303. {
  304. "chat_id": data["chat_id"],
  305. "message_id": data["id"],
  306. "session_id": data["session_id"],
  307. "user_id": user.id,
  308. }
  309. )
  310. __event_call__ = get_event_call(
  311. {
  312. "chat_id": data["chat_id"],
  313. "message_id": data["id"],
  314. "session_id": data["session_id"],
  315. "user_id": user.id,
  316. }
  317. )
  318. if action_id in request.app.state.FUNCTIONS:
  319. function_module = request.app.state.FUNCTIONS[action_id]
  320. else:
  321. function_module, _, _ = load_function_module_by_id(action_id)
  322. request.app.state.FUNCTIONS[action_id] = function_module
  323. if hasattr(function_module, "valves") and hasattr(function_module, "Valves"):
  324. valves = Functions.get_function_valves_by_id(action_id)
  325. function_module.valves = function_module.Valves(**(valves if valves else {}))
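
    # Build the call arguments dynamically: only the extra params declared
    # in the action's signature are passed through.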
    if hasattr(function_module, "action"):
        try:
            action = function_module.action

            # Get the signature of the function
            sig = inspect.signature(action)

            params = {"body": data}

            # Extra parameters to be passed to the function
            extra_params = {
                "__model__": model,
                "__id__": sub_action_id if sub_action_id is not None else action_id,
                "__event_emitter__": __event_emitter__,
                "__event_call__": __event_call__,
                "__request__": request,
            }

            # Add the extra params that are contained in the function signature
            for key, value in extra_params.items():
                if key in sig.parameters:
                    params[key] = value

            if "__user__" in sig.parameters:
                __user__ = {
                    "id": user.id,
                    "email": user.email,
                    "name": user.name,
                    "role": user.role,
                }

                try:
                    if hasattr(function_module, "UserValves"):
                        __user__["valves"] = function_module.UserValves(
                            **Functions.get_user_valves_by_id_and_user_id(
                                action_id, user.id
                            )
                        )
                except Exception as e:
                    log.debug(e)

                params = {**params, "__user__": __user__}

            if inspect.iscoroutinefunction(action):
                data = await action(**params)
            else:
                data = action(**params)
        except Exception as e:
            raise Exception(f"Error: {e}")

    return data