middleware.py

import time
import logging
import sys
import asyncio

from aiocache import cached
from typing import Any, Optional
import random
import json
import inspect
from uuid import uuid4

from fastapi import Request
from fastapi import BackgroundTasks

from starlette.responses import Response, StreamingResponse

from open_webui.models.chats import Chats
from open_webui.socket.main import (
    get_event_call,
    get_event_emitter,
)
from open_webui.routers.tasks import (
    generate_queries,
    generate_title,
    generate_chat_tags,
)
from open_webui.models.users import UserModel
from open_webui.models.functions import Functions
from open_webui.models.models import Models
from open_webui.retrieval.utils import get_sources_from_files
from open_webui.utils.chat import generate_chat_completion
from open_webui.utils.task import (
    get_task_model_id,
    rag_template,
    tools_function_calling_generation_template,
)
from open_webui.utils.misc import (
    get_message_list,
    add_or_update_system_message,
    get_last_user_message,
    prepend_to_first_user_message_content,
)
from open_webui.utils.tools import get_tools
from open_webui.utils.plugin import load_function_module_by_id
from open_webui.tasks import create_task
from open_webui.config import DEFAULT_TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE
from open_webui.env import SRC_LOG_LEVELS, GLOBAL_LOG_LEVEL, BYPASS_MODEL_ACCESS_CONTROL
from open_webui.constants import TASKS

logging.basicConfig(stream=sys.stdout, level=GLOBAL_LOG_LEVEL)
log = logging.getLogger(__name__)
log.setLevel(SRC_LOG_LEVELS["MAIN"])
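

# Applies every enabled "filter" function's inlet() hook to the request body,
# in ascending valve priority, before the payload reaches the model. Filters
# may rewrite the body; a filter that sets file_handler = True claims the
# uploaded files, which are then dropped from the request metadata.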
async def chat_completion_filter_functions_handler(request, body, model, extra_params):
    skip_files = None

    def get_filter_function_ids(model):
        def get_priority(function_id):
            function = Functions.get_function_by_id(function_id)
            if function is not None and hasattr(function, "valves"):
                # TODO: Fix FunctionModel
                return (function.valves if function.valves else {}).get("priority", 0)
            return 0

        filter_ids = [
            function.id for function in Functions.get_global_filter_functions()
        ]
        if "info" in model and "meta" in model["info"]:
            filter_ids.extend(model["info"]["meta"].get("filterIds", []))
            filter_ids = list(set(filter_ids))

        enabled_filter_ids = [
            function.id
            for function in Functions.get_functions_by_type("filter", active_only=True)
        ]
        filter_ids = [
            filter_id for filter_id in filter_ids if filter_id in enabled_filter_ids
        ]

        filter_ids.sort(key=get_priority)
        return filter_ids

    filter_ids = get_filter_function_ids(model)
    for filter_id in filter_ids:
        filter = Functions.get_function_by_id(filter_id)
        if not filter:
            continue

        if filter_id in request.app.state.FUNCTIONS:
            function_module = request.app.state.FUNCTIONS[filter_id]
        else:
            function_module, _, _ = load_function_module_by_id(filter_id)
            request.app.state.FUNCTIONS[filter_id] = function_module

        # Check if the function has a file_handler variable
        if hasattr(function_module, "file_handler"):
            skip_files = function_module.file_handler

        # Apply valves to the function
        if hasattr(function_module, "valves") and hasattr(function_module, "Valves"):
            valves = Functions.get_function_valves_by_id(filter_id)
            function_module.valves = function_module.Valves(
                **(valves if valves else {})
            )

        if hasattr(function_module, "inlet"):
            try:
                inlet = function_module.inlet

                # Build the inlet kwargs, passing only the parameters that the
                # inlet's signature actually declares
                params = {"body": body} | {
                    k: v
                    for k, v in {
                        **extra_params,
                        "__model__": model,
                        "__id__": filter_id,
                    }.items()
                    if k in inspect.signature(inlet).parameters
                }

                if "__user__" in params and hasattr(function_module, "UserValves"):
                    try:
                        params["__user__"]["valves"] = function_module.UserValves(
                            **Functions.get_user_valves_by_id_and_user_id(
                                filter_id, params["__user__"]["id"]
                            )
                        )
                    except Exception as e:
                        print(e)

                if inspect.iscoroutinefunction(inlet):
                    body = await inlet(**params)
                else:
                    body = inlet(**params)
            except Exception as e:
                print(f"Error: {e}")
                raise e

    if skip_files and "files" in body.get("metadata", {}):
        del body["metadata"]["files"]

    return body, {}
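

# Prompt-based tool calling: a task model is asked (via the function-calling
# template) to name a tool and its parameters as a JSON object; the chosen
# tool is invoked and any string output is collected as a citation "source"
# for the main completion. Tools flagged as file handlers suppress RAG files.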
async def chat_completion_tools_handler(
    request: Request, body: dict, user: UserModel, models, extra_params: dict
) -> tuple[dict, dict]:
    async def get_content_from_response(response) -> Optional[str]:
        content = None
        if hasattr(response, "body_iterator"):
            async for chunk in response.body_iterator:
                data = json.loads(chunk.decode("utf-8"))
                content = data["choices"][0]["message"]["content"]

            # Cleanup any remaining background tasks if necessary
            if response.background is not None:
                await response.background()
        else:
            content = response["choices"][0]["message"]["content"]
        return content

    def get_tools_function_calling_payload(messages, task_model_id, content):
        user_message = get_last_user_message(messages)
        history = "\n".join(
            f"{message['role'].upper()}: \"\"\"{message['content']}\"\"\""
            for message in messages[::-1][:4]
        )

        prompt = f"History:\n{history}\nQuery: {user_message}"

        return {
            "model": task_model_id,
            "messages": [
                {"role": "system", "content": content},
                {"role": "user", "content": f"Query: {prompt}"},
            ],
            "stream": False,
            "metadata": {"task": str(TASKS.FUNCTION_CALLING)},
        }

    # If tool_ids field is present, call the functions
    metadata = body.get("metadata", {})

    tool_ids = metadata.get("tool_ids", None)
    log.debug(f"{tool_ids=}")
    if not tool_ids:
        return body, {}

    skip_files = False
    sources = []

    task_model_id = get_task_model_id(
        body["model"],
        request.app.state.config.TASK_MODEL,
        request.app.state.config.TASK_MODEL_EXTERNAL,
        models,
    )
    tools = get_tools(
        request,
        tool_ids,
        user,
        {
            **extra_params,
            "__model__": models[task_model_id],
            "__messages__": body["messages"],
            "__files__": metadata.get("files", []),
        },
    )
    log.info(f"{tools=}")

    specs = [tool["spec"] for tool in tools.values()]
    tools_specs = json.dumps(specs)

    if request.app.state.config.TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE != "":
        template = request.app.state.config.TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE
    else:
        template = DEFAULT_TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE

    tools_function_calling_prompt = tools_function_calling_generation_template(
        template, tools_specs
    )
    log.info(f"{tools_function_calling_prompt=}")
    payload = get_tools_function_calling_payload(
        body["messages"], task_model_id, tools_function_calling_prompt
    )

    try:
        response = await generate_chat_completion(request, form_data=payload, user=user)
        log.debug(f"{response=}")
        content = await get_content_from_response(response)
        log.debug(f"{content=}")

        if not content:
            return body, {}

        try:
            content = content[content.find("{") : content.rfind("}") + 1]
            if not content:
                raise Exception("No JSON object found in the response")

            result = json.loads(content)

            tool_function_name = result.get("name", None)
            if tool_function_name not in tools:
                return body, {}

            tool_function_params = result.get("parameters", {})

            try:
                required_params = (
                    tools[tool_function_name]
                    .get("spec", {})
                    .get("parameters", {})
                    .get("required", [])
                )
                tool_function = tools[tool_function_name]["callable"]
                tool_function_params = {
                    k: v
                    for k, v in tool_function_params.items()
                    if k in required_params
                }
                tool_output = await tool_function(**tool_function_params)
            except Exception as e:
                tool_output = str(e)

            if isinstance(tool_output, str):
                if tools[tool_function_name]["citation"]:
                    sources.append(
                        {
                            "source": {
                                "name": f"TOOL:{tools[tool_function_name]['toolkit_id']}/{tool_function_name}"
                            },
                            "document": [tool_output],
                            "metadata": [
                                {
                                    "source": f"TOOL:{tools[tool_function_name]['toolkit_id']}/{tool_function_name}"
                                }
                            ],
                        }
                    )
                else:
                    sources.append(
                        {
                            "source": {},
                            "document": [tool_output],
                            "metadata": [
                                {
                                    "source": f"TOOL:{tools[tool_function_name]['toolkit_id']}/{tool_function_name}"
                                }
                            ],
                        }
                    )

                if tools[tool_function_name]["file_handler"]:
                    skip_files = True
        except Exception as e:
            log.exception(f"Error: {e}")
            content = None
    except Exception as e:
        log.exception(f"Error: {e}")
        content = None

    log.debug(f"tool_contexts: {sources}")

    if skip_files and "files" in body.get("metadata", {}):
        del body["metadata"]["files"]

    return body, {"sources": sources}
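

# Retrieval step: when the request carries files, search queries are
# generated from the conversation (falling back to the last user message on
# failure) and the matching chunks are pulled in as sources, optionally with
# hybrid search and reranking.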
async def chat_completion_files_handler(
    request: Request, body: dict, user: UserModel
) -> tuple[dict, dict[str, list]]:
    sources = []

    if files := body.get("metadata", {}).get("files", None):
        try:
            queries_response = await generate_queries(
                {
                    "model": body["model"],
                    "messages": body["messages"],
                    "type": "retrieval",
                },
                user,
            )
            queries_response = queries_response["choices"][0]["message"]["content"]

            try:
                bracket_start = queries_response.find("{")
                bracket_end = queries_response.rfind("}") + 1

                # rfind() returns -1 when "}" is absent, so after the +1 the
                # "not found" sentinel is 0, not -1
                if bracket_start == -1 or bracket_end == 0:
                    raise Exception("No JSON object found in the response")

                queries_response = queries_response[bracket_start:bracket_end]
                queries_response = json.loads(queries_response)
            except Exception:
                queries_response = {"queries": [queries_response]}

            queries = queries_response.get("queries", [])
        except Exception:
            queries = []

        if len(queries) == 0:
            queries = [get_last_user_message(body["messages"])]

        sources = get_sources_from_files(
            files=files,
            queries=queries,
            embedding_function=request.app.state.EMBEDDING_FUNCTION,
            k=request.app.state.config.TOP_K,
            reranking_function=request.app.state.rf,
            r=request.app.state.config.RELEVANCE_THRESHOLD,
            hybrid_search=request.app.state.config.ENABLE_RAG_HYBRID_SEARCH,
        )

        log.debug(f"rag_contexts:sources: {sources}")

    return body, {"sources": sources}
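

# Translates the UI-level "params" into backend-specific fields: Ollama
# models receive them as an "options" dict, while OpenAI-compatible backends
# expect top-level sampling fields. For example, {"params": {"seed": 42}}
# becomes {"seed": 42} at the top level for a non-Ollama model.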
def apply_params_to_form_data(form_data, model):
    params = form_data.pop("params", {})
    if model.get("ollama"):
        form_data["options"] = params

        if "format" in params:
            form_data["format"] = params["format"]

        if "keep_alive" in params:
            form_data["keep_alive"] = params["keep_alive"]
    else:
        if "seed" in params:
            form_data["seed"] = params["seed"]

        if "stop" in params:
            form_data["stop"] = params["stop"]

        if "temperature" in params:
            form_data["temperature"] = params["temperature"]

        if "top_p" in params:
            form_data["top_p"] = params["top_p"]

        if "frequency_penalty" in params:
            form_data["frequency_penalty"] = params["frequency_penalty"]

    return form_data
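

# Pre-processing pipeline for a chat completion request: apply model params,
# run filter inlets, tool calling, and file retrieval, then inject any
# gathered context into the messages via the RAG template. Returns the
# rewritten form_data plus a list of events to forward to the client.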
async def process_chat_payload(request, form_data, metadata, user, model):
    form_data = apply_params_to_form_data(form_data, model)
    log.debug(f"form_data: {form_data}")

    extra_params = {
        "__event_emitter__": get_event_emitter(metadata),
        "__event_call__": get_event_call(metadata),
        "__user__": {
            "id": user.id,
            "email": user.email,
            "name": user.name,
            "role": user.role,
        },
        "__metadata__": metadata,
        "__request__": request,
    }

    # Initialize events to store additional event to be sent to the client
    # Initialize contexts and citation
    models = request.app.state.MODELS

    events = []
    sources = []

    try:
        form_data, flags = await chat_completion_filter_functions_handler(
            request, form_data, model, extra_params
        )
    except Exception as e:
        raise Exception(f"Error: {e}")

    tool_ids = form_data.pop("tool_ids", None)
    files = form_data.pop("files", None)

    metadata = {
        **metadata,
        "tool_ids": tool_ids,
        "files": files,
    }
    form_data["metadata"] = metadata

    try:
        form_data, flags = await chat_completion_tools_handler(
            request, form_data, user, models, extra_params
        )
        sources.extend(flags.get("sources", []))
    except Exception as e:
        log.exception(e)

    try:
        form_data, flags = await chat_completion_files_handler(request, form_data, user)
        sources.extend(flags.get("sources", []))
    except Exception as e:
        log.exception(e)

    # If context is not empty, insert it into the messages
    if len(sources) > 0:
        context_string = ""
        for source_idx, source in enumerate(sources):
            source_id = source.get("source", {}).get("name", "")

            if "document" in source:
                for doc_idx, doc_context in enumerate(source["document"]):
                    metadata = source.get("metadata")
                    doc_source_id = None

                    if metadata:
                        doc_source_id = metadata[doc_idx].get("source", source_id)

                    if source_id:
                        context_string += f"<source><source_id>{doc_source_id if doc_source_id is not None else source_id}</source_id><source_context>{doc_context}</source_context></source>\n"
                    else:
                        # If there is no source_id, then do not include the source_id tag
                        context_string += f"<source><source_context>{doc_context}</source_context></source>\n"

        context_string = context_string.strip()
        prompt = get_last_user_message(form_data["messages"])

        if prompt is None:
            raise Exception("No user message found")
        if (
            request.app.state.config.RELEVANCE_THRESHOLD == 0
            and context_string.strip() == ""
        ):
            log.debug(
                "With a 0 relevancy threshold for RAG, the context cannot be empty"
            )

        # Workaround for Ollama 2.0+ system prompt issue
        # TODO: replace with add_or_update_system_message
        if model["owned_by"] == "ollama":
            form_data["messages"] = prepend_to_first_user_message_content(
                rag_template(
                    request.app.state.config.RAG_TEMPLATE, context_string, prompt
                ),
                form_data["messages"],
            )
        else:
            form_data["messages"] = add_or_update_system_message(
                rag_template(
                    request.app.state.config.RAG_TEMPLATE, context_string, prompt
                ),
                form_data["messages"],
            )

    # If there are citations, add them to the data_items
    sources = [source for source in sources if source.get("source", {}).get("name", "")]
    if len(sources) > 0:
        events.append({"sources": sources})

    return form_data, events
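

# Post-processing for a completion response. Non-streaming responses pass
# through untouched. For SSE/NDJSON streams with a session-scoped event
# emitter, the stream is consumed in a background task that persists partial
# content to the chat, relays deltas over the socket, and runs title/tag
# generation; otherwise the stream is re-wrapped with any queued events
# prepended.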
async def process_chat_response(request, response, user, events, metadata, tasks):
    if not isinstance(response, StreamingResponse):
        return response

    if not any(
        content_type in response.headers["Content-Type"]
        for content_type in ["text/event-stream", "application/x-ndjson"]
    ):
        return response

    event_emitter = None
    if "session_id" in metadata:
        event_emitter = get_event_emitter(metadata)

    if event_emitter:
        task_id = str(uuid4())  # Create a unique task ID.

        # Handle as a background task
        async def post_response_handler(response, events):
            try:
                for event in events:
                    await event_emitter(
                        {
                            "type": "chat-completion",
                            "data": event,
                        }
                    )

                content = ""
                async for line in response.body_iterator:
                    line = line.decode("utf-8") if isinstance(line, bytes) else line
                    data = line

                    # Skip empty lines
                    if not data.strip():
                        continue

                    # "data: " is the prefix for each event
                    if not data.startswith("data: "):
                        continue

                    # Remove the prefix
                    data = data[len("data: ") :]

                    try:
                        data = json.loads(data)
                        value = (
                            data.get("choices", [])[0].get("delta", {}).get("content")
                        )

                        if value:
                            content = f"{content}{value}"

                            # Save message in the database
                            Chats.upsert_message_to_chat_by_id_and_message_id(
                                metadata["chat_id"],
                                metadata["message_id"],
                                {
                                    "content": content,
                                },
                            )
                    except Exception as e:
                        done = "data: [DONE]" in line
                        if done:
                            data = {"done": True}
                        else:
                            continue

                    await event_emitter(
                        {
                            "type": "chat-completion",
                            "data": data,
                        }
                    )

                message_map = Chats.get_messages_by_chat_id(metadata["chat_id"])
                message = message_map.get(metadata["message_id"])

                if message:
                    messages = get_message_list(message_map, message.get("id"))

                    if TASKS.TITLE_GENERATION in tasks:
                        res = await generate_title(
                            request,
                            {
                                "model": message["model"],
                                "messages": messages,
                                "chat_id": metadata["chat_id"],
                            },
                            user,
                        )

                        if res:
                            title = (
                                res.get("choices", [])[0]
                                .get("message", {})
                                .get("content", message.get("content", "New Chat"))
                            )

                            Chats.update_chat_title_by_id(metadata["chat_id"], title)

                            await event_emitter(
                                {
                                    "type": "chat-title",
                                    "data": title,
                                }
                            )

                    if TASKS.TAGS_GENERATION in tasks:
                        res = await generate_chat_tags(
                            request,
                            {
                                "model": message["model"],
                                "messages": messages,
                                "chat_id": metadata["chat_id"],
                            },
                            user,
                        )

                        if res:
                            tags_string = (
                                res.get("choices", [])[0]
                                .get("message", {})
                                .get("content", "")
                            )

                            tags_string = tags_string[
                                tags_string.find("{") : tags_string.rfind("}") + 1
                            ]

                            try:
                                tags = json.loads(tags_string).get("tags", [])
                                Chats.update_chat_tags_by_id(
                                    metadata["chat_id"], tags, user
                                )

                                await event_emitter(
                                    {
                                        "type": "chat-tags",
                                        "data": tags,
                                    }
                                )
                            except Exception as e:
                                print(f"Error: {e}")
            except asyncio.CancelledError:
                print("Task was cancelled!")
                await event_emitter({"type": "task-cancelled"})

            if response.background is not None:
                await response.background()

        # background_tasks.add_task(post_response_handler, response, events)
        task_id, _ = create_task(post_response_handler(response, events))
        return {"status": True, "task_id": task_id}
    else:
        # Fallback to the original response
        async def stream_wrapper(original_generator, events):
            def wrap_item(item):
                return f"data: {item}\n\n"

            for event in events:
                yield wrap_item(json.dumps(event))

            async for data in original_generator:
                yield data

        return StreamingResponse(
            stream_wrapper(response.body_iterator, events),
            headers=dict(response.headers),
        )