middleware.py

import time
import logging
import sys

import asyncio
from aiocache import cached
from typing import Any, Optional
import random
import json
import inspect

from uuid import uuid4
from concurrent.futures import ThreadPoolExecutor

from fastapi import Request
from fastapi import BackgroundTasks

from starlette.responses import Response, StreamingResponse

from open_webui.models.chats import Chats
from open_webui.models.users import Users
from open_webui.socket.main import (
    get_event_call,
    get_event_emitter,
    get_active_status_by_user_id,
)
from open_webui.routers.tasks import (
    generate_queries,
    generate_title,
    generate_image_prompt,
    generate_chat_tags,
)
from open_webui.routers.retrieval import process_web_search, SearchForm
from open_webui.routers.images import image_generations, GenerateImageForm
from open_webui.utils.webhook import post_webhook

from open_webui.models.users import UserModel
from open_webui.models.functions import Functions
from open_webui.models.models import Models

from open_webui.retrieval.utils import get_sources_from_files

from open_webui.utils.chat import generate_chat_completion
from open_webui.utils.task import (
    get_task_model_id,
    rag_template,
    tools_function_calling_generation_template,
)
from open_webui.utils.misc import (
    get_message_list,
    add_or_update_system_message,
    get_last_user_message,
    get_last_assistant_message,
    prepend_to_first_user_message_content,
)
from open_webui.utils.tools import get_tools
from open_webui.utils.plugin import load_function_module_by_id

from open_webui.tasks import create_task

from open_webui.config import DEFAULT_TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE
from open_webui.env import (
    SRC_LOG_LEVELS,
    GLOBAL_LOG_LEVEL,
    BYPASS_MODEL_ACCESS_CONTROL,
    ENABLE_REALTIME_CHAT_SAVE,
)
from open_webui.constants import TASKS


logging.basicConfig(stream=sys.stdout, level=GLOBAL_LOG_LEVEL)
log = logging.getLogger(__name__)
log.setLevel(SRC_LOG_LEVELS["MAIN"])
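

# Runs every active "filter" function attached globally or to the model,
# applying valves and calling each function's `inlet` hook (sync or async)
# on the request body in priority order. A filter that sets `file_handler`
# claims the uploaded files, so they are dropped from the request metadata
# afterwards.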
async def chat_completion_filter_functions_handler(request, body, model, extra_params):
    skip_files = None

    def get_filter_function_ids(model):
        def get_priority(function_id):
            function = Functions.get_function_by_id(function_id)
            if function is not None and hasattr(function, "valves"):
                # TODO: Fix FunctionModel
                return (function.valves if function.valves else {}).get("priority", 0)
            return 0

        filter_ids = [
            function.id for function in Functions.get_global_filter_functions()
        ]
        if "info" in model and "meta" in model["info"]:
            filter_ids.extend(model["info"]["meta"].get("filterIds", []))
            filter_ids = list(set(filter_ids))

        enabled_filter_ids = [
            function.id
            for function in Functions.get_functions_by_type("filter", active_only=True)
        ]

        filter_ids = [
            filter_id for filter_id in filter_ids if filter_id in enabled_filter_ids
        ]

        filter_ids.sort(key=get_priority)
        return filter_ids

    filter_ids = get_filter_function_ids(model)
    for filter_id in filter_ids:
        filter = Functions.get_function_by_id(filter_id)
        if not filter:
            continue

        if filter_id in request.app.state.FUNCTIONS:
            function_module = request.app.state.FUNCTIONS[filter_id]
        else:
            function_module, _, _ = load_function_module_by_id(filter_id)
            request.app.state.FUNCTIONS[filter_id] = function_module

        # Check if the function has a file_handler variable
        if hasattr(function_module, "file_handler"):
            skip_files = function_module.file_handler

        # Apply valves to the function
        if hasattr(function_module, "valves") and hasattr(function_module, "Valves"):
            valves = Functions.get_function_valves_by_id(filter_id)
            function_module.valves = function_module.Valves(
                **(valves if valves else {})
            )

        if hasattr(function_module, "inlet"):
            try:
                inlet = function_module.inlet

                # Create a dictionary of parameters to be passed to the function
                params = {"body": body} | {
                    k: v
                    for k, v in {
                        **extra_params,
                        "__model__": model,
                        "__id__": filter_id,
                    }.items()
                    if k in inspect.signature(inlet).parameters
                }

                if "__user__" in params and hasattr(function_module, "UserValves"):
                    try:
                        params["__user__"]["valves"] = function_module.UserValves(
                            **Functions.get_user_valves_by_id_and_user_id(
                                filter_id, params["__user__"]["id"]
                            )
                        )
                    except Exception as e:
                        print(e)

                if inspect.iscoroutinefunction(inlet):
                    body = await inlet(**params)
                else:
                    body = inlet(**params)
            except Exception as e:
                print(f"Error: {e}")
                raise e

    if skip_files and "files" in body.get("metadata", {}):
        del body["metadata"]["files"]

    return body, {}
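

# Prompt-based tool calling: builds a function-calling prompt from the
# selected tools' specs, asks the task model to reply with a JSON object
# naming a tool and its parameters, invokes that tool, and collects any
# string output as citation "sources" to be merged into the RAG context.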
async def chat_completion_tools_handler(
    request: Request, body: dict, user: UserModel, models, extra_params: dict
) -> tuple[dict, dict]:
    async def get_content_from_response(response) -> Optional[str]:
        content = None
        if hasattr(response, "body_iterator"):
            async for chunk in response.body_iterator:
                data = json.loads(chunk.decode("utf-8"))
                content = data["choices"][0]["message"]["content"]

            # Cleanup any remaining background tasks if necessary
            if response.background is not None:
                await response.background()
        else:
            content = response["choices"][0]["message"]["content"]
        return content

    def get_tools_function_calling_payload(messages, task_model_id, content):
        user_message = get_last_user_message(messages)
        history = "\n".join(
            f"{message['role'].upper()}: \"\"\"{message['content']}\"\"\""
            for message in messages[::-1][:4]
        )

        prompt = f"History:\n{history}\nQuery: {user_message}"

        return {
            "model": task_model_id,
            "messages": [
                {"role": "system", "content": content},
                {"role": "user", "content": f"Query: {prompt}"},
            ],
            "stream": False,
            "metadata": {"task": str(TASKS.FUNCTION_CALLING)},
        }

    # If tool_ids field is present, call the functions
    metadata = body.get("metadata", {})

    tool_ids = metadata.get("tool_ids", None)
    log.debug(f"{tool_ids=}")
    if not tool_ids:
        return body, {}

    skip_files = False
    sources = []

    task_model_id = get_task_model_id(
        body["model"],
        request.app.state.config.TASK_MODEL,
        request.app.state.config.TASK_MODEL_EXTERNAL,
        models,
    )
    tools = get_tools(
        request,
        tool_ids,
        user,
        {
            **extra_params,
            "__model__": models[task_model_id],
            "__messages__": body["messages"],
            "__files__": metadata.get("files", []),
        },
    )
    log.info(f"{tools=}")

    specs = [tool["spec"] for tool in tools.values()]
    tools_specs = json.dumps(specs)

    if request.app.state.config.TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE != "":
        template = request.app.state.config.TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE
    else:
        template = DEFAULT_TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE

    tools_function_calling_prompt = tools_function_calling_generation_template(
        template, tools_specs
    )
    log.info(f"{tools_function_calling_prompt=}")
    payload = get_tools_function_calling_payload(
        body["messages"], task_model_id, tools_function_calling_prompt
    )

    try:
        response = await generate_chat_completion(request, form_data=payload, user=user)
        log.debug(f"{response=}")
        content = await get_content_from_response(response)
        log.debug(f"{content=}")

        if not content:
            return body, {}

        try:
            content = content[content.find("{") : content.rfind("}") + 1]
            if not content:
                raise Exception("No JSON object found in the response")

            result = json.loads(content)

            tool_function_name = result.get("name", None)
            if tool_function_name not in tools:
                return body, {}

            tool_function_params = result.get("parameters", {})

            try:
                required_params = (
                    tools[tool_function_name]
                    .get("spec", {})
                    .get("parameters", {})
                    .get("required", [])
                )
                tool_function = tools[tool_function_name]["callable"]
                tool_function_params = {
                    k: v
                    for k, v in tool_function_params.items()
                    if k in required_params
                }
                tool_output = await tool_function(**tool_function_params)
            except Exception as e:
                tool_output = str(e)

            if isinstance(tool_output, str):
                if tools[tool_function_name]["citation"]:
                    sources.append(
                        {
                            "source": {
                                "name": f"TOOL:{tools[tool_function_name]['toolkit_id']}/{tool_function_name}"
                            },
                            "document": [tool_output],
                            "metadata": [
                                {
                                    "source": f"TOOL:{tools[tool_function_name]['toolkit_id']}/{tool_function_name}"
                                }
                            ],
                        }
                    )
                else:
                    sources.append(
                        {
                            "source": {},
                            "document": [tool_output],
                            "metadata": [
                                {
                                    "source": f"TOOL:{tools[tool_function_name]['toolkit_id']}/{tool_function_name}"
                                }
                            ],
                        }
                    )

                if tools[tool_function_name]["file_handler"]:
                    skip_files = True
        except Exception as e:
            log.exception(f"Error: {e}")
            content = None
    except Exception as e:
        log.exception(f"Error: {e}")
        content = None

    log.debug(f"tool_contexts: {sources}")

    if skip_files and "files" in body.get("metadata", {}):
        del body["metadata"]["files"]

    return body, {"sources": sources}
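

# Web search feature: generates search queries from the conversation, runs
# the first query through the configured search engine, and attaches the
# indexed results to `form_data["files"]` as a `web_search_results` entry,
# emitting status events to the client along the way.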
async def chat_web_search_handler(
    request: Request, form_data: dict, extra_params: dict, user
):
    event_emitter = extra_params["__event_emitter__"]
    await event_emitter(
        {
            "type": "status",
            "data": {
                "action": "web_search",
                "description": "Generating search query",
                "done": False,
            },
        }
    )

    messages = form_data["messages"]
    user_message = get_last_user_message(messages)

    queries = []
    try:
        res = await generate_queries(
            request,
            {
                "model": form_data["model"],
                "messages": messages,
                "prompt": user_message,
                "type": "web_search",
            },
            user,
        )

        response = res["choices"][0]["message"]["content"]

        try:
            bracket_start = response.find("{")
            bracket_end = response.rfind("}") + 1

            if bracket_start == -1 or bracket_end == -1:
                raise Exception("No JSON object found in the response")

            response = response[bracket_start:bracket_end]
            queries = json.loads(response)
            queries = queries.get("queries", [])
        except Exception as e:
            queries = [response]
    except Exception as e:
        log.exception(e)
        queries = [user_message]

    if len(queries) == 0:
        await event_emitter(
            {
                "type": "status",
                "data": {
                    "action": "web_search",
                    "description": "No search query generated",
                    "done": True,
                },
            }
        )
        return form_data

    searchQuery = queries[0]

    await event_emitter(
        {
            "type": "status",
            "data": {
                "action": "web_search",
                "description": 'Searching "{{searchQuery}}"',
                "query": searchQuery,
                "done": False,
            },
        }
    )

    try:
        # Offload process_web_search to a separate thread
        loop = asyncio.get_running_loop()
        with ThreadPoolExecutor() as executor:
            results = await loop.run_in_executor(
                executor,
                lambda: process_web_search(
                    request,
                    SearchForm(
                        **{
                            "query": searchQuery,
                        }
                    ),
                    user,
                ),
            )

        if results:
            await event_emitter(
                {
                    "type": "status",
                    "data": {
                        "action": "web_search",
                        "description": "Searched {{count}} sites",
                        "query": searchQuery,
                        "urls": results["filenames"],
                        "done": True,
                    },
                }
            )

            files = form_data.get("files", [])
            files.append(
                {
                    "collection_name": results["collection_name"],
                    "name": searchQuery,
                    "type": "web_search_results",
                    "urls": results["filenames"],
                }
            )
            form_data["files"] = files
        else:
            await event_emitter(
                {
                    "type": "status",
                    "data": {
                        "action": "web_search",
                        "description": "No search results found",
                        "query": searchQuery,
                        "done": True,
                        "error": True,
                    },
                }
            )
    except Exception as e:
        log.exception(e)
        await event_emitter(
            {
                "type": "status",
                "data": {
                    "action": "web_search",
                    "description": 'Error searching "{{searchQuery}}"',
                    "query": searchQuery,
                    "done": True,
                    "error": True,
                },
            }
        )

    return form_data
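

# Image generation feature: optionally rewrites the conversation into an
# image prompt, calls the image generation backend, streams the resulting
# image(s) back as message events, and leaves a system message so the model
# can acknowledge the generated image (or the failure).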
async def chat_image_generation_handler(
    request: Request, form_data: dict, extra_params: dict, user
):
    __event_emitter__ = extra_params["__event_emitter__"]
    await __event_emitter__(
        {
            "type": "status",
            "data": {"description": "Generating an image", "done": False},
        }
    )

    messages = form_data["messages"]
    user_message = get_last_user_message(messages)

    prompt = user_message
    negative_prompt = ""

    if request.app.state.config.ENABLE_IMAGE_PROMPT_GENERATION:
        try:
            res = await generate_image_prompt(
                request,
                {
                    "model": form_data["model"],
                    "messages": messages,
                },
                user,
            )

            response = res["choices"][0]["message"]["content"]

            try:
                bracket_start = response.find("{")
                bracket_end = response.rfind("}") + 1

                if bracket_start == -1 or bracket_end == -1:
                    raise Exception("No JSON object found in the response")

                response = response[bracket_start:bracket_end]
                response = json.loads(response)
                prompt = response.get("prompt", [])
            except Exception as e:
                prompt = user_message
        except Exception as e:
            log.exception(e)
            prompt = user_message

    system_message_content = ""

    try:
        images = await image_generations(
            request=request,
            form_data=GenerateImageForm(**{"prompt": prompt}),
            user=user,
        )

        await __event_emitter__(
            {
                "type": "status",
                "data": {"description": "Generated an image", "done": True},
            }
        )

        for image in images:
            await __event_emitter__(
                {
                    "type": "message",
                    "data": {"content": f"![Generated Image]({image['url']})"},
                }
            )

        system_message_content = "<context>User is shown the generated image, tell the user that the image has been generated</context>"
    except Exception as e:
        log.exception(e)
        await __event_emitter__(
            {
                "type": "status",
                "data": {
                    "description": "An error occurred while generating an image",
                    "done": True,
                },
            }
        )

        system_message_content = "<context>Unable to generate an image, tell the user that an error occurred</context>"

    if system_message_content:
        form_data["messages"] = add_or_update_system_message(
            system_message_content, form_data["messages"]
        )

    return form_data
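

# Retrieval for uploaded/knowledge files: generates retrieval queries for the
# conversation, then runs the (optionally hybrid) search over the attached
# files in a worker thread and returns the matching chunks as "sources".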
async def chat_completion_files_handler(
    request: Request, body: dict, user: UserModel
) -> tuple[dict, dict[str, list]]:
    sources = []

    if files := body.get("metadata", {}).get("files", None):
        try:
            queries_response = await generate_queries(
                request,
                {
                    "model": body["model"],
                    "messages": body["messages"],
                    "type": "retrieval",
                },
                user,
            )
            queries_response = queries_response["choices"][0]["message"]["content"]

            try:
                bracket_start = queries_response.find("{")
                bracket_end = queries_response.rfind("}") + 1

                if bracket_start == -1 or bracket_end == -1:
                    raise Exception("No JSON object found in the response")

                queries_response = queries_response[bracket_start:bracket_end]
                queries_response = json.loads(queries_response)
            except Exception as e:
                queries_response = {"queries": [queries_response]}

            queries = queries_response.get("queries", [])
        except Exception as e:
            queries = []

        if len(queries) == 0:
            queries = [get_last_user_message(body["messages"])]

        try:
            # Offload get_sources_from_files to a separate thread
            loop = asyncio.get_running_loop()
            with ThreadPoolExecutor() as executor:
                sources = await loop.run_in_executor(
                    executor,
                    lambda: get_sources_from_files(
                        files=files,
                        queries=queries,
                        embedding_function=request.app.state.EMBEDDING_FUNCTION,
                        k=request.app.state.config.TOP_K,
                        reranking_function=request.app.state.rf,
                        r=request.app.state.config.RELEVANCE_THRESHOLD,
                        hybrid_search=request.app.state.config.ENABLE_RAG_HYBRID_SEARCH,
                    ),
                )
        except Exception as e:
            log.exception(e)

    log.debug(f"rag_contexts:sources: {sources}")
    return body, {"sources": sources}
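

# Maps advanced model params onto the payload: Ollama expects them under
# `options`, while OpenAI-compatible backends take them as top-level fields.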
def apply_params_to_form_data(form_data, model):
    params = form_data.pop("params", {})
    if model.get("ollama"):
        form_data["options"] = params

        if "format" in params:
            form_data["format"] = params["format"]

        if "keep_alive" in params:
            form_data["keep_alive"] = params["keep_alive"]
    else:
        if "seed" in params:
            form_data["seed"] = params["seed"]

        if "stop" in params:
            form_data["stop"] = params["stop"]

        if "temperature" in params:
            form_data["temperature"] = params["temperature"]

        if "top_p" in params:
            form_data["top_p"] = params["top_p"]

        if "frequency_penalty" in params:
            form_data["frequency_penalty"] = params["frequency_penalty"]

    return form_data
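

# Request-side middleware: enriches an incoming chat completion payload with
# model knowledge, web search results, filter functions, tools, and file
# retrieval, then injects the collected context via the RAG template. Returns
# the updated payload plus any events to forward to the client.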
async def process_chat_payload(request, form_data, metadata, user, model):
    form_data = apply_params_to_form_data(form_data, model)
    log.debug(f"form_data: {form_data}")

    event_emitter = get_event_emitter(metadata)
    event_call = get_event_call(metadata)

    extra_params = {
        "__event_emitter__": event_emitter,
        "__event_call__": event_call,
        "__user__": {
            "id": user.id,
            "email": user.email,
            "name": user.name,
            "role": user.role,
        },
        "__metadata__": metadata,
        "__request__": request,
    }

    # Initialize events to store additional event to be sent to the client
    # Initialize contexts and citation
    models = request.app.state.MODELS
    events = []
    sources = []

    user_message = get_last_user_message(form_data["messages"])
    model_knowledge = model.get("info", {}).get("meta", {}).get("knowledge", False)

    if model_knowledge:
        await event_emitter(
            {
                "type": "status",
                "data": {
                    "action": "knowledge_search",
                    "query": user_message,
                    "done": False,
                },
            }
        )

        knowledge_files = []
        for item in model_knowledge:
            if item.get("collection_name"):
                knowledge_files.append(
                    {
                        "id": item.get("collection_name"),
                        "name": item.get("name"),
                        "legacy": True,
                    }
                )
            elif item.get("collection_names"):
                knowledge_files.append(
                    {
                        "name": item.get("name"),
                        "type": "collection",
                        "collection_names": item.get("collection_names"),
                        "legacy": True,
                    }
                )
            else:
                knowledge_files.append(item)

        files = form_data.get("files", [])
        files.extend(knowledge_files)
        form_data["files"] = files

    features = form_data.pop("features", None)
    if features:
        if "web_search" in features and features["web_search"]:
            form_data = await chat_web_search_handler(
                request, form_data, extra_params, user
            )

        if "image_generation" in features and features["image_generation"]:
            form_data = await chat_image_generation_handler(
                request, form_data, extra_params, user
            )

    try:
        form_data, flags = await chat_completion_filter_functions_handler(
            request, form_data, model, extra_params
        )
    except Exception as e:
        raise Exception(f"Error: {e}")

    tool_ids = form_data.pop("tool_ids", None)
    files = form_data.pop("files", None)

    # Remove files duplicates
    if files:
        files = list({json.dumps(f, sort_keys=True): f for f in files}.values())

    metadata = {
        **metadata,
        "tool_ids": tool_ids,
        "files": files,
    }
    form_data["metadata"] = metadata

    try:
        form_data, flags = await chat_completion_tools_handler(
            request, form_data, user, models, extra_params
        )
        sources.extend(flags.get("sources", []))
    except Exception as e:
        log.exception(e)

    try:
        form_data, flags = await chat_completion_files_handler(request, form_data, user)
        sources.extend(flags.get("sources", []))
    except Exception as e:
        log.exception(e)

    # If context is not empty, insert it into the messages
    if len(sources) > 0:
        context_string = ""
        for source_idx, source in enumerate(sources):
            source_id = source.get("source", {}).get("name", "")

            if "document" in source:
                for doc_idx, doc_context in enumerate(source["document"]):
                    metadata = source.get("metadata")
                    doc_source_id = None

                    if metadata:
                        doc_source_id = metadata[doc_idx].get("source", source_id)

                    if source_id:
                        context_string += f"<source><source_id>{doc_source_id if doc_source_id is not None else source_id}</source_id><source_context>{doc_context}</source_context></source>\n"
                    else:
                        # If there is no source_id, then do not include the source_id tag
                        context_string += f"<source><source_context>{doc_context}</source_context></source>\n"

        context_string = context_string.strip()
        prompt = get_last_user_message(form_data["messages"])

        if prompt is None:
            raise Exception("No user message found")
        if (
            request.app.state.config.RELEVANCE_THRESHOLD == 0
            and context_string.strip() == ""
        ):
            log.debug(
                "With a 0 relevancy threshold for RAG, the context cannot be empty"
            )

        # Workaround for Ollama 2.0+ system prompt issue
        # TODO: replace with add_or_update_system_message
        if model["owned_by"] == "ollama":
            form_data["messages"] = prepend_to_first_user_message_content(
                rag_template(
                    request.app.state.config.RAG_TEMPLATE, context_string, prompt
                ),
                form_data["messages"],
            )
        else:
            form_data["messages"] = add_or_update_system_message(
                rag_template(
                    request.app.state.config.RAG_TEMPLATE, context_string, prompt
                ),
                form_data["messages"],
            )

    # If there are citations, add them to the data_items
    sources = [source for source in sources if source.get("source", {}).get("name", "")]
    if len(sources) > 0:
        events.append({"sources": sources})

    if model_knowledge:
        await event_emitter(
            {
                "type": "status",
                "data": {
                    "action": "knowledge_search",
                    "query": user_message,
                    "done": True,
                    "hidden": True,
                },
            }
        )

    return form_data, events
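

# Response-side middleware: persists the assistant message, emits completion
# events, fires a webhook when the user is inactive, and kicks off follow-up
# tasks (title/tags generation) once the response is finished.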
async def process_chat_response(
    request, response, form_data, user, events, metadata, tasks
):
    async def background_tasks_handler():
        message_map = Chats.get_messages_by_chat_id(metadata["chat_id"])
        message = message_map.get(metadata["message_id"]) if message_map else None

        if message:
            messages = get_message_list(message_map, message.get("id"))

            if tasks:
                if TASKS.TITLE_GENERATION in tasks:
                    if tasks[TASKS.TITLE_GENERATION]:
                        res = await generate_title(
                            request,
                            {
                                "model": message["model"],
                                "messages": messages,
                                "chat_id": metadata["chat_id"],
                            },
                            user,
                        )

                        if res and isinstance(res, dict):
                            if len(res.get("choices", [])) == 1:
                                title = (
                                    res.get("choices", [])[0]
                                    .get("message", {})
                                    .get(
                                        "content",
                                        message.get("content", "New Chat"),
                                    )
                                ).strip()
                            else:
                                title = None

                            if not title:
                                title = messages[0].get("content", "New Chat")

                            Chats.update_chat_title_by_id(metadata["chat_id"], title)

                            await event_emitter(
                                {
                                    "type": "chat:title",
                                    "data": title,
                                }
                            )
                    elif len(messages) == 2:
                        title = messages[0].get("content", "New Chat")

                        Chats.update_chat_title_by_id(metadata["chat_id"], title)

                        await event_emitter(
                            {
                                "type": "chat:title",
                                "data": message.get("content", "New Chat"),
                            }
                        )

                if TASKS.TAGS_GENERATION in tasks and tasks[TASKS.TAGS_GENERATION]:
                    res = await generate_chat_tags(
                        request,
                        {
                            "model": message["model"],
                            "messages": messages,
                            "chat_id": metadata["chat_id"],
                        },
                        user,
                    )

                    if res and isinstance(res, dict):
                        if len(res.get("choices", [])) == 1:
                            tags_string = (
                                res.get("choices", [])[0]
                                .get("message", {})
                                .get("content", "")
                            )
                        else:
                            tags_string = ""

                        tags_string = tags_string[
                            tags_string.find("{") : tags_string.rfind("}") + 1
                        ]

                        try:
                            tags = json.loads(tags_string).get("tags", [])
                            Chats.update_chat_tags_by_id(
                                metadata["chat_id"], tags, user
                            )

                            await event_emitter(
                                {
                                    "type": "chat:tags",
                                    "data": tags,
                                }
                            )
                        except Exception as e:
                            pass

    event_emitter = None
    if (
        "session_id" in metadata
        and metadata["session_id"]
        and "chat_id" in metadata
        and metadata["chat_id"]
        and "message_id" in metadata
        and metadata["message_id"]
    ):
        event_emitter = get_event_emitter(metadata)

    if not isinstance(response, StreamingResponse):
        if event_emitter:
            if "selected_model_id" in response:
                Chats.upsert_message_to_chat_by_id_and_message_id(
                    metadata["chat_id"],
                    metadata["message_id"],
                    {
                        "selectedModelId": response["selected_model_id"],
                    },
                )

            if response.get("choices", [])[0].get("message", {}).get("content"):
                content = response["choices"][0]["message"]["content"]

                if content:
                    await event_emitter(
                        {
                            "type": "chat:completion",
                            "data": response,
                        }
                    )

                    title = Chats.get_chat_title_by_id(metadata["chat_id"])

                    await event_emitter(
                        {
                            "type": "chat:completion",
                            "data": {
                                "done": True,
                                "content": content,
                                "title": title,
                            },
                        }
                    )

                    # Save message in the database
                    Chats.upsert_message_to_chat_by_id_and_message_id(
                        metadata["chat_id"],
                        metadata["message_id"],
                        {
                            "content": content,
                        },
                    )

                    # Send a webhook notification if the user is not active
                    if get_active_status_by_user_id(user.id) is None:
                        webhook_url = Users.get_user_webhook_url_by_id(user.id)
                        if webhook_url:
                            post_webhook(
                                webhook_url,
                                f"{title} - {request.app.state.config.WEBUI_URL}/c/{metadata['chat_id']}\n\n{content}",
                                {
                                    "action": "chat",
                                    "message": content,
                                    "title": title,
                                    "url": f"{request.app.state.config.WEBUI_URL}/c/{metadata['chat_id']}",
                                },
                            )

                    await background_tasks_handler()

            return response
        else:
            return response
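
    # Streaming responses: only SSE ("text/event-stream") and NDJSON
    # ("application/x-ndjson") bodies are post-processed below; anything else
    # is returned to the client untouched.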
    if not any(
        content_type in response.headers["Content-Type"]
        for content_type in ["text/event-stream", "application/x-ndjson"]
    ):
        return response

    if event_emitter:
        task_id = str(uuid4())  # Create a unique task ID.

        # Handle as a background task
        async def post_response_handler(response, events):
            message = Chats.get_message_by_id_and_message_id(
                metadata["chat_id"], metadata["message_id"]
            )
            content = message.get("content", "") if message else ""

            try:
                for event in events:
                    await event_emitter(
                        {
                            "type": "chat:completion",
                            "data": event,
                        }
                    )

                    # Save message in the database
                    Chats.upsert_message_to_chat_by_id_and_message_id(
                        metadata["chat_id"],
                        metadata["message_id"],
                        {
                            **event,
                        },
                    )

                # We might want to disable this by default
                detect_reasoning = True
                reasoning_tags = ["think", "reason", "reasoning", "thought"]
                current_tag = None

                reasoning_start_time = None
                reasoning_content = ""
                ongoing_content = ""
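
                # Reasoning tags (e.g. <think>...</think>) emitted by the model
                # are folded into a collapsible <details> block so the UI can
                # show "Thought for N seconds" instead of raw reasoning markup.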
                async for line in response.body_iterator:
                    line = line.decode("utf-8") if isinstance(line, bytes) else line
                    data = line

                    # Skip empty lines
                    if not data.strip():
                        continue

                    # "data:" is the prefix for each event
                    if not data.startswith("data:"):
                        continue

                    # Remove the prefix
                    data = data[len("data:") :].strip()

                    try:
                        data = json.loads(data)

                        if "selected_model_id" in data:
                            Chats.upsert_message_to_chat_by_id_and_message_id(
                                metadata["chat_id"],
                                metadata["message_id"],
                                {
                                    "selectedModelId": data["selected_model_id"],
                                },
                            )
                        else:
                            value = (
                                data.get("choices", [])[0]
                                .get("delta", {})
                                .get("content")
                            )

                            if value:
                                content = f"{content}{value}"

                                if detect_reasoning:
                                    for tag in reasoning_tags:
                                        start_tag = f"<{tag}>\n"
                                        end_tag = f"</{tag}>\n"

                                        if start_tag in content:
                                            # Remove the start tag
                                            content = content.replace(start_tag, "")
                                            ongoing_content = content

                                            reasoning_start_time = time.time()
                                            reasoning_content = ""

                                            current_tag = tag
                                            break

                                    if reasoning_start_time is not None:
                                        # Remove the last value from the content
                                        content = content[: -len(value)]

                                        reasoning_content += value

                                        end_tag = f"</{current_tag}>\n"
                                        if end_tag in reasoning_content:
                                            reasoning_end_time = time.time()
                                            reasoning_duration = int(
                                                reasoning_end_time
                                                - reasoning_start_time
                                            )
                                            reasoning_content = (
                                                reasoning_content.strip(
                                                    f"<{current_tag}>\n"
                                                )
                                                .strip(end_tag)
                                                .strip()
                                            )

                                            if reasoning_content:
                                                reasoning_display_content = "\n".join(
                                                    (
                                                        f"> {line}"
                                                        if not line.startswith(">")
                                                        else line
                                                    )
                                                    for line in reasoning_content.splitlines()
                                                )

                                                # Format reasoning with <details> tag
                                                content = f"{ongoing_content}<details>\n<summary>Thought for {reasoning_duration} seconds</summary>\n{reasoning_display_content}\n</details>\n"
                                            else:
                                                content = ""

                                            reasoning_start_time = None
                                        else:
                                            reasoning_display_content = "\n".join(
                                                (
                                                    f"> {line}"
                                                    if not line.startswith(">")
                                                    else line
                                                )
                                                for line in reasoning_content.splitlines()
                                            )

                                            # Show ongoing thought process
                                            content = f"{ongoing_content}<details>\n<summary>Thinking… <loading/></summary>\n{reasoning_display_content}\n</details>\n"

                                if ENABLE_REALTIME_CHAT_SAVE:
                                    # Save message in the database
                                    Chats.upsert_message_to_chat_by_id_and_message_id(
                                        metadata["chat_id"],
                                        metadata["message_id"],
                                        {
                                            "content": content,
                                        },
                                    )
                                else:
                                    data = {
                                        "content": content,
                                    }

                        await event_emitter(
                            {
                                "type": "chat:completion",
                                "data": data,
                            }
                        )
                    except Exception as e:
                        done = "data: [DONE]" in line
                        if done:
                            pass
                        else:
                            continue

                title = Chats.get_chat_title_by_id(metadata["chat_id"])
                data = {"done": True, "content": content, "title": title}

                if not ENABLE_REALTIME_CHAT_SAVE:
                    # Save message in the database
                    Chats.upsert_message_to_chat_by_id_and_message_id(
                        metadata["chat_id"],
                        metadata["message_id"],
                        {
                            "content": content,
                        },
                    )

                # Send a webhook notification if the user is not active
                if get_active_status_by_user_id(user.id) is None:
                    webhook_url = Users.get_user_webhook_url_by_id(user.id)
                    if webhook_url:
                        post_webhook(
                            webhook_url,
                            f"{title} - {request.app.state.config.WEBUI_URL}/c/{metadata['chat_id']}\n\n{content}",
                            {
                                "action": "chat",
                                "message": content,
                                "title": title,
                                "url": f"{request.app.state.config.WEBUI_URL}/c/{metadata['chat_id']}",
                            },
                        )

                await event_emitter(
                    {
                        "type": "chat:completion",
                        "data": data,
                    }
                )

                await background_tasks_handler()
            except asyncio.CancelledError:
                print("Task was cancelled!")
                await event_emitter({"type": "task-cancelled"})

                if not ENABLE_REALTIME_CHAT_SAVE:
                    # Save message in the database
                    Chats.upsert_message_to_chat_by_id_and_message_id(
                        metadata["chat_id"],
                        metadata["message_id"],
                        {
                            "content": content,
                        },
                    )

            if response.background is not None:
                await response.background()

        # background_tasks.add_task(post_response_handler, response, events)
        task_id, _ = create_task(post_response_handler(response, events))
        return {"status": True, "task_id": task_id}
    else:
        # Fallback to the original response
        async def stream_wrapper(original_generator, events):
            def wrap_item(item):
                return f"data: {item}\n\n"

            for event in events:
                yield wrap_item(json.dumps(event))

            async for data in original_generator:
                yield data

        return StreamingResponse(
            stream_wrapper(response.body_iterator, events),
            headers=dict(response.headers),
            background=response.background,
        )
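

# NOTE (illustrative sketch, not part of this module): process_chat_payload and
# process_chat_response are driven by the chat completion route, roughly:
#
#     form_data, events = await process_chat_payload(
#         request, form_data, metadata, user, model
#     )
#     response = await generate_chat_completion(request, form_data, user)
#     return await process_chat_response(
#         request, response, form_data, user, events, metadata, tasks
#     )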