# payload.py
  1. from open_webui.utils.task import prompt_template, prompt_variables_template
  2. from open_webui.utils.misc import (
  3. add_or_update_system_message,
  4. )
  5. from typing import Callable, Optional
  6. import json
  7. # inplace function: form_data is modified
  8. def apply_model_system_prompt_to_body(
  9. params: dict, form_data: dict, metadata: Optional[dict] = None, user=None
  10. ) -> dict:
  11. system = params.get("system", None)
  12. if not system:
  13. return form_data
  14. # Metadata (WebUI Usage)
  15. if metadata:
  16. variables = metadata.get("variables", {})
  17. if variables:
  18. system = prompt_variables_template(system, variables)
  19. # Legacy (API Usage)
  20. if user:
  21. template_params = {
  22. "user_name": user.name,
  23. "user_location": user.info.get("location") if user.info else None,
  24. }
  25. else:
  26. template_params = {}
  27. system = prompt_template(system, **template_params)
  28. form_data["messages"] = add_or_update_system_message(
  29. system, form_data.get("messages", [])
  30. )
  31. return form_data
  32. # inplace function: form_data is modified
  33. def apply_model_params_to_body(
  34. params: dict, form_data: dict, mappings: dict[str, Callable]
  35. ) -> dict:
  36. if not params:
  37. return form_data
  38. for key, cast_func in mappings.items():
  39. if (value := params.get(key)) is not None:
  40. form_data[key] = cast_func(value)
  41. return form_data
  42. # inplace function: form_data is modified
  43. def apply_model_params_to_body_openai(params: dict, form_data: dict) -> dict:
  44. mappings = {
  45. "temperature": float,
  46. "top_p": float,
  47. "max_tokens": int,
  48. "frequency_penalty": float,
  49. "reasoning_effort": str,
  50. "seed": lambda x: x,
  51. "stop": lambda x: [bytes(s, "utf-8").decode("unicode_escape") for s in x],
  52. }
  53. return apply_model_params_to_body(params, form_data, mappings)
  54. def apply_model_params_to_body_ollama(params: dict, form_data: dict) -> dict:
  55. # Convert OpenAI parameter names to Ollama parameter names if needed.
  56. name_differences = {
  57. "max_tokens": "num_predict",
  58. }
  59. for key, value in name_differences.items():
  60. if (param := params.get(key, None)) is not None:
  61. # Copy the parameter to new name then delete it, to prevent Ollama warning of invalid option provided
  62. params[value] = params[key]
  63. del params[key]
  64. # See https://github.com/ollama/ollama/blob/main/docs/api.md#request-8
  65. mappings = {
  66. "temperature": float,
  67. "top_p": float,
  68. "seed": lambda x: x,
  69. "mirostat": int,
  70. "mirostat_eta": float,
  71. "mirostat_tau": float,
  72. "num_ctx": int,
  73. "num_batch": int,
  74. "num_keep": int,
  75. "num_predict": int,
  76. "repeat_last_n": int,
  77. "top_k": int,
  78. "min_p": float,
  79. "typical_p": float,
  80. "repeat_penalty": float,
  81. "presence_penalty": float,
  82. "frequency_penalty": float,
  83. "penalize_newline": bool,
  84. "stop": lambda x: [bytes(s, "utf-8").decode("unicode_escape") for s in x],
  85. "numa": bool,
  86. "num_gpu": int,
  87. "main_gpu": int,
  88. "low_vram": bool,
  89. "vocab_only": bool,
  90. "use_mmap": bool,
  91. "use_mlock": bool,
  92. "num_thread": int,
  93. }
  94. return apply_model_params_to_body(params, form_data, mappings)
  95. def convert_messages_openai_to_ollama(messages: list[dict]) -> list[dict]:
  96. ollama_messages = []
  97. for message in messages:
  98. # Initialize the new message structure with the role
  99. new_message = {"role": message["role"]}
  100. content = message.get("content", [])
  101. tool_calls = message.get("tool_calls", None)
  102. tool_call_id = message.get("tool_call_id", None)
  103. # Check if the content is a string (just a simple message)
  104. if isinstance(content, str):
  105. # If the content is a string, it's pure text
  106. new_message["content"] = content
  107. # If message is a tool call, add the tool call id to the message
  108. if tool_call_id:
  109. new_message["tool_call_id"] = tool_call_id
  110. elif tool_calls:
  111. # If tool calls are present, add them to the message
  112. ollama_tool_calls = []
  113. for tool_call in tool_calls:
  114. ollama_tool_call = {
  115. "index": tool_call.get("index", 0),
  116. "id": tool_call.get("id", None),
  117. "function": {
  118. "name": tool_call.get("function", {}).get("name", ""),
  119. "arguments": json.loads(
  120. tool_call.get("function", {}).get("arguments", {})
  121. ),
  122. },
  123. }
  124. ollama_tool_calls.append(ollama_tool_call)
  125. new_message["tool_calls"] = ollama_tool_calls
  126. # Put the content to empty string (Ollama requires an empty string for tool calls)
  127. new_message["content"] = ""
  128. else:
  129. # Otherwise, assume the content is a list of dicts, e.g., text followed by an image URL
  130. content_text = ""
  131. images = []
  132. # Iterate through the list of content items
  133. for item in content:
  134. # Check if it's a text type
  135. if item.get("type") == "text":
  136. content_text += item.get("text", "")
  137. # Check if it's an image URL type
  138. elif item.get("type") == "image_url":
  139. img_url = item.get("image_url", {}).get("url", "")
  140. if img_url:
  141. # If the image url starts with data:, it's a base64 image and should be trimmed
  142. if img_url.startswith("data:"):
  143. img_url = img_url.split(",")[-1]
  144. images.append(img_url)
  145. # Add content text (if any)
  146. if content_text:
  147. new_message["content"] = content_text.strip()
  148. # Add images (if any)
  149. if images:
  150. new_message["images"] = images
  151. # Append the new formatted message to the result
  152. ollama_messages.append(new_message)
  153. return ollama_messages
  154. def convert_payload_openai_to_ollama(openai_payload: dict) -> dict:
  155. """
  156. Converts a payload formatted for OpenAI's API to be compatible with Ollama's API endpoint for chat completions.
  157. Args:
  158. openai_payload (dict): The payload originally designed for OpenAI API usage.
  159. Returns:
  160. dict: A modified payload compatible with the Ollama API.
  161. """
  162. ollama_payload = {}
  163. # Mapping basic model and message details
  164. ollama_payload["model"] = openai_payload.get("model")
  165. ollama_payload["messages"] = convert_messages_openai_to_ollama(
  166. openai_payload.get("messages")
  167. )
  168. ollama_payload["stream"] = openai_payload.get("stream", False)
  169. if "tools" in openai_payload:
  170. ollama_payload["tools"] = openai_payload["tools"]
  171. if "format" in openai_payload:
  172. ollama_payload["format"] = openai_payload["format"]
  173. # If there are advanced parameters in the payload, format them in Ollama's options field
  174. if openai_payload.get("options"):
  175. ollama_payload["options"] = openai_payload["options"]
  176. ollama_options = openai_payload["options"]
  177. # Re-Mapping OpenAI's `max_tokens` -> Ollama's `num_predict`
  178. if "max_tokens" in ollama_options:
  179. ollama_options["num_predict"] = ollama_options["max_tokens"]
  180. del ollama_options[
  181. "max_tokens"
  182. ] # To prevent Ollama warning of invalid option provided
  183. # Ollama lacks a "system" prompt option. It has to be provided as a direct parameter, so we copy it down.
  184. if "system" in ollama_options:
  185. ollama_payload["system"] = ollama_options["system"]
  186. del ollama_options[
  187. "system"
  188. ] # To prevent Ollama warning of invalid option provided
  189. if "metadata" in openai_payload:
  190. ollama_payload["metadata"] = openai_payload["metadata"]
  191. return ollama_payload