payload.py

from open_webui.utils.task import prompt_template, prompt_variables_template
from open_webui.utils.misc import (
    add_or_update_system_message,
)

from typing import Callable, Optional


# inplace function: form_data is modified
def apply_model_system_prompt_to_body(
    params: dict, form_data: dict, metadata: Optional[dict] = None, user=None
) -> dict:
    system = params.get("system", None)
    if not system:
        return form_data

    # Metadata (WebUI Usage)
    if metadata:
        variables = metadata.get("variables", {})
        if variables:
            system = prompt_variables_template(system, variables)

    # Legacy (API Usage)
    if user:
        template_params = {
            "user_name": user.name,
            "user_location": user.info.get("location") if user.info else None,
        }
    else:
        template_params = {}

    system = prompt_template(system, **template_params)

    form_data["messages"] = add_or_update_system_message(
        system, form_data.get("messages", [])
    )
    return form_data
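
# Example (illustrative, assuming prompt_template supports the {{USER_NAME}} /
# {{USER_LOCATION}} placeholders): a model-level system prompt such as
# "Address the user as {{USER_NAME}}." is rendered with the user's details and
# then merged into form_data["messages"] as the leading system message by
# add_or_update_system_message.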


# inplace function: form_data is modified
def apply_model_params_to_body(
    params: dict, form_data: dict, mappings: dict[str, Callable]
) -> dict:
    if not params:
        return form_data

    for key, cast_func in mappings.items():
        if (value := params.get(key)) is not None:
            form_data[key] = cast_func(value)

    return form_data
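
# Example (illustrative): the mappings dict decides both which params are copied
# and how each value is cast, e.g.
#
#   apply_model_params_to_body({"top_k": "40"}, body, {"top_k": int})
#
# writes body["top_k"] = 40, while keys missing from mappings (or set to None
# in params) are left untouched.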


# inplace function: form_data is modified
def apply_model_params_to_body_openai(params: dict, form_data: dict) -> dict:
    mappings = {
        "temperature": float,
        "top_p": float,
        "max_tokens": int,
        "frequency_penalty": float,
        "reasoning_effort": str,
        "seed": lambda x: x,
        "stop": lambda x: [bytes(s, "utf-8").decode("unicode_escape") for s in x],
    }
    return apply_model_params_to_body(params, form_data, mappings)
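
# Example (illustrative, with a made-up request body): string-valued params are
# cast to the types the OpenAI-style body expects before being merged in, e.g.
#
#   body = {"model": "gpt-4o", "messages": []}
#   apply_model_params_to_body_openai({"temperature": "0.7", "max_tokens": "256"}, body)
#   # body -> {"model": "gpt-4o", "messages": [], "temperature": 0.7, "max_tokens": 256}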


def apply_model_params_to_body_ollama(params: dict, form_data: dict) -> dict:
    opts = [
        "temperature",
        "top_p",
        "seed",
        "mirostat",
        "mirostat_eta",
        "mirostat_tau",
        "num_ctx",
        "num_batch",
        "num_keep",
        "repeat_last_n",
        "tfs_z",
        "top_k",
        "min_p",
        "use_mmap",
        "use_mlock",
        "num_thread",
        "num_gpu",
    ]
    mappings = {i: lambda x: x for i in opts}
    form_data = apply_model_params_to_body(params, form_data, mappings)

    name_differences = {
        "max_tokens": "num_predict",
        "frequency_penalty": "repeat_penalty",
    }

    for key, value in name_differences.items():
        if (param := params.get(key, None)) is not None:
            form_data[value] = param

    return form_data
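
# Example (illustrative): for params {"temperature": 0.8, "max_tokens": 64},
# the Ollama-native option is passed through unchanged and the OpenAI-style
# name is translated, so form_data gains {"temperature": 0.8, "num_predict": 64};
# frequency_penalty would similarly be written out as repeat_penalty.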


def convert_messages_openai_to_ollama(messages: list[dict]) -> list[dict]:
    ollama_messages = []

    for message in messages:
        # Initialize the new message structure with the role
        new_message = {"role": message["role"]}

        content = message.get("content", [])

        # Check if the content is a string (just a simple message)
        if isinstance(content, str):
            # If the content is a string, it's pure text
            new_message["content"] = content
        else:
            # Otherwise, assume the content is a list of dicts, e.g., text followed by an image URL
            content_text = ""
            images = []

            # Iterate through the list of content items
            for item in content:
                # Check if it's a text type
                if item.get("type") == "text":
                    content_text += item.get("text", "")

                # Check if it's an image URL type
                elif item.get("type") == "image_url":
                    img_url = item.get("image_url", {}).get("url", "")
                    if img_url:
                        # If the image url starts with data:, it's a base64 image and should be trimmed
                        if img_url.startswith("data:"):
                            img_url = img_url.split(",")[-1]

                        images.append(img_url)

            # Add content text (if any)
            if content_text:
                new_message["content"] = content_text.strip()

            # Add images (if any)
            if images:
                new_message["images"] = images

        # Append the new formatted message to the result
        ollama_messages.append(new_message)

    return ollama_messages
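
# Example (illustrative): an OpenAI-style multimodal user message such as
#
#   {"role": "user", "content": [
#       {"type": "text", "text": "What is in this picture?"},
#       {"type": "image_url", "image_url": {"url": "data:image/png;base64,AAAA"}}]}
#
# comes back as
#
#   {"role": "user", "content": "What is in this picture?", "images": ["AAAA"]}
#
# i.e. text parts are concatenated into a single string and base64 data URLs are
# stripped down to the raw base64 payload Ollama expects in its "images" field.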


def convert_payload_openai_to_ollama(openai_payload: dict) -> dict:
    """
    Converts a payload formatted for OpenAI's API to be compatible with Ollama's API endpoint for chat completions.

    Args:
        openai_payload (dict): The payload originally designed for OpenAI API usage.

    Returns:
        dict: A modified payload compatible with the Ollama API.
    """
    ollama_payload = {}

    # Mapping basic model and message details
    ollama_payload["model"] = openai_payload.get("model")
    ollama_payload["messages"] = convert_messages_openai_to_ollama(
        openai_payload.get("messages")
    )
    ollama_payload["stream"] = openai_payload.get("stream", False)

    if "tools" in openai_payload:
        ollama_payload["tools"] = openai_payload["tools"]

    if "format" in openai_payload:
        ollama_payload["format"] = openai_payload["format"]

    # If there are advanced parameters in the payload, format them in Ollama's options field
    ollama_options = {}

    if openai_payload.get("options"):
        ollama_payload["options"] = openai_payload["options"]
        ollama_options = openai_payload["options"]

    # Mapping OpenAI's `max_tokens` -> Ollama's `num_predict`
    if "max_tokens" in openai_payload:
        ollama_options["num_predict"] = openai_payload["max_tokens"]

    # Add options to payload if any have been set
    if ollama_options:
        ollama_payload["options"] = ollama_options

    if "metadata" in openai_payload:
        ollama_payload["metadata"] = openai_payload["metadata"]

    return ollama_payload
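

# Illustrative self-check (not part of the original module): running this file
# directly converts a minimal, made-up OpenAI-style chat payload end-to-end and
# prints the Ollama-shaped result; the model name and messages are sample data.
if __name__ == "__main__":
    import json

    sample_openai_payload = {
        "model": "llama3.2",
        "messages": [
            {"role": "system", "content": "You are a helpful assistant."},
            {
                "role": "user",
                "content": [
                    {"type": "text", "text": "Describe this image."},
                    {
                        "type": "image_url",
                        "image_url": {"url": "data:image/png;base64,iVBORw0KGgo="},
                    },
                ],
            },
        ],
        "stream": False,
        "max_tokens": 128,
    }

    print(json.dumps(convert_payload_openai_to_ollama(sample_openai_payload), indent=2))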