config.py

import os
import chromadb
from chromadb import Settings
from base64 import b64encode
from bs4 import BeautifulSoup
from pathlib import Path
import json
import markdown
import requests
import shutil
from secrets import token_bytes

from constants import ERROR_MESSAGES

try:
    from dotenv import load_dotenv, find_dotenv

    load_dotenv(find_dotenv("../.env"))
except ImportError:
    print("dotenv not installed, skipping...")

WEBUI_NAME = "Open WebUI"
shutil.copyfile("../build/favicon.png", "./static/favicon.png")

####################################
# ENV (dev,test,prod)
####################################

ENV = os.environ.get("ENV", "dev")

try:
    with open(f"../package.json", "r") as f:
        PACKAGE_DATA = json.load(f)
except:
    PACKAGE_DATA = {"version": "0.0.0"}

VERSION = PACKAGE_DATA["version"]


# Function to parse each section
def parse_section(section):
    items = []
    for li in section.find_all("li"):
        # Extract raw HTML string
        raw_html = str(li)

        # Extract text without HTML tags
        text = li.get_text(separator=" ", strip=True)

        # Split into title and content
        parts = text.split(": ", 1)
        title = parts[0].strip() if len(parts) > 1 else ""
        content = parts[1].strip() if len(parts) > 1 else text

        items.append({"title": title, "content": content, "raw": raw_html})
    return items
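
# Illustrative only (the bullet below is hypothetical): given a changelog item
# such as <li>Added: dark mode toggle</li>, parse_section produces
#   {"title": "Added", "content": "dark mode toggle",
#    "raw": "<li>Added: dark mode toggle</li>"}
# A bullet without ": " yields an empty title and the full text as content.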

try:
    with open("../CHANGELOG.md", "r") as file:
        changelog_content = file.read()
except:
    changelog_content = ""

# Convert markdown content to HTML
html_content = markdown.markdown(changelog_content)

# Parse the HTML content
soup = BeautifulSoup(html_content, "html.parser")

# Initialize JSON structure
changelog_json = {}

# Iterate over each version
for version in soup.find_all("h2"):
    version_number = version.get_text().strip().split(" - ")[0][1:-1]  # Remove brackets
    date = version.get_text().strip().split(" - ")[1]

    version_data = {"date": date}

    # Find the next sibling that is a h3 tag (section title)
    current = version.find_next_sibling()
    print(current)

    while current and current.name != "h2":
        if current.name == "h3":
            section_title = current.get_text().lower()  # e.g., "added", "fixed"
            section_items = parse_section(current.find_next_sibling("ul"))
            version_data[section_title] = section_items

        # Move to the next element
        current = current.find_next_sibling()

    changelog_json[version_number] = version_data

CHANGELOG = changelog_json
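
# Illustrative shape of CHANGELOG for a hypothetical entry
# "## [0.1.0] - 2024-01-01" followed by an "### Added" list:
# {
#     "0.1.0": {
#         "date": "2024-01-01",
#         "added": [{"title": "...", "content": "...", "raw": "<li>...</li>"}],
#     }
# }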

####################################
# CUSTOM_NAME
####################################

CUSTOM_NAME = os.environ.get("CUSTOM_NAME", "")

if CUSTOM_NAME:
    r = requests.get(f"https://api.openwebui.com/api/v1/custom/{CUSTOM_NAME}")
    data = r.json()
    if "logo" in data:
        url = (
            f"https://api.openwebui.com{data['logo']}"
            if data["logo"][0] == "/"
            else data["logo"]
        )

        r = requests.get(url, stream=True)
        if r.status_code == 200:
            with open("./static/favicon.png", "wb") as f:
                r.raw.decode_content = True
                shutil.copyfileobj(r.raw, f)

    WEBUI_NAME = data["name"]

####################################
# DATA/FRONTEND BUILD DIR
####################################

DATA_DIR = str(Path(os.getenv("DATA_DIR", "./data")).resolve())
FRONTEND_BUILD_DIR = str(Path(os.getenv("FRONTEND_BUILD_DIR", "../build")))

try:
    with open(f"{DATA_DIR}/config.json", "r") as f:
        CONFIG_DATA = json.load(f)
except:
    CONFIG_DATA = {}
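
# Illustrative only: config.json is optional. The keys actually read further
# below are nested under "ui", e.g.
# {
#     "ui": {
#         "prompt_suggestions": [{"title": ["...", "..."], "content": "..."}]
#     }
# }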

####################################
# File Upload DIR
####################################

UPLOAD_DIR = f"{DATA_DIR}/uploads"
Path(UPLOAD_DIR).mkdir(parents=True, exist_ok=True)

####################################
# Cache DIR
####################################

CACHE_DIR = f"{DATA_DIR}/cache"
Path(CACHE_DIR).mkdir(parents=True, exist_ok=True)

####################################
# Docs DIR
####################################

DOCS_DIR = f"{DATA_DIR}/docs"
Path(DOCS_DIR).mkdir(parents=True, exist_ok=True)

####################################
# OLLAMA_API_BASE_URL
####################################

OLLAMA_API_BASE_URL = os.environ.get(
    "OLLAMA_API_BASE_URL", "http://localhost:11434/api"
)

if ENV == "prod":
    if OLLAMA_API_BASE_URL == "/ollama/api":
        OLLAMA_API_BASE_URL = "http://host.docker.internal:11434/api"

####################################
# OPENAI_API
####################################

OPENAI_API_KEY = os.environ.get("OPENAI_API_KEY", "")
OPENAI_API_BASE_URL = os.environ.get("OPENAI_API_BASE_URL", "")

if OPENAI_API_BASE_URL == "":
    OPENAI_API_BASE_URL = "https://api.openai.com/v1"

####################################
# WEBUI
####################################

ENABLE_SIGNUP = os.environ.get("ENABLE_SIGNUP", True)
DEFAULT_MODELS = os.environ.get("DEFAULT_MODELS", None)

DEFAULT_PROMPT_SUGGESTIONS = (
    CONFIG_DATA["ui"]["prompt_suggestions"]
    if "ui" in CONFIG_DATA
    and "prompt_suggestions" in CONFIG_DATA["ui"]
    and type(CONFIG_DATA["ui"]["prompt_suggestions"]) is list
    else [
        {
            "title": ["Help me study", "vocabulary for a college entrance exam"],
            "content": "Help me study vocabulary: write a sentence for me to fill in the blank, and I'll try to pick the correct option.",
        },
        {
            "title": ["Give me ideas", "for what to do with my kids' art"],
            "content": "What are 5 creative things I could do with my kids' art? I don't want to throw them away, but it's also so much clutter.",
        },
        {
            "title": ["Tell me a fun fact", "about the Roman Empire"],
            "content": "Tell me a random fun fact about the Roman Empire",
        },
        {
            "title": ["Show me a code snippet", "of a website's sticky header"],
            "content": "Show me a code snippet of a website's sticky header in CSS and JavaScript.",
        },
    ]
)

DEFAULT_USER_ROLE = os.getenv("DEFAULT_USER_ROLE", "pending")
USER_PERMISSIONS = {"chat": {"deletion": True}}

####################################
# WEBUI_VERSION
####################################

WEBUI_VERSION = os.environ.get("WEBUI_VERSION", "v1.0.0-alpha.100")

####################################
# WEBUI_AUTH (Required for security)
####################################

WEBUI_AUTH = True

####################################
# WEBUI_SECRET_KEY
####################################

WEBUI_SECRET_KEY = os.environ.get(
    "WEBUI_SECRET_KEY",
    os.environ.get(
        "WEBUI_JWT_SECRET_KEY", "t0p-s3cr3t"
    ),  # DEPRECATED: remove at next major version
)

if WEBUI_AUTH and WEBUI_SECRET_KEY == "":
    raise ValueError(ERROR_MESSAGES.ENV_VAR_NOT_FOUND)
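
# Illustrative only: the default key above is a placeholder, not a secure
# secret. A random value could be generated for the WEBUI_SECRET_KEY env var
# before startup, e.g.
#   python -c "from secrets import token_bytes; print(token_bytes(32).hex())"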

####################################
# RAG
####################################

CHROMA_DATA_PATH = f"{DATA_DIR}/vector_db"
# This uses the model defined in the Dockerfile ENV variable. If you don't use
# Docker or a Docker-based deployment such as k8s, the default embedding model
# (all-MiniLM-L6-v2) will be used.
RAG_EMBEDDING_MODEL = os.environ.get("RAG_EMBEDDING_MODEL", "all-MiniLM-L6-v2")
# Device type for embedding models: "cpu" (default), "cuda" (NVIDIA GPU
# required), or "mps" (Apple Silicon). Choosing the right one can improve
# performance.
RAG_EMBEDDING_MODEL_DEVICE_TYPE = os.environ.get(
    "RAG_EMBEDDING_MODEL_DEVICE_TYPE", "cpu"
)

CHROMA_CLIENT = chromadb.PersistentClient(
    path=CHROMA_DATA_PATH,
    settings=Settings(allow_reset=True, anonymized_telemetry=False),
)
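
# Illustrative usage only (the collection name and query text are hypothetical):
# document chunks are stored and retrieved via collections on this client, e.g.
#   collection = CHROMA_CLIENT.get_or_create_collection(name="docs")
#   results = collection.query(query_texts=["example question"], n_results=4)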

CHUNK_SIZE = 1500
CHUNK_OVERLAP = 100

RAG_TEMPLATE = """Use the following context as your learned knowledge, inside <context></context> XML tags.
<context>
[context]
</context>
When answering the user:
- If you don't know, just say that you don't know.
- If you are not sure, ask for clarification.
Avoid mentioning that you obtained the information from the context.
And answer according to the language of the user's question.
Given the context information, answer the query.
Query: [query]"""
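
# Illustrative only: the [context] and [query] placeholders are expected to be
# filled in at query time elsewhere in the app, e.g.
#   prompt = RAG_TEMPLATE.replace("[context]", retrieved_chunks).replace("[query]", user_query)
# where retrieved_chunks and user_query are hypothetical names.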

####################################
# Transcribe
####################################

WHISPER_MODEL = os.getenv("WHISPER_MODEL", "base")
WHISPER_MODEL_DIR = os.getenv("WHISPER_MODEL_DIR", f"{CACHE_DIR}/whisper/models")

####################################
# Images
####################################

AUTOMATIC1111_BASE_URL = os.getenv("AUTOMATIC1111_BASE_URL", "")