# config.py
  1. import os
  2. import sys
  3. import logging
  4. import chromadb
  5. from chromadb import Settings
  6. from base64 import b64encode
  7. from bs4 import BeautifulSoup
  8. from pathlib import Path
  9. import json
  10. import yaml
  11. import markdown
  12. import requests
  13. import shutil
  14. from secrets import token_bytes
  15. from constants import ERROR_MESSAGES
  16. try:
  17. from dotenv import load_dotenv, find_dotenv
  18. load_dotenv(find_dotenv("../.env"))
  19. except ImportError:
  20. log.warning("dotenv not installed, skipping...")
  21. WEBUI_NAME = "Open WebUI"
  22. shutil.copyfile("../build/favicon.png", "./static/favicon.png")
  23. ####################################
  24. # ENV (dev,test,prod)
  25. ####################################
  26. ENV = os.environ.get("ENV", "dev")
  27. try:
  28. with open(f"../package.json", "r") as f:
  29. PACKAGE_DATA = json.load(f)
  30. except:
  31. PACKAGE_DATA = {"version": "0.0.0"}
  32. VERSION = PACKAGE_DATA["version"]
  33. # Function to parse each section
  34. def parse_section(section):
  35. items = []
  36. for li in section.find_all("li"):
  37. # Extract raw HTML string
  38. raw_html = str(li)
  39. # Extract text without HTML tags
  40. text = li.get_text(separator=" ", strip=True)
  41. # Split into title and content
  42. parts = text.split(": ", 1)
  43. title = parts[0].strip() if len(parts) > 1 else ""
  44. content = parts[1].strip() if len(parts) > 1 else text
  45. items.append({"title": title, "content": content, "raw": raw_html})
  46. return items
  47. try:
  48. with open("../CHANGELOG.md", "r") as file:
  49. changelog_content = file.read()
  50. except:
  51. changelog_content = ""
  52. # Convert markdown content to HTML
  53. html_content = markdown.markdown(changelog_content)
  54. # Parse the HTML content
  55. soup = BeautifulSoup(html_content, "html.parser")
  56. # Initialize JSON structure
  57. changelog_json = {}
  58. # Iterate over each version
  59. for version in soup.find_all("h2"):
  60. version_number = version.get_text().strip().split(" - ")[0][1:-1] # Remove brackets
  61. date = version.get_text().strip().split(" - ")[1]
  62. version_data = {"date": date}
  63. # Find the next sibling that is a h3 tag (section title)
  64. current = version.find_next_sibling()
  65. while current and current.name != "h2":
  66. if current.name == "h3":
  67. section_title = current.get_text().lower() # e.g., "added", "fixed"
  68. section_items = parse_section(current.find_next_sibling("ul"))
  69. version_data[section_title] = section_items
  70. # Move to the next element
  71. current = current.find_next_sibling()
  72. changelog_json[version_number] = version_data
  73. CHANGELOG = changelog_json
  74. ####################################
  75. # LOGGING
  76. ####################################
  77. log_levels = ["CRITICAL", "ERROR", "WARNING", "INFO", "DEBUG"]
  78. GLOBAL_LOG_LEVEL = os.environ.get("GLOBAL_LOG_LEVEL", "").upper()
  79. if GLOBAL_LOG_LEVEL in log_levels:
  80. logging.basicConfig(stream=sys.stdout, level=GLOBAL_LOG_LEVEL, force=True)
  81. else:
  82. GLOBAL_LOG_LEVEL = "INFO"
  83. log = logging.getLogger(__name__)
  84. log.info(f"GLOBAL_LOG_LEVEL: {GLOBAL_LOG_LEVEL}")
  85. log_sources = ["AUDIO", "CONFIG", "DB", "IMAGES", "LITELLM", "MAIN", "MODELS", "OLLAMA", "OPENAI", "RAG"]
  86. SRC_LOG_LEVELS = {}
  87. for source in log_sources:
  88. log_env_var = source + "_LOG_LEVEL"
  89. SRC_LOG_LEVELS[source] = os.environ.get(log_env_var, "").upper()
  90. if SRC_LOG_LEVELS[source] not in log_levels:
  91. SRC_LOG_LEVELS[source] = GLOBAL_LOG_LEVEL
  92. log.info(f"{log_env_var}: {SRC_LOG_LEVELS[source]}")
  93. log.setLevel(SRC_LOG_LEVELS["CONFIG"])
  94. ####################################
  95. # CUSTOM_NAME
  96. ####################################
  97. CUSTOM_NAME = os.environ.get("CUSTOM_NAME", "")
  98. if CUSTOM_NAME:
  99. try:
  100. r = requests.get(f"https://api.openwebui.com/api/v1/custom/{CUSTOM_NAME}")
  101. data = r.json()
  102. if r.ok:
  103. if "logo" in data:
  104. url = (
  105. f"https://api.openwebui.com{data['logo']}"
  106. if data["logo"][0] == "/"
  107. else data["logo"]
  108. )
  109. r = requests.get(url, stream=True)
  110. if r.status_code == 200:
  111. with open("./static/favicon.png", "wb") as f:
  112. r.raw.decode_content = True
  113. shutil.copyfileobj(r.raw, f)
  114. WEBUI_NAME = data["name"]
  115. except Exception as e:
  116. log.exception(e)
  117. pass
  118. ####################################
  119. # DATA/FRONTEND BUILD DIR
  120. ####################################
  121. DATA_DIR = str(Path(os.getenv("DATA_DIR", "./data")).resolve())
  122. FRONTEND_BUILD_DIR = str(Path(os.getenv("FRONTEND_BUILD_DIR", "../build")))
  123. try:
  124. with open(f"{DATA_DIR}/config.json", "r") as f:
  125. CONFIG_DATA = json.load(f)
  126. except:
  127. CONFIG_DATA = {}
  128. ####################################
  129. # File Upload DIR
  130. ####################################
  131. UPLOAD_DIR = f"{DATA_DIR}/uploads"
  132. Path(UPLOAD_DIR).mkdir(parents=True, exist_ok=True)
  133. ####################################
  134. # Cache DIR
  135. ####################################
  136. CACHE_DIR = f"{DATA_DIR}/cache"
  137. Path(CACHE_DIR).mkdir(parents=True, exist_ok=True)
  138. ####################################
  139. # Docs DIR
  140. ####################################
  141. DOCS_DIR = f"{DATA_DIR}/docs"
  142. Path(DOCS_DIR).mkdir(parents=True, exist_ok=True)
  143. ####################################
  144. # LITELLM_CONFIG
  145. ####################################
  146. def create_config_file(file_path):
  147. directory = os.path.dirname(file_path)
  148. # Check if directory exists, if not, create it
  149. if not os.path.exists(directory):
  150. os.makedirs(directory)
  151. # Data to write into the YAML file
  152. config_data = {
  153. "general_settings": {},
  154. "litellm_settings": {},
  155. "model_list": [],
  156. "router_settings": {},
  157. }
  158. # Write data to YAML file
  159. with open(file_path, "w") as file:
  160. yaml.dump(config_data, file)
  161. LITELLM_CONFIG_PATH = f"{DATA_DIR}/litellm/config.yaml"
  162. if not os.path.exists(LITELLM_CONFIG_PATH):
  163. log.info("Config file doesn't exist. Creating...")
  164. create_config_file(LITELLM_CONFIG_PATH)
  165. log.info("Config file created successfully.")
  166. ####################################
  167. # OLLAMA_BASE_URL
  168. ####################################
  169. OLLAMA_API_BASE_URL = os.environ.get(
  170. "OLLAMA_API_BASE_URL", "http://localhost:11434/api"
  171. )
  172. OLLAMA_BASE_URL = os.environ.get("OLLAMA_BASE_URL", "")
  173. if OLLAMA_BASE_URL == "" and OLLAMA_API_BASE_URL != "":
  174. OLLAMA_BASE_URL = (
  175. OLLAMA_API_BASE_URL[:-4]
  176. if OLLAMA_API_BASE_URL.endswith("/api")
  177. else OLLAMA_API_BASE_URL
  178. )
  179. if ENV == "prod":
  180. if OLLAMA_BASE_URL == "/ollama":
  181. OLLAMA_BASE_URL = "http://host.docker.internal:11434"
  182. OLLAMA_BASE_URLS = os.environ.get("OLLAMA_BASE_URLS", "")
  183. OLLAMA_BASE_URLS = OLLAMA_BASE_URLS if OLLAMA_BASE_URLS != "" else OLLAMA_BASE_URL
  184. OLLAMA_BASE_URLS = [url.strip() for url in OLLAMA_BASE_URLS.split(";")]
  185. ####################################
  186. # OPENAI_API
  187. ####################################
  188. OPENAI_API_KEY = os.environ.get("OPENAI_API_KEY", "")
  189. OPENAI_API_BASE_URL = os.environ.get("OPENAI_API_BASE_URL", "")
  190. if OPENAI_API_BASE_URL == "":
  191. OPENAI_API_BASE_URL = "https://api.openai.com/v1"
  192. OPENAI_API_KEYS = os.environ.get("OPENAI_API_KEYS", "")
  193. OPENAI_API_KEYS = OPENAI_API_KEYS if OPENAI_API_KEYS != "" else OPENAI_API_KEY
  194. OPENAI_API_KEYS = [url.strip() for url in OPENAI_API_KEYS.split(";")]
  195. OPENAI_API_BASE_URLS = os.environ.get("OPENAI_API_BASE_URLS", "")
  196. OPENAI_API_BASE_URLS = (
  197. OPENAI_API_BASE_URLS if OPENAI_API_BASE_URLS != "" else OPENAI_API_BASE_URL
  198. )
  199. OPENAI_API_BASE_URLS = [
  200. url.strip() if url != "" else "https://api.openai.com/v1"
  201. for url in OPENAI_API_BASE_URLS.split(";")
  202. ]
  203. ####################################
  204. # WEBUI
  205. ####################################
  206. ENABLE_SIGNUP = os.environ.get("ENABLE_SIGNUP", "True").lower() == "true"
  207. DEFAULT_MODELS = os.environ.get("DEFAULT_MODELS", None)
  208. DEFAULT_PROMPT_SUGGESTIONS = (
  209. CONFIG_DATA["ui"]["prompt_suggestions"]
  210. if "ui" in CONFIG_DATA
  211. and "prompt_suggestions" in CONFIG_DATA["ui"]
  212. and type(CONFIG_DATA["ui"]["prompt_suggestions"]) is list
  213. else [
  214. {
  215. "title": ["Help me study", "vocabulary for a college entrance exam"],
  216. "content": "Help me study vocabulary: write a sentence for me to fill in the blank, and I'll try to pick the correct option.",
  217. },
  218. {
  219. "title": ["Give me ideas", "for what to do with my kids' art"],
  220. "content": "What are 5 creative things I could do with my kids' art? I don't want to throw them away, but it's also so much clutter.",
  221. },
  222. {
  223. "title": ["Tell me a fun fact", "about the Roman Empire"],
  224. "content": "Tell me a random fun fact about the Roman Empire",
  225. },
  226. {
  227. "title": ["Show me a code snippet", "of a website's sticky header"],
  228. "content": "Show me a code snippet of a website's sticky header in CSS and JavaScript.",
  229. },
  230. ]
  231. )
  232. DEFAULT_USER_ROLE = os.getenv("DEFAULT_USER_ROLE", "pending")
  233. USER_PERMISSIONS_CHAT_DELETION = (
  234. os.environ.get("USER_PERMISSIONS_CHAT_DELETION", "True").lower() == "true"
  235. )
  236. USER_PERMISSIONS = {"chat": {"deletion": USER_PERMISSIONS_CHAT_DELETION}}
  237. MODEL_FILTER_ENABLED = os.environ.get("MODEL_FILTER_ENABLED", "False").lower() == "true"
  238. MODEL_FILTER_LIST = os.environ.get("MODEL_FILTER_LIST", "")
  239. MODEL_FILTER_LIST = [model.strip() for model in MODEL_FILTER_LIST.split(";")]
  240. WEBHOOK_URL = os.environ.get("WEBHOOK_URL", "")
  241. ####################################
  242. # WEBUI_VERSION
  243. ####################################
  244. WEBUI_VERSION = os.environ.get("WEBUI_VERSION", "v1.0.0-alpha.100")
  245. ####################################
  246. # WEBUI_AUTH (Required for security)
  247. ####################################
  248. WEBUI_AUTH = True
  249. ####################################
  250. # WEBUI_SECRET_KEY
  251. ####################################
  252. WEBUI_SECRET_KEY = os.environ.get(
  253. "WEBUI_SECRET_KEY",
  254. os.environ.get(
  255. "WEBUI_JWT_SECRET_KEY", "t0p-s3cr3t"
  256. ), # DEPRECATED: remove at next major version
  257. )
  258. if WEBUI_AUTH and WEBUI_SECRET_KEY == "":
  259. raise ValueError(ERROR_MESSAGES.ENV_VAR_NOT_FOUND)
  260. ####################################
  261. # RAG
  262. ####################################
  263. CHROMA_DATA_PATH = f"{DATA_DIR}/vector_db"
  264. # this uses the model defined in the Dockerfile ENV variable. If you dont use docker or docker based deployments such as k8s, the default embedding model will be used (all-MiniLM-L6-v2)
  265. RAG_EMBEDDING_MODEL = os.environ.get("RAG_EMBEDDING_MODEL", "all-MiniLM-L6-v2")
  266. # device type ebbeding models - "cpu" (default), "cuda" (nvidia gpu required) or "mps" (apple silicon) - choosing this right can lead to better performance
  267. RAG_EMBEDDING_MODEL_DEVICE_TYPE = os.environ.get(
  268. "RAG_EMBEDDING_MODEL_DEVICE_TYPE", "cpu"
  269. )
  270. CHROMA_CLIENT = chromadb.PersistentClient(
  271. path=CHROMA_DATA_PATH,
  272. settings=Settings(allow_reset=True, anonymized_telemetry=False),
  273. )
  274. CHUNK_SIZE = 1500
  275. CHUNK_OVERLAP = 100
  276. RAG_TEMPLATE = """Use the following context as your learned knowledge, inside <context></context> XML tags.
  277. <context>
  278. [context]
  279. </context>
  280. When answer to user:
  281. - If you don't know, just say that you don't know.
  282. - If you don't know when you are not sure, ask for clarification.
  283. Avoid mentioning that you obtained the information from the context.
  284. And answer according to the language of the user's question.
  285. Given the context information, answer the query.
  286. Query: [query]"""
  287. ####################################
  288. # Transcribe
  289. ####################################
  290. WHISPER_MODEL = os.getenv("WHISPER_MODEL", "base")
  291. WHISPER_MODEL_DIR = os.getenv("WHISPER_MODEL_DIR", f"{CACHE_DIR}/whisper/models")
  292. ####################################
  293. # Images
  294. ####################################
  295. AUTOMATIC1111_BASE_URL = os.getenv("AUTOMATIC1111_BASE_URL", "")
  296. COMFYUI_BASE_URL = os.getenv("COMFYUI_BASE_URL", "")