config.py

import os
import sys
import logging
import chromadb
from chromadb import Settings
from base64 import b64encode
from bs4 import BeautifulSoup
from pathlib import Path
import json
import yaml
import markdown
import requests
import shutil
from secrets import token_bytes
from constants import ERROR_MESSAGES

try:
    from dotenv import load_dotenv, find_dotenv

    load_dotenv(find_dotenv("../.env"))
except ImportError:
    # The logger is not configured yet at this point, so fall back to print.
    print("dotenv not installed, skipping...")

WEBUI_NAME = os.environ.get("WEBUI_NAME", "Open WebUI")
WEBUI_FAVICON_URL = "https://openwebui.com/favicon.png"

####################################
# ENV (dev,test,prod)
####################################

ENV = os.environ.get("ENV", "dev")

try:
    with open("../package.json", "r") as f:
        PACKAGE_DATA = json.load(f)
except Exception:
    PACKAGE_DATA = {"version": "0.0.0"}

VERSION = PACKAGE_DATA["version"]


# Function to parse each section
def parse_section(section):
    items = []
    for li in section.find_all("li"):
        # Extract raw HTML string
        raw_html = str(li)
        # Extract text without HTML tags
        text = li.get_text(separator=" ", strip=True)
        # Split into title and content
        parts = text.split(": ", 1)
        title = parts[0].strip() if len(parts) > 1 else ""
        content = parts[1].strip() if len(parts) > 1 else text
        items.append({"title": title, "content": content, "raw": raw_html})
    return items


try:
    with open("../CHANGELOG.md", "r") as file:
        changelog_content = file.read()
except Exception:
    changelog_content = ""

# Convert markdown content to HTML
html_content = markdown.markdown(changelog_content)

# Parse the HTML content
soup = BeautifulSoup(html_content, "html.parser")

# Initialize JSON structure
changelog_json = {}

# Iterate over each version
for version in soup.find_all("h2"):
    version_number = version.get_text().strip().split(" - ")[0][1:-1]  # Remove brackets
    date = version.get_text().strip().split(" - ")[1]

    version_data = {"date": date}

    # Find the next sibling that is a h3 tag (section title)
    current = version.find_next_sibling()
    while current and current.name != "h2":
        if current.name == "h3":
            section_title = current.get_text().lower()  # e.g., "added", "fixed"
            section_items = parse_section(current.find_next_sibling("ul"))
            version_data[section_title] = section_items

        # Move to the next element
        current = current.find_next_sibling()

    changelog_json[version_number] = version_data

CHANGELOG = changelog_json
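
# Illustrative sketch (hypothetical entry) of the CHANGELOG.md layout the loop above
# expects, and roughly what it produces:
#
#   ## [0.1.0] - 2024-01-01
#   ### Added
#   - Example item: example description.
#
# -> CHANGELOG["0.1.0"] == {"date": "2024-01-01",
#                           "added": [{"title": "Example item", "content": "example description.", ...}]}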

####################################
# DATA/FRONTEND BUILD DIR
####################################

DATA_DIR = str(Path(os.getenv("DATA_DIR", "./data")).resolve())
FRONTEND_BUILD_DIR = str(Path(os.getenv("FRONTEND_BUILD_DIR", "../build")))

try:
    with open(f"{DATA_DIR}/config.json", "r") as f:
        CONFIG_DATA = json.load(f)
except Exception:
    CONFIG_DATA = {}
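
# Illustrative only: {DATA_DIR}/config.json is optional and may carry UI overrides, e.g.
#   {"ui": {"prompt_suggestions": [{"title": ["Example", "subtitle"], "content": "Example prompt"}]}}
# (values are hypothetical; this is the shape DEFAULT_PROMPT_SUGGESTIONS below looks for).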

####################################
# Static DIR
####################################

STATIC_DIR = str(Path(os.getenv("STATIC_DIR", "./static")).resolve())

shutil.copyfile(f"{FRONTEND_BUILD_DIR}/favicon.png", f"{STATIC_DIR}/favicon.png")

####################################
# LOGGING
####################################

log_levels = ["CRITICAL", "ERROR", "WARNING", "INFO", "DEBUG"]

GLOBAL_LOG_LEVEL = os.environ.get("GLOBAL_LOG_LEVEL", "").upper()
if GLOBAL_LOG_LEVEL in log_levels:
    logging.basicConfig(stream=sys.stdout, level=GLOBAL_LOG_LEVEL, force=True)
else:
    GLOBAL_LOG_LEVEL = "INFO"

log = logging.getLogger(__name__)
log.info(f"GLOBAL_LOG_LEVEL: {GLOBAL_LOG_LEVEL}")

log_sources = [
    "AUDIO",
    "COMFYUI",
    "CONFIG",
    "DB",
    "IMAGES",
    "LITELLM",
    "MAIN",
    "MODELS",
    "OLLAMA",
    "OPENAI",
    "RAG",
    "WEBHOOK",
]

SRC_LOG_LEVELS = {}

for source in log_sources:
    log_env_var = source + "_LOG_LEVEL"
    SRC_LOG_LEVELS[source] = os.environ.get(log_env_var, "").upper()
    if SRC_LOG_LEVELS[source] not in log_levels:
        SRC_LOG_LEVELS[source] = GLOBAL_LOG_LEVEL
    log.info(f"{log_env_var}: {SRC_LOG_LEVELS[source]}")

log.setLevel(SRC_LOG_LEVELS["CONFIG"])
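
# Illustrative usage (hypothetical values): each source reads <SOURCE>_LOG_LEVEL from the
# environment, e.g. RAG_LOG_LEVEL=DEBUG or OLLAMA_LOG_LEVEL=WARNING, falling back to
# GLOBAL_LOG_LEVEL when unset or invalid.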

####################################
# CUSTOM_NAME
####################################

CUSTOM_NAME = os.environ.get("CUSTOM_NAME", "")

if CUSTOM_NAME:
    try:
        r = requests.get(f"https://api.openwebui.com/api/v1/custom/{CUSTOM_NAME}")
        data = r.json()
        if r.ok:
            if "logo" in data:
                WEBUI_FAVICON_URL = url = (
                    f"https://api.openwebui.com{data['logo']}"
                    if data["logo"][0] == "/"
                    else data["logo"]
                )

                r = requests.get(url, stream=True)
                if r.status_code == 200:
                    with open(f"{STATIC_DIR}/favicon.png", "wb") as f:
                        r.raw.decode_content = True
                        shutil.copyfileobj(r.raw, f)

            WEBUI_NAME = data["name"]
    except Exception as e:
        log.exception(e)
else:
    if WEBUI_NAME != "Open WebUI":
        WEBUI_NAME += " (Open WebUI)"

####################################
# File Upload DIR
####################################

UPLOAD_DIR = f"{DATA_DIR}/uploads"
Path(UPLOAD_DIR).mkdir(parents=True, exist_ok=True)


####################################
# Cache DIR
####################################

CACHE_DIR = f"{DATA_DIR}/cache"
Path(CACHE_DIR).mkdir(parents=True, exist_ok=True)


####################################
# Docs DIR
####################################

DOCS_DIR = f"{DATA_DIR}/docs"
Path(DOCS_DIR).mkdir(parents=True, exist_ok=True)


####################################
# LITELLM_CONFIG
####################################


def create_config_file(file_path):
    directory = os.path.dirname(file_path)

    # Check if directory exists, if not, create it
    if not os.path.exists(directory):
        os.makedirs(directory)

    # Data to write into the YAML file
    config_data = {
        "general_settings": {},
        "litellm_settings": {},
        "model_list": [],
        "router_settings": {},
    }

    # Write data to YAML file
    with open(file_path, "w") as file:
        yaml.dump(config_data, file)


LITELLM_CONFIG_PATH = f"{DATA_DIR}/litellm/config.yaml"

if not os.path.exists(LITELLM_CONFIG_PATH):
    log.info("Config file doesn't exist. Creating...")
    create_config_file(LITELLM_CONFIG_PATH)
    log.info("Config file created successfully.")
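
# For reference, the skeleton written by create_config_file looks like this once
# yaml.dump has serialized it (keys emitted in sorted order):
#
#   general_settings: {}
#   litellm_settings: {}
#   model_list: []
#   router_settings: {}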

####################################
# OLLAMA_BASE_URL
####################################

OLLAMA_API_BASE_URL = os.environ.get(
    "OLLAMA_API_BASE_URL", "http://localhost:11434/api"
)

OLLAMA_BASE_URL = os.environ.get("OLLAMA_BASE_URL", "")
K8S_FLAG = os.environ.get("K8S_FLAG", "")
USE_OLLAMA_DOCKER = os.environ.get("USE_OLLAMA_DOCKER", "false")

if OLLAMA_BASE_URL == "" and OLLAMA_API_BASE_URL != "":
    OLLAMA_BASE_URL = (
        OLLAMA_API_BASE_URL[:-4]
        if OLLAMA_API_BASE_URL.endswith("/api")
        else OLLAMA_API_BASE_URL
    )

if ENV == "prod":
    if OLLAMA_BASE_URL == "/ollama" and not K8S_FLAG:
        if USE_OLLAMA_DOCKER.lower() == "true":
            # If you use the all-in-one Docker container (Open WebUI + Ollama) built with
            # the build arg USE_OLLAMA=true (--build-arg="USE_OLLAMA=true"), Ollama is only
            # reachable at http://localhost:11434 inside the container.
            OLLAMA_BASE_URL = "http://localhost:11434"
        else:
            OLLAMA_BASE_URL = "http://host.docker.internal:11434"
    elif K8S_FLAG:
        OLLAMA_BASE_URL = "http://ollama-service.open-webui.svc.cluster.local:11434"

OLLAMA_BASE_URLS = os.environ.get("OLLAMA_BASE_URLS", "")
OLLAMA_BASE_URLS = OLLAMA_BASE_URLS if OLLAMA_BASE_URLS != "" else OLLAMA_BASE_URL

OLLAMA_BASE_URLS = [url.strip() for url in OLLAMA_BASE_URLS.split(";")]
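
# Illustrative only (the second host is hypothetical): several Ollama backends can be
# supplied as a semicolon-separated list, e.g.
#   OLLAMA_BASE_URLS="http://localhost:11434;http://192.168.1.50:11434"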

####################################
# OPENAI_API
####################################

OPENAI_API_KEY = os.environ.get("OPENAI_API_KEY", "")
OPENAI_API_BASE_URL = os.environ.get("OPENAI_API_BASE_URL", "")

if OPENAI_API_BASE_URL == "":
    OPENAI_API_BASE_URL = "https://api.openai.com/v1"

OPENAI_API_KEYS = os.environ.get("OPENAI_API_KEYS", "")
OPENAI_API_KEYS = OPENAI_API_KEYS if OPENAI_API_KEYS != "" else OPENAI_API_KEY

OPENAI_API_KEYS = [key.strip() for key in OPENAI_API_KEYS.split(";")]

OPENAI_API_BASE_URLS = os.environ.get("OPENAI_API_BASE_URLS", "")
OPENAI_API_BASE_URLS = (
    OPENAI_API_BASE_URLS if OPENAI_API_BASE_URLS != "" else OPENAI_API_BASE_URL
)

OPENAI_API_BASE_URLS = [
    url.strip() if url != "" else "https://api.openai.com/v1"
    for url in OPENAI_API_BASE_URLS.split(";")
]
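
# Illustrative only (keys and URLs are hypothetical): multiple OpenAI-compatible backends
# are configured as parallel semicolon-separated lists, presumably matched by position
# downstream, e.g.
#   OPENAI_API_KEYS="sk-aaa;sk-bbb"
#   OPENAI_API_BASE_URLS="https://api.openai.com/v1;http://localhost:4000/v1"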

####################################
# WEBUI
####################################

ENABLE_SIGNUP = os.environ.get("ENABLE_SIGNUP", "True").lower() == "true"
DEFAULT_MODELS = os.environ.get("DEFAULT_MODELS", None)

DEFAULT_PROMPT_SUGGESTIONS = (
    CONFIG_DATA["ui"]["prompt_suggestions"]
    if "ui" in CONFIG_DATA
    and "prompt_suggestions" in CONFIG_DATA["ui"]
    and type(CONFIG_DATA["ui"]["prompt_suggestions"]) is list
    else [
        {
            "title": ["Help me study", "vocabulary for a college entrance exam"],
            "content": "Help me study vocabulary: write a sentence for me to fill in the blank, and I'll try to pick the correct option.",
        },
        {
            "title": ["Give me ideas", "for what to do with my kids' art"],
            "content": "What are 5 creative things I could do with my kids' art? I don't want to throw them away, but it's also so much clutter.",
        },
        {
            "title": ["Tell me a fun fact", "about the Roman Empire"],
            "content": "Tell me a random fun fact about the Roman Empire",
        },
        {
            "title": ["Show me a code snippet", "of a website's sticky header"],
            "content": "Show me a code snippet of a website's sticky header in CSS and JavaScript.",
        },
    ]
)

DEFAULT_USER_ROLE = os.getenv("DEFAULT_USER_ROLE", "pending")

USER_PERMISSIONS_CHAT_DELETION = (
    os.environ.get("USER_PERMISSIONS_CHAT_DELETION", "True").lower() == "true"
)

USER_PERMISSIONS = {"chat": {"deletion": USER_PERMISSIONS_CHAT_DELETION}}

MODEL_FILTER_ENABLED = os.environ.get("MODEL_FILTER_ENABLED", "False").lower() == "true"
MODEL_FILTER_LIST = os.environ.get("MODEL_FILTER_LIST", "")
MODEL_FILTER_LIST = [model.strip() for model in MODEL_FILTER_LIST.split(";")]
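
# Illustrative only (model names are hypothetical): MODEL_FILTER_LIST is a semicolon-separated
# list of model ids, e.g. MODEL_FILTER_LIST="llama2:latest;gpt-4", presumably the models to
# expose when MODEL_FILTER_ENABLED is true.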

WEBHOOK_URL = os.environ.get("WEBHOOK_URL", "")


####################################
# WEBUI_VERSION
####################################

WEBUI_VERSION = os.environ.get("WEBUI_VERSION", "v1.0.0-alpha.100")


####################################
# WEBUI_AUTH (Required for security)
####################################

WEBUI_AUTH = True
WEBUI_AUTH_TRUSTED_EMAIL_HEADER = os.environ.get(
    "WEBUI_AUTH_TRUSTED_EMAIL_HEADER", None
)


####################################
# WEBUI_SECRET_KEY
####################################

WEBUI_SECRET_KEY = os.environ.get(
    "WEBUI_SECRET_KEY",
    os.environ.get(
        "WEBUI_JWT_SECRET_KEY", "t0p-s3cr3t"
    ),  # DEPRECATED: remove at next major version
)

if WEBUI_AUTH and WEBUI_SECRET_KEY == "":
    raise ValueError(ERROR_MESSAGES.ENV_VAR_NOT_FOUND)
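
# Illustrative only: in production, override the weak default with a random secret, e.g.
#   WEBUI_SECRET_KEY="$(openssl rand -base64 32)"
# or generate one in Python with secrets.token_hex(32).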


####################################
# RAG
####################################

CHROMA_DATA_PATH = f"{DATA_DIR}/vector_db"
# This uses the model defined in the Dockerfile ENV variable. If you don't use Docker or a
# Docker-based deployment such as Kubernetes, the default embedding model (all-MiniLM-L6-v2)
# will be used.
RAG_EMBEDDING_MODEL = os.environ.get("RAG_EMBEDDING_MODEL", "all-MiniLM-L6-v2")
log.info(f"Embedding model set: {RAG_EMBEDDING_MODEL}")
RAG_EMBEDDING_MODEL_AUTO_UPDATE = (
    os.environ.get("RAG_EMBEDDING_MODEL_AUTO_UPDATE", "").lower() == "true"
)

# Device type for embedding models: "cpu" (default), "cuda" (NVIDIA GPU required) or
# "mps" (Apple Silicon). Choosing the right device can improve performance.
USE_CUDA = os.environ.get("USE_CUDA_DOCKER", "false")

if USE_CUDA.lower() == "true":
    DEVICE_TYPE = "cuda"
else:
    DEVICE_TYPE = "cpu"

CHROMA_CLIENT = chromadb.PersistentClient(
    path=CHROMA_DATA_PATH,
    settings=Settings(allow_reset=True, anonymized_telemetry=False),
)
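
# Illustrative usage only (the collection name is hypothetical): downstream RAG code can
# create or fetch collections from this persistent client, e.g.
#   collection = CHROMA_CLIENT.get_or_create_collection(name="example-docs")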

CHUNK_SIZE = 1500
CHUNK_OVERLAP = 100

RAG_TEMPLATE = """Use the following context as your learned knowledge, inside <context></context> XML tags.
<context>
[context]
</context>
When answering the user:
- If you don't know, just say that you don't know.
- If you are not sure, ask for clarification.
Avoid mentioning that you obtained the information from the context.
Answer according to the language of the user's question.
Given the context information, answer the query.
Query: [query]"""
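
# Note: [context] and [query] in RAG_TEMPLATE are placeholders that the RAG pipeline is
# expected to substitute at query time, e.g. (names here are illustrative)
#   prompt = RAG_TEMPLATE.replace("[context]", retrieved_text).replace("[query]", user_query)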

####################################
# Transcribe
####################################

WHISPER_MODEL = os.getenv("WHISPER_MODEL", "base")
WHISPER_MODEL_DIR = os.getenv("WHISPER_MODEL_DIR", f"{CACHE_DIR}/whisper/models")
WHISPER_MODEL_AUTO_UPDATE = (
    os.environ.get("WHISPER_MODEL_AUTO_UPDATE", "").lower() == "true"
)


####################################
# Images
####################################

AUTOMATIC1111_BASE_URL = os.getenv("AUTOMATIC1111_BASE_URL", "")
COMFYUI_BASE_URL = os.getenv("COMFYUI_BASE_URL", "")
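
# Illustrative only (typical local defaults; adjust to your setup):
#   AUTOMATIC1111_BASE_URL="http://localhost:7860"
#   COMFYUI_BASE_URL="http://localhost:8188"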