config.py

import os
import chromadb
from chromadb import Settings
from secrets import token_bytes
from base64 import b64encode
from constants import ERROR_MESSAGES
from pathlib import Path
import json

try:
    from dotenv import load_dotenv, find_dotenv

    load_dotenv(find_dotenv("../.env"))
except ImportError:
    print("dotenv not installed, skipping...")

####################################
# ENV (dev, test, prod)
####################################

ENV = os.environ.get("ENV", "dev")

####################################
# DATA/FRONTEND BUILD DIR
####################################

DATA_DIR = str(Path(os.getenv("DATA_DIR", "./data")).resolve())
FRONTEND_BUILD_DIR = str(Path(os.getenv("FRONTEND_BUILD_DIR", "../build")))
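
# Optional JSON overrides; CONFIG_DATA falls back to an empty dict if
# {DATA_DIR}/config.json is missing or cannot be parsed.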
try:
    with open(f"{DATA_DIR}/config.json", "r") as f:
        CONFIG_DATA = json.load(f)
except Exception:
    CONFIG_DATA = {}

####################################
# File Upload DIR
####################################

UPLOAD_DIR = f"{DATA_DIR}/uploads"
Path(UPLOAD_DIR).mkdir(parents=True, exist_ok=True)

####################################
# Cache DIR
####################################

CACHE_DIR = f"{DATA_DIR}/cache"
Path(CACHE_DIR).mkdir(parents=True, exist_ok=True)

####################################
# Docs DIR
####################################

DOCS_DIR = f"{DATA_DIR}/docs"
Path(DOCS_DIR).mkdir(parents=True, exist_ok=True)

####################################
# OLLAMA_API_BASE_URL
####################################

OLLAMA_API_BASE_URL = os.environ.get(
    "OLLAMA_API_BASE_URL", "http://localhost:11434/api"
)
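
# In containerized deployments the env var may be set to the relative proxy path
# "/ollama/api"; in prod that value is rewritten below so the backend reaches
# Ollama on the Docker host.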
if ENV == "prod":
    if OLLAMA_API_BASE_URL == "/ollama/api":
        OLLAMA_API_BASE_URL = "http://host.docker.internal:11434/api"

####################################
# OPENAI_API
####################################

OPENAI_API_KEY = os.environ.get("OPENAI_API_KEY", "")
OPENAI_API_BASE_URL = os.environ.get("OPENAI_API_BASE_URL", "")

if OPENAI_API_BASE_URL == "":
    OPENAI_API_BASE_URL = "https://api.openai.com/v1"

####################################
# WEBUI
####################################

# Parse as a real boolean so values like "false" set in the environment are honored.
ENABLE_SIGNUP = os.environ.get("ENABLE_SIGNUP", "True").lower() == "true"
DEFAULT_MODELS = os.environ.get("DEFAULT_MODELS", None)
DEFAULT_PROMPT_SUGGESTIONS = (
    CONFIG_DATA["ui"]["prompt_suggestions"]
    if "ui" in CONFIG_DATA
    and "prompt_suggestions" in CONFIG_DATA["ui"]
    and type(CONFIG_DATA["ui"]["prompt_suggestions"]) is list
    else [
        {
            "title": ["Help me study", "vocabulary for a college entrance exam"],
            "content": "Help me study vocabulary: write a sentence for me to fill in the blank, and I'll try to pick the correct option.",
        },
        {
            "title": ["Give me ideas", "for what to do with my kids' art"],
            "content": "What are 5 creative things I could do with my kids' art? I don't want to throw them away, but it's also so much clutter.",
        },
        {
            "title": ["Tell me a fun fact", "about the Roman Empire"],
            "content": "Tell me a random fun fact about the Roman Empire",
        },
        {
            "title": ["Show me a code snippet", "of a website's sticky header"],
            "content": "Show me a code snippet of a website's sticky header in CSS and JavaScript.",
        },
    ]
)

DEFAULT_USER_ROLE = "pending"
USER_PERMISSIONS = {"chat": {"deletion": True}}

####################################
# WEBUI_VERSION
####################################

WEBUI_VERSION = os.environ.get("WEBUI_VERSION", "v1.0.0-alpha.100")

####################################
# WEBUI_AUTH (Required for security)
####################################

WEBUI_AUTH = True

####################################
# WEBUI_SECRET_KEY
####################################

WEBUI_SECRET_KEY = os.environ.get(
    "WEBUI_SECRET_KEY",
    os.environ.get(
        "WEBUI_JWT_SECRET_KEY", "t0p-s3cr3t"
    ),  # DEPRECATED: remove at next major version
)

if WEBUI_AUTH and WEBUI_SECRET_KEY == "":
    raise ValueError(ERROR_MESSAGES.ENV_VAR_NOT_FOUND)
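
# The default "t0p-s3cr3t" is only a placeholder. A random key can be generated
# with the helpers imported above, e.g. b64encode(token_bytes(32)).decode(), and
# supplied via the WEBUI_SECRET_KEY environment variable.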

####################################
# RAG
####################################

CHROMA_DATA_PATH = f"{DATA_DIR}/vector_db"
# This uses the model defined in the Dockerfile ENV variable. If you don't use Docker
# or Docker-based deployments such as k8s, the default embedding model
# (all-MiniLM-L6-v2) will be used.
RAG_EMBEDDING_MODEL = os.environ.get("RAG_EMBEDDING_MODEL", "all-MiniLM-L6-v2")
# Device type for embedding models: "cpu" (default), "cuda" (NVIDIA GPU required),
# or "mps" (Apple Silicon). Choosing the right one can improve performance.
RAG_EMBEDDING_MODEL_DEVICE_TYPE = os.environ.get(
    "RAG_EMBEDDING_MODEL_DEVICE_TYPE", "cpu"
)

CHROMA_CLIENT = chromadb.PersistentClient(
    path=CHROMA_DATA_PATH,
    settings=Settings(allow_reset=True, anonymized_telemetry=False),
)
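
# Illustrative use of the client (not executed here; "docs" is a hypothetical
# collection name):
#   collection = CHROMA_CLIENT.get_or_create_collection(name="docs")
#   collection.add(ids=["doc-1"], documents=["example chunk of text"])
#   results = collection.query(query_texts=["example"], n_results=1)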

CHUNK_SIZE = 1500
CHUNK_OVERLAP = 100

RAG_TEMPLATE = """Use the following context as your learned knowledge, inside <context></context> XML tags.
<context>
    [context]
</context>

When answering the user:
- If you don't know, just say that you don't know.
- If you are not sure, ask for clarification.
Avoid mentioning that you obtained the information from the context.
Answer in the language of the user's question.

Given the context information, answer the query.
Query: [query]"""
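
# The [context] and [query] placeholders are meant to be filled in by the caller,
# e.g. via plain string replacement (illustrative sketch; the variable names are
# hypothetical):
#   prompt = RAG_TEMPLATE.replace("[context]", retrieved_chunks).replace("[query]", user_query)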

####################################
# Transcribe
####################################

WHISPER_MODEL = os.getenv("WHISPER_MODEL", "base")
WHISPER_MODEL_DIR = os.getenv("WHISPER_MODEL_DIR", f"{CACHE_DIR}/whisper/models")

####################################
# Images
####################################

AUTOMATIC1111_BASE_URL = os.getenv("AUTOMATIC1111_BASE_URL", "")