
Merge pull request #5311 from open-webui/dev

dev
Timothy Jaeryang Baek, 7 months ago
parent
commit
d5f13dd9e0

+ 2 - 2
backend/open_webui/apps/audio/main.py

@@ -21,14 +21,14 @@ from open_webui.config import (
     AUDIO_TTS_VOICE,
     CACHE_DIR,
     CORS_ALLOW_ORIGIN,
-    DEVICE_TYPE,
     WHISPER_MODEL,
     WHISPER_MODEL_AUTO_UPDATE,
     WHISPER_MODEL_DIR,
     AppConfig,
 )
+
 from open_webui.constants import ERROR_MESSAGES
-from open_webui.env import SRC_LOG_LEVELS
+from open_webui.env import SRC_LOG_LEVELS, DEVICE_TYPE
 from fastapi import Depends, FastAPI, File, HTTPException, Request, UploadFile, status
 from fastapi.middleware.cors import CORSMiddleware
 from fastapi.responses import FileResponse
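
With this hunk, audio/main.py takes DEVICE_TYPE from open_webui.env instead of open_webui.config. A minimal sketch of how such a device flag typically reaches the Whisper speech-to-text backend; the faster-whisper call and the placeholder model/path values below are assumptions for illustration, not the actual contents of this file:

# Illustrative only: feeding the env-level DEVICE_TYPE into faster-whisper.
# Placeholder values ("base", /tmp/whisper, speech.wav) are assumptions.
from faster_whisper import WhisperModel

from open_webui.env import DEVICE_TYPE  # "cuda" or "cpu", resolved from USE_CUDA_DOCKER

model = WhisperModel(
    "base",                              # assumed Whisper model size
    device=DEVICE_TYPE,                  # "cuda" requires an NVIDIA GPU and CUDA runtime
    compute_type="int8" if DEVICE_TYPE == "cpu" else "float16",
    download_root="/tmp/whisper",        # assumed cache directory
)
segments, info = model.transcribe("speech.wav")
print(info.language, [segment.text for segment in segments])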

+ 0 - 1
backend/open_webui/apps/rag/main.py

@@ -44,7 +44,6 @@ from open_webui.config import (
     CHUNK_SIZE,
     CONTENT_EXTRACTION_ENGINE,
     CORS_ALLOW_ORIGIN,
-    DEVICE_TYPE,
     DOCS_DIR,
     ENABLE_RAG_HYBRID_SEARCH,
     ENABLE_RAG_LOCAL_WEB_FETCH,
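
The RAG app likewise drops DEVICE_TYPE from its open_webui.config imports; presumably it now reads the flag from open_webui.env, where it ends up as the device string for the embedding model. A hedged sketch under that assumption (the sentence-transformers model name is a placeholder, not necessarily what the app actually loads):

# Sketch, not the actual rag/main.py code: passing DEVICE_TYPE to an embedding model.
from sentence_transformers import SentenceTransformer

from open_webui.env import DEVICE_TYPE

embedding_model = SentenceTransformer(
    "sentence-transformers/all-MiniLM-L6-v2",  # placeholder model name
    device=DEVICE_TYPE,                        # "cpu" or "cuda"
)
vectors = embedding_model.encode(["What is retrieval-augmented generation?"])
print(vectors.shape)  # (1, embedding_dim)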

+ 1 - 63
backend/open_webui/config.py

@@ -539,40 +539,6 @@ Path(TOOLS_DIR).mkdir(parents=True, exist_ok=True)
 FUNCTIONS_DIR = os.getenv("FUNCTIONS_DIR", f"{DATA_DIR}/functions")
 Path(FUNCTIONS_DIR).mkdir(parents=True, exist_ok=True)
 
-
-####################################
-# LITELLM_CONFIG
-####################################
-
-
-def create_config_file(file_path):
-    directory = os.path.dirname(file_path)
-
-    # Check if directory exists, if not, create it
-    if not os.path.exists(directory):
-        os.makedirs(directory)
-
-    # Data to write into the YAML file
-    config_data = {
-        "general_settings": {},
-        "litellm_settings": {},
-        "model_list": [],
-        "router_settings": {},
-    }
-
-    # Write data to YAML file
-    with open(file_path, "w") as file:
-        yaml.dump(config_data, file)
-
-
-LITELLM_CONFIG_PATH = f"{DATA_DIR}/litellm/config.yaml"
-
-# if not os.path.exists(LITELLM_CONFIG_PATH):
-#     log.info("Config file doesn't exist. Creating...")
-#     create_config_file(LITELLM_CONFIG_PATH)
-#     log.info("Config file created successfully.")
-
-
 ####################################
 # OLLAMA_BASE_URL
 ####################################
@@ -922,7 +888,7 @@ TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE = PersistentConfig(
 
 
 ####################################
-# RAG document content extraction
+# Vector Database
 ####################################
 
 VECTOR_DB = os.environ.get("VECTOR_DB", "chroma")
@@ -1051,34 +1017,6 @@ RAG_RERANKING_MODEL_TRUST_REMOTE_CODE = (
     os.environ.get("RAG_RERANKING_MODEL_TRUST_REMOTE_CODE", "").lower() == "true"
 )
 
-
-if CHROMA_HTTP_HOST != "":
-    CHROMA_CLIENT = chromadb.HttpClient(
-        host=CHROMA_HTTP_HOST,
-        port=CHROMA_HTTP_PORT,
-        headers=CHROMA_HTTP_HEADERS,
-        ssl=CHROMA_HTTP_SSL,
-        tenant=CHROMA_TENANT,
-        database=CHROMA_DATABASE,
-        settings=Settings(allow_reset=True, anonymized_telemetry=False),
-    )
-else:
-    CHROMA_CLIENT = chromadb.PersistentClient(
-        path=CHROMA_DATA_PATH,
-        settings=Settings(allow_reset=True, anonymized_telemetry=False),
-        tenant=CHROMA_TENANT,
-        database=CHROMA_DATABASE,
-    )
-
-
-# device type embedding models - "cpu" (default), "cuda" (nvidia gpu required) or "mps" (apple silicon) - choosing this right can lead to better performance
-USE_CUDA = os.environ.get("USE_CUDA_DOCKER", "false")
-
-if USE_CUDA.lower() == "true":
-    DEVICE_TYPE = "cuda"
-else:
-    DEVICE_TYPE = "cpu"
-
 CHUNK_SIZE = PersistentConfig(
     "CHUNK_SIZE", "rag.chunk_size", int(os.environ.get("CHUNK_SIZE", "1500"))
 )
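
config.py no longer builds the Chroma client or resolves DEVICE_TYPE; the section header change to "Vector Database" and the VECTOR_DB setting suggest client construction now lives behind a vector-database abstraction elsewhere. A hedged reconstruction of the removed logic as a standalone helper (the function name, defaults, and module placement are assumptions, not the project's actual layout):

# Sketch of the Chroma client selection that was removed from config.py.
# Helper name and default arguments are illustrative assumptions.
import chromadb
from chromadb.config import DEFAULT_DATABASE, DEFAULT_TENANT, Settings


def build_chroma_client(
    http_host="",
    http_port=8000,
    http_headers=None,
    http_ssl=False,
    tenant=DEFAULT_TENANT,
    database=DEFAULT_DATABASE,
    data_path="./data/vector_db",
):
    settings = Settings(allow_reset=True, anonymized_telemetry=False)
    if http_host:
        # Remote Chroma server over HTTP
        return chromadb.HttpClient(
            host=http_host,
            port=http_port,
            headers=http_headers or {},
            ssl=http_ssl,
            tenant=tenant,
            database=database,
            settings=settings,
        )
    # Local on-disk Chroma instance
    return chromadb.PersistentClient(
        path=data_path,
        settings=settings,
        tenant=tenant,
        database=database,
    )

Either branch mirrors the deleted CHROMA_HTTP_HOST check: a non-empty host selects the HTTP client, otherwise a persistent on-disk client is used.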

+ 9 - 0
backend/open_webui/env.py

@@ -32,6 +32,15 @@ except ImportError:
     print("dotenv not installed, skipping...")
 
 
+# device type embedding models - "cpu" (default), "cuda" (nvidia gpu required) or "mps" (apple silicon) - choosing this right can lead to better performance
+USE_CUDA = os.environ.get("USE_CUDA_DOCKER", "false")
+
+if USE_CUDA.lower() == "true":
+    DEVICE_TYPE = "cuda"
+else:
+    DEVICE_TYPE = "cpu"
+
+
 ####################################
 # LOGGING
 ####################################
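
After this hunk, DEVICE_TYPE is resolved once at import time in open_webui.env from the USE_CUDA_DOCKER environment variable and shared by the audio and RAG apps. A quick hedged check of the behaviour shown above (the value set here is only an example):

# Minimal check of the env-level flag; the value set here is only an example.
import os

os.environ["USE_CUDA_DOCKER"] = "true"   # what a CUDA-enabled Docker image would set

from open_webui.env import DEVICE_TYPE   # must be imported after the variable is set

print(DEVICE_TYPE)  # "cuda"; falls back to "cpu" when the flag is unset or not "true"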