
Merge pull request #1374 from open-webui/dev

fix
Timothy Jaeryang Baek, 1 year ago
parent commit edeff20e1d

+ 1 - 1
.github/workflows/format-backend.yaml

@@ -33,7 +33,7 @@ jobs:
           pip install black
 
       - name: Format backend
-        run: black . --exclude "/venv/"
+        run: npm run format:backend
 
       - name: Check for changes after format
         run: git diff --exit-code

+ 2 - 0
Makefile

@@ -8,6 +8,8 @@ remove:
 
 start:
 	@docker-compose start
+startAndBuild:
+	docker-compose up -d --build
 
 stop:
 	@docker-compose stop

+ 3 - 3
backend/apps/images/main.py

@@ -325,7 +325,7 @@ def save_url_image(url):
 
         return image_id
     except Exception as e:
-        print(f"Error saving image: {e}")
+        log.exception(f"Error saving image: {e}")
         return None
 
 
@@ -397,7 +397,7 @@ def generate_image(
                 user.id,
                 app.state.COMFYUI_BASE_URL,
             )
-            print(res)
+            log.debug(f"res: {res}")
 
             images = []
 
@@ -409,7 +409,7 @@ def generate_image(
                 with open(file_body_path, "w") as f:
                     json.dump(data.model_dump(exclude_none=True), f)
 
-            print(images)
+            log.debug(f"images: {images}")
             return images
         else:
             if form_data.model:

+ 13 - 7
backend/apps/images/utils/comfyui.py

@@ -4,6 +4,12 @@ import json
 import urllib.request
 import urllib.parse
 import random
+import logging
+
+from config import SRC_LOG_LEVELS
+
+log = logging.getLogger(__name__)
+log.setLevel(SRC_LOG_LEVELS["COMFYUI"])
 
 from pydantic import BaseModel
 
@@ -121,7 +127,7 @@ COMFYUI_DEFAULT_PROMPT = """
 
 
 def queue_prompt(prompt, client_id, base_url):
-    print("queue_prompt")
+    log.info("queue_prompt")
     p = {"prompt": prompt, "client_id": client_id}
     data = json.dumps(p).encode("utf-8")
     req = urllib.request.Request(f"{base_url}/prompt", data=data)
@@ -129,7 +135,7 @@ def queue_prompt(prompt, client_id, base_url):
 
 
 def get_image(filename, subfolder, folder_type, base_url):
-    print("get_image")
+    log.info("get_image")
     data = {"filename": filename, "subfolder": subfolder, "type": folder_type}
     url_values = urllib.parse.urlencode(data)
     with urllib.request.urlopen(f"{base_url}/view?{url_values}") as response:
@@ -137,14 +143,14 @@ def get_image(filename, subfolder, folder_type, base_url):
 
 
 def get_image_url(filename, subfolder, folder_type, base_url):
-    print("get_image")
+    log.info("get_image")
     data = {"filename": filename, "subfolder": subfolder, "type": folder_type}
     url_values = urllib.parse.urlencode(data)
     return f"{base_url}/view?{url_values}"
 
 
 def get_history(prompt_id, base_url):
-    print("get_history")
+    log.info("get_history")
     with urllib.request.urlopen(f"{base_url}/history/{prompt_id}") as response:
         return json.loads(response.read())
 
@@ -212,15 +218,15 @@ def comfyui_generate_image(
     try:
         ws = websocket.WebSocket()
         ws.connect(f"ws://{host}/ws?clientId={client_id}")
-        print("WebSocket connection established.")
+        log.info("WebSocket connection established.")
     except Exception as e:
-        print(f"Failed to connect to WebSocket server: {e}")
+        log.exception(f"Failed to connect to WebSocket server: {e}")
         return None
 
     try:
         images = get_images(ws, comfyui_prompt, client_id, base_url)
     except Exception as e:
-        print(f"Error while receiving images: {e}")
+        log.exception(f"Error while receiving images: {e}")
         images = None
 
     ws.close()
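
A note on the print-to-log conversion in this file: unlike the old print(e) calls, log.exception logs at ERROR level and appends the active traceback automatically. A minimal standalone sketch (function and failure are illustrative):

```python
import logging

log = logging.getLogger(__name__)

def receive_images():
    try:
        raise ConnectionError("socket closed")  # stand-in failure
    except Exception as e:
        # log.exception records the message at ERROR level *and*
        # attaches the full traceback, which print(e) discarded.
        log.exception(f"Error while receiving images: {e}")
```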

+ 2 - 2
backend/apps/ollama/main.py

@@ -272,7 +272,7 @@ async def pull_model(
                         if request_id in REQUEST_POOL:
                             yield chunk
                         else:
-                            print("User: canceled request")
+                            log.warning("User: canceled request")
                             break
                 finally:
                     if hasattr(r, "close"):
@@ -670,7 +670,7 @@ async def generate_completion(
         else:
             raise HTTPException(
                 status_code=400,
-                detail="error_detail",
+                detail=ERROR_MESSAGES.MODEL_NOT_FOUND(form_data.model),
             )
 
     url = app.state.OLLAMA_BASE_URLS[url_idx]
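
ERROR_MESSAGES is imported from elsewhere in the backend; MODEL_NOT_FOUND is presumably a small message factory along these lines (a hypothetical sketch, not the actual definition):

```python
class ERROR_MESSAGES:
    # Hypothetical sketch: interpolate the requested model name into a
    # user-facing message, replacing the hard-coded "error_detail" literal.
    MODEL_NOT_FOUND = lambda name="": f"Model '{name}' was not found"
```

With that, detail=ERROR_MESSAGES.MODEL_NOT_FOUND(form_data.model) yields a concrete message naming the missing model instead of a placeholder string.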

+ 3 - 3
backend/apps/rag/main.py

@@ -333,7 +333,7 @@ def store_docs_in_vector_db(docs, collection_name, overwrite: bool = False) -> b
         if overwrite:
             for collection in CHROMA_CLIENT.list_collections():
                 if collection_name == collection.name:
-                    print(f"deleting existing collection {collection_name}")
+                    log.info(f"deleting existing collection {collection_name}")
                     CHROMA_CLIENT.delete_collection(name=collection_name)
 
         collection = CHROMA_CLIENT.create_collection(
@@ -346,7 +346,7 @@ def store_docs_in_vector_db(docs, collection_name, overwrite: bool = False) -> b
         )
         return True
     except Exception as e:
-        print(e)
+        log.exception(e)
         if e.__class__.__name__ == "UniqueConstraintError":
             return True
 
@@ -575,7 +575,7 @@ def scan_docs_dir(user=Depends(get_admin_user)):
                                 ),
                             )
                 except Exception as e:
-                    print(e)
+                    log.exception(e)
                     pass
 
         except Exception as e:

+ 2 - 0
backend/apps/rag/utils.py

@@ -156,6 +156,8 @@ def rag_messages(docs, messages, template, k, embedding_function):
 
         relevant_contexts.append(context)
 
+    log.debug(f"relevant_contexts: {relevant_contexts}")
+
     context_string = ""
     for context in relevant_contexts:
         if context:

+ 2 - 0
backend/config.py

@@ -119,6 +119,7 @@ log.info(f"GLOBAL_LOG_LEVEL: {GLOBAL_LOG_LEVEL}")
 
 log_sources = [
     "AUDIO",
+    "COMFYUI",
     "CONFIG",
     "DB",
     "IMAGES",
@@ -128,6 +129,7 @@ log_sources = [
     "OLLAMA",
     "OPENAI",
     "RAG",
+    "WEBHOOK",
 ]
 
 SRC_LOG_LEVELS = {}
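
The COMFYUI and WEBHOOK entries added above gate the new loggers introduced elsewhere in this commit. SRC_LOG_LEVELS itself is presumably populated just below this hunk by looping over log_sources; a sketch of that pattern, assuming one optional environment override per source:

```python
import os

GLOBAL_LOG_LEVEL = os.environ.get("GLOBAL_LOG_LEVEL", "INFO")

log_sources = ["AUDIO", "COMFYUI", "CONFIG", "WEBHOOK"]  # abridged list

SRC_LOG_LEVELS = {}
for source in log_sources:
    # Assumption: each source honors an env var such as COMFYUI_LOG_LEVEL
    # and otherwise inherits the global level. Logger.setLevel() accepts
    # level names like "DEBUG" as strings.
    SRC_LOG_LEVELS[source] = os.environ.get(f"{source}_LOG_LEVEL", GLOBAL_LOG_LEVEL)
```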

+ 8 - 5
backend/main.py

@@ -164,15 +164,18 @@ app.mount("/rag/api/v1", rag_app)
 
 @app.get("/api/config")
 async def get_app_config():
+    # Check for and handle the absence of 'ui' in CONFIG_DATA
+
+    default_locale = "en-US"
+    if "ui" in CONFIG_DATA:
+        default_locale = CONFIG_DATA["ui"].get("default_locale", "en-US")
+
+    # The rest of the function uses the locale resolved above
     return {
         "status": True,
         "name": WEBUI_NAME,
         "version": VERSION,
-        "default_locale": (
-            CONFIG_DATA["ui"]["default_locale"]
-            if "ui" in CONFIG_DATA and "default_locale" in CONFIG_DATA["ui"]
-            else "en-US"
-        ),
+        "default_locale": default_locale,
         "images": images_app.state.ENABLED,
         "default_models": webui_app.state.DEFAULT_MODELS,
         "default_prompt_suggestions": webui_app.state.DEFAULT_PROMPT_SUGGESTIONS,

+ 9 - 2
backend/utils/webhook.py

@@ -1,6 +1,11 @@
 import json
 import requests
-from config import VERSION, WEBUI_FAVICON_URL, WEBUI_NAME
+import logging
+
+from config import SRC_LOG_LEVELS, VERSION, WEBUI_FAVICON_URL, WEBUI_NAME
+
+log = logging.getLogger(__name__)
+log.setLevel(SRC_LOG_LEVELS["WEBHOOK"])
 
 
 def post_webhook(url: str, message: str, event_data: dict) -> bool:
@@ -39,9 +44,11 @@ def post_webhook(url: str, message: str, event_data: dict) -> bool:
         else:
             payload = {**event_data}
 
+        log.debug(f"payload: {payload}")
         r = requests.post(url, json=payload)
         r.raise_for_status()
+        log.debug(f"r.text: {r.text}")
         return True
     except Exception as e:
-        print(e)
+        log.exception(e)
         return False
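
With the module logger in place, post_webhook now records the outgoing payload and the response body at DEBUG level, and failures keep their tracebacks. A minimal usage sketch (URL and payload are placeholders):

```python
from utils.webhook import post_webhook  # assumes backend/ is on sys.path

ok = post_webhook(
    "https://hooks.example.com/endpoint",   # placeholder URL
    "New user signed up",
    {"action": "signup", "user": "alice"},  # illustrative event payload
)
# Failures are caught inside post_webhook and logged via log.exception;
# callers only see the boolean result.
```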

+ 1 - 0
package.json

@@ -13,6 +13,7 @@
 		"lint:types": "npm run check",
 		"lint:backend": "pylint backend/",
 		"format": "prettier --plugin-search-dir --write '**/*.{js,ts,svelte,css,md,html,json}'",
+		"format:backend": "black . --exclude \"/venv/\"",
 		"i18n:parse": "i18next --config i18next-parser.config.ts && prettier --write 'src/lib/i18n/**/*.{js,json}'"
 	},
 	"devDependencies": {

+ 4 - 0
src/app.css

@@ -78,3 +78,7 @@ select {
 	/* for Chrome */
 	-webkit-appearance: none;
 }
+
+.katex-mathml {
+	display: none;
+}

+ 5 - 5
src/routes/(app)/+page.svelte

@@ -520,11 +520,6 @@
 	const sendPromptOpenAI = async (model, userPrompt, responseMessageId, _chatId) => {
 		const responseMessage = history.messages[responseMessageId];
 
-		// Wait until history/message have been updated
-		await tick();
-
-		scrollToBottom();
-
 		const docs = messages
 			.filter((message) => message?.files ?? null)
 			.map((message) =>
@@ -593,6 +588,11 @@
 				: `${OPENAI_API_BASE_URL}`
 		);
 
+		// Wait until history/message have been updated
+		await tick();
+
+		scrollToBottom();
+
 		if (res && res.ok) {
 			const reader = res.body
 				.pipeThrough(new TextDecoderStream())

+ 5 - 5
src/routes/(app)/c/[id]/+page.svelte

@@ -536,11 +536,6 @@
 	const sendPromptOpenAI = async (model, userPrompt, responseMessageId, _chatId) => {
 		const responseMessage = history.messages[responseMessageId];
 
-		// Wait until history/message have been updated
-		await tick();
-
-		scrollToBottom();
-
 		const docs = messages
 			.filter((message) => message?.files ?? null)
 			.map((message) =>
@@ -607,6 +602,11 @@
 				: `${OPENAI_API_BASE_URL}`
 		);
 
+		// Wait until history/message have been updated
+		await tick();
+
+		scrollToBottom();
+
 		if (res && res.ok) {
 			const reader = res.body
 				.pipeThrough(new TextDecoderStream())