
Fix

Logging cleanup: removed extraneous hardcoded print() calls (including some that leaked message content!) and improved debug logging.

+ added chat_id to task metadata (helpful for logging/tracking in some pipe functions; a consumer sketch follows the commit header below)
DmitriyAlergant-T1A 5 months ago
parent
commit
d24c21b40f
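
For context, here is a minimal sketch of how a pipe function could pick up the newly added chat_id from the task metadata. This is illustrative only: the __metadata__ keyword parameter follows open-webui's pipe calling convention, while the Pipe class body, log message, and values are invented for the example.

import logging

log = logging.getLogger(__name__)


class Pipe:
    # Pipes that declare __metadata__ receive the request metadata dict;
    # after this commit, task requests carry chat_id inside it.
    def pipe(self, body: dict, __metadata__: dict | None = None) -> str:
        metadata = __metadata__ or {}
        task = metadata.get("task")        # e.g. "title_generation"
        chat_id = metadata.get("chat_id")  # e.g. "abc123" (hypothetical)
        log.debug(f"pipe invoked: task={task}, chat_id={chat_id}")
        return "ok"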

+ 0 - 2
backend/open_webui/apps/openai/main.py

@@ -585,8 +585,6 @@ async def generate_chat_completion(
     # Convert the modified body back to JSON
     payload = json.dumps(payload)
 
-    log.debug(payload)
-
     headers = {}
     headers["Authorization"] = f"Bearer {key}"
     headers["Content-Type"] = "application/json"

+ 9 - 4
backend/open_webui/apps/webui/main.py

@@ -68,6 +68,7 @@ from open_webui.config import (
 )
 from open_webui.env import (
     ENV,
+    SRC_LOG_LEVELS,
     WEBUI_AUTH_TRUSTED_EMAIL_HEADER,
     WEBUI_AUTH_TRUSTED_NAME_HEADER,
 )
@@ -94,6 +95,7 @@ app = FastAPI(
 )
 
 log = logging.getLogger(__name__)
+log.setLevel(SRC_LOG_LEVELS["MAIN"])
 
 app.state.config = AppConfig()
 
@@ -270,7 +272,7 @@ async def get_pipe_models():
                 log.exception(e)
                 sub_pipes = []
 
-            print(sub_pipes)
+            log.debug(f"get_pipe_models: function '{pipe.id}' is a manifold of {sub_pipes}")
 
             for p in sub_pipes:
                 sub_pipe_id = f'{pipe.id}.{p["id"]}'
@@ -280,6 +282,7 @@ async def get_pipe_models():
                     sub_pipe_name = f"{function_module.name}{sub_pipe_name}"
 
                 pipe_flag = {"type": pipe.type}
+                
                 pipe_models.append(
                     {
                         "id": sub_pipe_id,
@@ -293,6 +296,8 @@ async def get_pipe_models():
         else:
             pipe_flag = {"type": "pipe"}
 
+            log.debug(f"get_pipe_models: function '{pipe.id}' is a single pipe {{ 'id': {pipe.id}, 'name': {pipe.name} }}")
+            
             pipe_models.append(
                 {
                     "id": pipe.id,
@@ -346,7 +351,7 @@ def get_pipe_id(form_data: dict) -> str:
     pipe_id = form_data["model"]
     if "." in pipe_id:
         pipe_id, _ = pipe_id.split(".", 1)
-    print(pipe_id)
+
     return pipe_id
 
 
@@ -453,7 +458,7 @@ async def generate_function_chat_completion(form_data, user, models: dict = {}):
                     return
 
             except Exception as e:
-                print(f"Error: {e}")
+                log.error(f"Error: {e}")
                 yield f"data: {json.dumps({'error': {'detail':str(e)}})}\n\n"
                 return
 
@@ -483,7 +488,7 @@ async def generate_function_chat_completion(form_data, user, models: dict = {}):
             res = await execute_pipe(pipe, params)
 
         except Exception as e:
-            print(f"Error: {e}")
+            log.error(f"Error: {e}")
             return {"error": {"detail": str(e)}}
 
         if isinstance(res, StreamingResponse) or isinstance(res, dict):

+ 11 - 6
backend/open_webui/apps/webui/utils.py

@@ -5,10 +5,15 @@ import sys
 from importlib import util
 import types
 import tempfile
+import logging
 
+from open_webui.env import SRC_LOG_LEVELS
 from open_webui.apps.webui.models.functions import Functions
 from open_webui.apps.webui.models.tools import Tools
 
+log = logging.getLogger(__name__)
+log.setLevel(SRC_LOG_LEVELS["MAIN"])
+
 
 def extract_frontmatter(content):
     """
@@ -95,7 +100,7 @@ def load_tools_module_by_id(toolkit_id, content=None):
         # Executing the modified content in the created module's namespace
         exec(content, module.__dict__)
         frontmatter = extract_frontmatter(content)
-        print(f"Loaded module: {module.__name__}")
+        log.info(f"Loaded module: {module.__name__}")
 
         # Create and return the object if the class 'Tools' is found in the module
         if hasattr(module, "Tools"):
@@ -103,7 +108,7 @@ def load_tools_module_by_id(toolkit_id, content=None):
         else:
             raise Exception("No Tools class found in the module")
     except Exception as e:
-        print(f"Error loading module: {toolkit_id}: {e}")
+        log.error(f"Error loading module: {toolkit_id}: {e}")
         del sys.modules[module_name]  # Clean up
         raise e
     finally:
@@ -139,7 +144,7 @@ def load_function_module_by_id(function_id, content=None):
         # Execute the modified content in the created module's namespace
         exec(content, module.__dict__)
         frontmatter = extract_frontmatter(content)
-        print(f"Loaded module: {module.__name__}")
+        log.info(f"Loaded module: {module.__name__}")
 
         # Create appropriate object based on available class type in the module
         if hasattr(module, "Pipe"):
@@ -151,7 +156,7 @@ def load_function_module_by_id(function_id, content=None):
         else:
             raise Exception("No Function class found in the module")
     except Exception as e:
-        print(f"Error loading module: {function_id}: {e}")
+        log.error(f"Error loading module: {function_id}: {e}")
         del sys.modules[module_name]  # Cleanup by removing the module in case of error
 
         Functions.update_function_by_id(function_id, {"is_active": False})
@@ -164,7 +169,7 @@ def install_frontmatter_requirements(requirements):
     if requirements:
         req_list = [req.strip() for req in requirements.split(",")]
         for req in req_list:
-            print(f"Installing requirement: {req}")
+            log.info(f"Installing requirement: {req}")
             subprocess.check_call([sys.executable, "-m", "pip", "install", req])
     else:
-        print("No requirements found in frontmatter.")
+        log.info("No requirements found in frontmatter.")

+ 27 - 29
backend/open_webui/main.py

@@ -539,7 +539,6 @@ async def chat_completion_files_handler(
         if len(queries) == 0:
             queries = [get_last_user_message(body["messages"])]
 
-        print(f"{queries=}")
 
         sources = get_sources_from_files(
             files=files,
@@ -970,7 +969,7 @@ app.add_middleware(SecurityHeadersMiddleware)
 @app.middleware("http")
 async def commit_session_after_request(request: Request, call_next):
     response = await call_next(request)
-    log.debug("Commit session after request")
+    #log.debug("Commit session after request")
     Session.commit()
     return response
 
@@ -1177,6 +1176,8 @@ async def get_all_models():
             model["actions"].extend(
                 get_action_items_from_module(action_function, function_module)
             )
+    log.debug(f"get_all_models() returned {len(models)} models")
+
     return models
 
 
@@ -1214,6 +1215,8 @@ async def get_models(user=Depends(get_verified_user)):
                     filtered_models.append(model)
         models = filtered_models
 
+    log.debug(f"/api/models returned filtered models accessible to the user: {json.dumps([model['id'] for model in models])}")
+
     return {"data": models}
 
 
@@ -1704,7 +1707,6 @@ async def update_task_config(form_data: TaskConfigForm, user=Depends(get_admin_u
 
 @app.post("/api/task/title/completions")
 async def generate_title(form_data: dict, user=Depends(get_verified_user)):
-    print("generate_title")
 
     model_list = await get_all_models()
     models = {model["id"]: model for model in model_list}
@@ -1725,9 +1727,7 @@ async def generate_title(form_data: dict, user=Depends(get_verified_user)):
         models,
     )
 
-    print(task_model_id)
-
-    model = models[task_model_id]
+    log.debug(f"generating chat title using model {task_model_id} for user {user.email} ")
 
     if app.state.config.TITLE_GENERATION_PROMPT_TEMPLATE != "":
         template = app.state.config.TITLE_GENERATION_PROMPT_TEMPLATE
@@ -1766,10 +1766,12 @@ Artificial Intelligence in Healthcare
                 "max_completion_tokens": 50,
             }
         ),
-        "chat_id": form_data.get("chat_id", None),
-        "metadata": {"task": str(TASKS.TITLE_GENERATION), "task_body": form_data},
+        "metadata": {
+                "task": str(TASKS.TITLE_GENERATION), 
+                "task_body": form_data,
+                "chat_id": form_data.get("chat_id", None)
+                },
     }
-    log.debug(payload)
 
     # Handle pipeline filters
     try:
@@ -1793,7 +1795,7 @@ Artificial Intelligence in Healthcare
 
 @app.post("/api/task/tags/completions")
 async def generate_chat_tags(form_data: dict, user=Depends(get_verified_user)):
-    print("generate_chat_tags")
+
     if not app.state.config.ENABLE_TAGS_GENERATION:
         return JSONResponse(
             status_code=status.HTTP_200_OK,
@@ -1818,7 +1820,8 @@ async def generate_chat_tags(form_data: dict, user=Depends(get_verified_user)):
         app.state.config.TASK_MODEL_EXTERNAL,
         models,
     )
-    print(task_model_id)
+    
+    log.debug(f"generating chat tags using model {task_model_id} for user {user.email} ")
 
     if app.state.config.TAGS_GENERATION_PROMPT_TEMPLATE != "":
         template = app.state.config.TAGS_GENERATION_PROMPT_TEMPLATE
@@ -1849,9 +1852,12 @@ JSON format: { "tags": ["tag1", "tag2", "tag3"] }
         "model": task_model_id,
         "messages": [{"role": "user", "content": content}],
         "stream": False,
-        "metadata": {"task": str(TASKS.TAGS_GENERATION), "task_body": form_data},
+        "metadata": {
+            "task": str(TASKS.TAGS_GENERATION), 
+            "task_body": form_data,
+            "chat_id": form_data.get("chat_id", None)
+            }
     }
-    log.debug(payload)
 
     # Handle pipeline filters
     try:
@@ -1875,7 +1881,7 @@ JSON format: { "tags": ["tag1", "tag2", "tag3"] }
 
 @app.post("/api/task/queries/completions")
 async def generate_queries(form_data: dict, user=Depends(get_verified_user)):
-    print("generate_queries")
+
     type = form_data.get("type")
     if type == "web_search":
         if not app.state.config.ENABLE_SEARCH_QUERY_GENERATION:
@@ -1908,9 +1914,8 @@ async def generate_queries(form_data: dict, user=Depends(get_verified_user)):
         app.state.config.TASK_MODEL_EXTERNAL,
         models,
     )
-    print(task_model_id)
-
-    model = models[task_model_id]
+    
+    log.debug(f"generating {type} queries using model {task_model_id} for user {user.email}")
 
     if app.state.config.QUERY_GENERATION_PROMPT_TEMPLATE != "":
         template = app.state.config.QUERY_GENERATION_PROMPT_TEMPLATE
@@ -1925,9 +1930,8 @@ async def generate_queries(form_data: dict, user=Depends(get_verified_user)):
         "model": task_model_id,
         "messages": [{"role": "user", "content": content}],
         "stream": False,
-        "metadata": {"task": str(TASKS.QUERY_GENERATION), "task_body": form_data},
+        "metadata": {"task": str(TASKS.QUERY_GENERATION), "task_body": form_data, "chat_id": form_data.get("chat_id", None)},
     }
-    log.debug(payload)
 
     # Handle pipeline filters
     try:
@@ -1951,7 +1955,6 @@ async def generate_queries(form_data: dict, user=Depends(get_verified_user)):
 
 @app.post("/api/task/emoji/completions")
 async def generate_emoji(form_data: dict, user=Depends(get_verified_user)):
-    print("generate_emoji")
 
     model_list = await get_all_models()
     models = {model["id"]: model for model in model_list}
@@ -1971,9 +1974,8 @@ async def generate_emoji(form_data: dict, user=Depends(get_verified_user)):
         app.state.config.TASK_MODEL_EXTERNAL,
         models,
     )
-    print(task_model_id)
 
-    model = models[task_model_id]
+    log.debug(f"generating emoji using model {task_model_id} for user {user.email} ")
 
     template = '''
 Your task is to reflect the speaker's likely facial expression through a fitting emoji. Interpret emotions from the message and reflect their facial expression using fitting, diverse emojis (e.g., 😊, 😢, 😡, 😱).
@@ -2003,7 +2005,6 @@ Message: """{{prompt}}"""
         "chat_id": form_data.get("chat_id", None),
         "metadata": {"task": str(TASKS.EMOJI_GENERATION), "task_body": form_data},
     }
-    log.debug(payload)
 
     # Handle pipeline filters
     try:
@@ -2027,7 +2028,6 @@ Message: """{{prompt}}"""
 
 @app.post("/api/task/moa/completions")
 async def generate_moa_response(form_data: dict, user=Depends(get_verified_user)):
-    print("generate_moa_response")
 
     model_list = await get_all_models()
     models = {model["id"]: model for model in model_list}
@@ -2047,9 +2047,8 @@ async def generate_moa_response(form_data: dict, user=Depends(get_verified_user)
         app.state.config.TASK_MODEL_EXTERNAL,
         models,
     )
-    print(task_model_id)
-
-    model = models[task_model_id]
+   
+    log.debug(f"generating MOA model {task_model_id} for user {user.email} ")
 
     template = """You have been provided with a set of responses from various models to the latest user query: "{{prompt}}"
 
@@ -2073,7 +2072,6 @@ Responses from models: {{responses}}"""
             "task_body": form_data,
         },
     }
-    log.debug(payload)
 
     try:
         payload = filter_pipeline(payload, user, models)
@@ -2108,7 +2106,7 @@ Responses from models: {{responses}}"""
 async def get_pipelines_list(user=Depends(get_admin_user)):
     responses = await get_openai_models_responses()
 
-    print(responses)
+    log.debug(f"get_pipelines_list: get_openai_models_responses returned {responses}")
     urlIdxs = [
         idx
         for idx, response in enumerate(responses)
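
For reference, the task payloads built by the endpoints above now include chat_id inside metadata (for title generation it moved there from the top level). An illustrative shape, with invented model id and chat id values:

form_data = {"chat_id": "abc123", "messages": []}  # hypothetical request body

payload = {
    "model": "my-task-model",  # resolved task model id (example value)
    "messages": [{"role": "user", "content": "..."}],
    "stream": False,
    "metadata": {
        "task": "title_generation",  # i.e. str(TASKS.TITLE_GENERATION)
        "task_body": form_data,
        "chat_id": form_data.get("chat_id", None),  # now available to pipes
    },
}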