Use log.debug() for logging request bodies for the backend API

Andrew Phillips, 10 months ago
commit c0c875eae2
3 changed files with 4 additions and 5 deletions
  1. backend/apps/ollama/main.py  +1 −2
  2. backend/apps/openai/main.py  +1 −1
  3. backend/main.py  +2 −2

backend/apps/ollama/main.py  +1 −2

@@ -839,8 +839,7 @@ async def generate_chat_completion(
 
     url = app.state.config.OLLAMA_BASE_URLS[url_idx]
     log.info(f"url: {url}")
-
-    print(payload)
+    log.debug(payload)
 
     return await post_streaming_url(f"{url}/api/chat", json.dumps(payload))
 

backend/apps/openai/main.py  +1 −1

@@ -430,7 +430,7 @@ async def generate_chat_completion(
     # Convert the modified body back to JSON
     payload = json.dumps(payload)
 
-    print(payload)
+    log.debug(payload)
 
     url = app.state.config.OPENAI_API_BASE_URLS[idx]
     key = app.state.config.OPENAI_API_KEYS[idx]

backend/main.py  +2 −2

@@ -773,7 +773,7 @@ async def generate_title(form_data: dict, user=Depends(get_verified_user)):
         "title": True,
     }
 
-    print(payload)
+    log.debug(payload)
 
     try:
         payload = filter_pipeline(payload, user)
@@ -837,7 +837,7 @@ async def generate_search_query(form_data: dict, user=Depends(get_verified_user)
         "max_tokens": 30,
     }
 
-    print(payload)
+    log.debug(payload)
 
     try:
         payload = filter_pipeline(payload, user)
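
The diffs assume each module already exposes a module-level log object whose level can be adjusted at deployment time. A minimal sketch of such a setup, assuming Python's standard logging module and a hypothetical GLOBAL_LOG_LEVEL environment variable (neither the variable name nor the exact configuration is taken from this commit):

    import logging
    import os
    import sys

    # Hypothetical: read the desired level from an environment variable;
    # the name GLOBAL_LOG_LEVEL is assumed for illustration only.
    GLOBAL_LOG_LEVEL = os.environ.get("GLOBAL_LOG_LEVEL", "INFO").upper()

    logging.basicConfig(stream=sys.stdout, level=GLOBAL_LOG_LEVEL)

    # Module-level logger, matching the log.debug(payload) calls in the diffs above.
    log = logging.getLogger(__name__)

    # At the default INFO level this emits nothing; setting GLOBAL_LOG_LEVEL=DEBUG
    # makes the request body visible again.
    log.debug("request body: %s", {"model": "llama3", "messages": []})

With a setup along these lines, the request bodies are only written out when debug logging is enabled, rather than hitting stdout on every request as the unconditional print(payload) calls did.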