fix: formatting test errors, remove print, merge dev

Michael Poluektov 9 months ago
parent
commit
a725801e55

+ 1 - 1
backend/apps/images/main.py

@@ -514,7 +514,7 @@ async def image_generations(

             data = ImageGenerationPayload(**data)

-            res = comfyui_generate_image(
+            res = await comfyui_generate_image(
                 app.state.config.MODEL,
                 data,
                 user.id,

+ 6 - 5
backend/apps/images/utils/comfyui.py

@@ -1,3 +1,4 @@
+import asyncio
 import websocket  # NOTE: websocket-client (https://github.com/websocket-client/websocket-client)
 import uuid
 import json
@@ -328,7 +329,7 @@ class ImageGenerationPayload(BaseModel):
     flux_fp8_clip: Optional[bool] = None


-def comfyui_generate_image(
+async def comfyui_generate_image(
     model: str, payload: ImageGenerationPayload, client_id, base_url
 ):
     ws_url = base_url.replace("http://", "ws://").replace("https://", "wss://")
@@ -377,9 +378,9 @@ def comfyui_generate_image(
             comfyui_prompt["12"]["inputs"]["weight_dtype"] = payload.flux_weight_dtype
             comfyui_prompt["12"]["inputs"]["weight_dtype"] = payload.flux_weight_dtype
 
 
         if payload.flux_fp8_clip:
         if payload.flux_fp8_clip:
-            comfyui_prompt["11"]["inputs"][
-                "clip_name2"
-            ] = "t5xxl_fp8_e4m3fn.safetensors"
+            comfyui_prompt["11"]["inputs"]["clip_name2"] = (
+                "t5xxl_fp8_e4m3fn.safetensors"
+            )

     comfyui_prompt["5"]["inputs"]["batch_size"] = payload.n
     comfyui_prompt["5"]["inputs"]["width"] = payload.width
@@ -397,7 +398,7 @@ def comfyui_generate_image(
         return None

     try:
-        images = get_images(ws, comfyui_prompt, client_id, base_url)
+        images = await asyncio.to_thread(get_images, ws, comfyui_prompt, client_id, base_url)
     except Exception as e:
         log.exception(f"Error while receiving images: {e}")
         images = None
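
Note on the change above: comfyui_generate_image becomes a coroutine, but the underlying websocket round-trip inside get_images is still synchronous, so it is pushed onto a worker thread with asyncio.to_thread and awaited. A minimal, self-contained sketch of the same pattern, with illustrative names rather than the actual open-webui code:

import asyncio
import time

def get_images_blocking(prompt: str) -> list[str]:
    # Stand-in for the synchronous websocket round-trip in get_images.
    time.sleep(2)
    return [f"image for {prompt}"]

async def generate_image(prompt: str) -> list[str]:
    # Offload the blocking call to a worker thread via asyncio.to_thread;
    # the event loop keeps serving other coroutines while this one waits.
    return await asyncio.to_thread(get_images_blocking, prompt)

async def main() -> None:
    # Await the coroutine, mirroring the new call site in
    # backend/apps/images/main.py: res = await comfyui_generate_image(...)
    images = await generate_image("a red fox")
    print(images)

asyncio.run(main())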

+ 2 - 2
backend/apps/openai/main.py

@@ -359,10 +359,10 @@ async def generate_chat_completion(
 ):
     idx = 0
     payload = {**form_data}
-    
+
     if "metadata" in payload:
     if "metadata" in payload:
         del payload["metadata"]
         del payload["metadata"]
-        
+
     model_id = form_data.get("model")
     model_info = Models.get_model_by_id(model_id)


+ 0 - 1
backend/utils/misc.py

@@ -192,7 +192,6 @@ def apply_model_params_to_body_ollama(params: dict, form_data: dict) -> dict:
         if (param := params.get(key, None)) is not None:
             form_data[value] = param

-    print(form_data)
     return form_data
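
Note on the removal above: the bare print(form_data) wrote request bodies to stdout on every call. If that inspection is still wanted during development, a debug-level log through the module logger is the conventional replacement. A sketch with illustrative names, not the actual misc.py code:

import logging

logging.basicConfig(level=logging.DEBUG)
log = logging.getLogger(__name__)

def apply_model_params(params: dict, form_data: dict) -> dict:
    # Copy recognised parameters into the outgoing request body.
    for key in ("temperature", "top_p"):
        if (param := params.get(key)) is not None:
            form_data[key] = param
    # A debug-level log replaces the removed print: visible while
    # developing, silenced in production by raising the log level.
    log.debug("form_data: %s", form_data)
    return form_data

apply_model_params({"temperature": 0.7}, {"model": "llama3"})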