@@ -432,7 +432,12 @@ async def generate_chat_completion(
     idx = model["urlIdx"]

     if "pipeline" in model and model.get("pipeline"):
-        payload["user"] = {"name": user.name, "id": user.id}
+        payload["user"] = {
+            "name": user.name,
+            "id": user.id,
+            "email": user.email,
+            "role": user.role,
+        }

     # Check if the model is "gpt-4-vision-preview" and set "max_tokens" to 4000
     # This is a workaround until OpenAI fixes the issue with this model