@@ -3,7 +3,10 @@ from fastapi.middleware.cors import CORSMiddleware
 from fastapi.responses import StreamingResponse, JSONResponse, FileResponse
 
 import requests
+import aiohttp
+import asyncio
 import json
+
 from pydantic import BaseModel
 
 
@@ -15,7 +18,9 @@ from utils.utils import (
     get_verified_user,
     get_admin_user,
 )
-from config import OPENAI_API_BASE_URL, OPENAI_API_KEY, CACHE_DIR
+from config import OPENAI_API_BASE_URLS, OPENAI_API_KEYS, CACHE_DIR
+from typing import List, Optional
+
 
 import hashlib
 from pathlib import Path
@@ -29,116 +34,207 @@ app.add_middleware(
     allow_headers=["*"],
 )
 
-app.state.OPENAI_API_BASE_URL = OPENAI_API_BASE_URL
-app.state.OPENAI_API_KEY = OPENAI_API_KEY
+app.state.OPENAI_API_BASE_URLS = OPENAI_API_BASE_URLS
+app.state.OPENAI_API_KEYS = OPENAI_API_KEYS
+
+app.state.MODELS = {}
+
 
+@app.middleware("http")
+async def check_url(request: Request, call_next):
+    if len(app.state.MODELS) == 0:
+        await get_all_models()
+    else:
+        pass
 
-class UrlUpdateForm(BaseModel):
-    url: str
+    response = await call_next(request)
+    return response
 
 
-class KeyUpdateForm(BaseModel):
-    key: str
+class UrlsUpdateForm(BaseModel):
+    urls: List[str]
 
 
-@app.get("/url")
-async def get_openai_url(user=Depends(get_admin_user)):
-    return {"OPENAI_API_BASE_URL": app.state.OPENAI_API_BASE_URL}
+class KeysUpdateForm(BaseModel):
+    keys: List[str]
 
 
-@app.post("/url/update")
-async def update_openai_url(form_data: UrlUpdateForm, user=Depends(get_admin_user)):
-    app.state.OPENAI_API_BASE_URL = form_data.url
-    return {"OPENAI_API_BASE_URL": app.state.OPENAI_API_BASE_URL}
+@app.get("/urls")
+async def get_openai_urls(user=Depends(get_admin_user)):
+    return {"OPENAI_API_BASE_URLS": app.state.OPENAI_API_BASE_URLS}
 
 
-@app.get("/key")
-async def get_openai_key(user=Depends(get_admin_user)):
-    return {"OPENAI_API_KEY": app.state.OPENAI_API_KEY}
+@app.post("/urls/update")
+async def update_openai_urls(form_data: UrlsUpdateForm, user=Depends(get_admin_user)):
+    app.state.OPENAI_API_BASE_URLS = form_data.urls
+    return {"OPENAI_API_BASE_URLS": app.state.OPENAI_API_BASE_URLS}
 
 
-@app.post("/key/update")
-async def update_openai_key(form_data: KeyUpdateForm, user=Depends(get_admin_user)):
-    app.state.OPENAI_API_KEY = form_data.key
-    return {"OPENAI_API_KEY": app.state.OPENAI_API_KEY}
+@app.get("/keys")
+async def get_openai_keys(user=Depends(get_admin_user)):
+    return {"OPENAI_API_KEYS": app.state.OPENAI_API_KEYS}
+
+
+@app.post("/keys/update")
+async def update_openai_key(form_data: KeysUpdateForm, user=Depends(get_admin_user)):
+    app.state.OPENAI_API_KEYS = form_data.keys
+    return {"OPENAI_API_KEYS": app.state.OPENAI_API_KEYS}
 
 
@app.post("/audio/speech")
|
|
|
async def speech(request: Request, user=Depends(get_verified_user)):
|
|
|
- target_url = f"{app.state.OPENAI_API_BASE_URL}/audio/speech"
|
|
|
+ idx = None
|
|
|
+ try:
|
|
|
+ idx = app.state.OPENAI_API_BASE_URLS.index("https://api.openai.com/v1")
|
|
|
+ body = await request.body()
|
|
|
+ name = hashlib.sha256(body).hexdigest()
|
|
|
+
|
|
|
+ SPEECH_CACHE_DIR = Path(CACHE_DIR).joinpath("./audio/speech/")
|
|
|
+ SPEECH_CACHE_DIR.mkdir(parents=True, exist_ok=True)
|
|
|
+ file_path = SPEECH_CACHE_DIR.joinpath(f"{name}.mp3")
|
|
|
+ file_body_path = SPEECH_CACHE_DIR.joinpath(f"{name}.json")
|
|
|
+
|
|
|
+ # Check if the file already exists in the cache
|
|
|
+ if file_path.is_file():
|
|
|
+ return FileResponse(file_path)
|
|
|
+
|
|
|
+ headers = {}
|
|
|
+ headers["Authorization"] = f"Bearer {app.state.OPENAI_API_KEYS[idx]}"
|
|
|
+ headers["Content-Type"] = "application/json"
|
|
|
+
|
|
|
+ try:
|
|
|
+ r = requests.post(
|
|
|
+ url=f"{app.state.OPENAI_API_BASE_URLS[idx]}/audio/speech",
|
|
|
+ data=body,
|
|
|
+ headers=headers,
|
|
|
+ stream=True,
|
|
|
+ )
|
|
|
|
|
|
- if app.state.OPENAI_API_KEY == "":
|
|
|
- raise HTTPException(status_code=401, detail=ERROR_MESSAGES.API_KEY_NOT_FOUND)
|
|
|
+ r.raise_for_status()
|
|
|
|
|
|
- body = await request.body()
|
|
|
+ # Save the streaming content to a file
|
|
|
+ with open(file_path, "wb") as f:
|
|
|
+ for chunk in r.iter_content(chunk_size=8192):
|
|
|
+ f.write(chunk)
|
|
|
|
|
|
- name = hashlib.sha256(body).hexdigest()
|
|
|
+ with open(file_body_path, "w") as f:
|
|
|
+ json.dump(json.loads(body.decode("utf-8")), f)
|
|
|
|
|
|
- SPEECH_CACHE_DIR = Path(CACHE_DIR).joinpath("./audio/speech/")
|
|
|
- SPEECH_CACHE_DIR.mkdir(parents=True, exist_ok=True)
|
|
|
- file_path = SPEECH_CACHE_DIR.joinpath(f"{name}.mp3")
|
|
|
- file_body_path = SPEECH_CACHE_DIR.joinpath(f"{name}.json")
|
|
|
+ # Return the saved file
|
|
|
+ return FileResponse(file_path)
|
|
|
|
|
|
- # Check if the file already exists in the cache
|
|
|
- if file_path.is_file():
|
|
|
- return FileResponse(file_path)
|
|
|
+ except Exception as e:
|
|
|
+ print(e)
|
|
|
+ error_detail = "Open WebUI: Server Connection Error"
|
|
|
+ if r is not None:
|
|
|
+ try:
|
|
|
+ res = r.json()
|
|
|
+ if "error" in res:
|
|
|
+ error_detail = f"External: {res['error']}"
|
|
|
+ except:
|
|
|
+ error_detail = f"External: {e}"
|
|
|
+
|
|
|
+ raise HTTPException(status_code=r.status_code, detail=error_detail)
|
|
|
+
|
|
|
+ except ValueError:
|
|
|
+ raise HTTPException(status_code=401, detail=ERROR_MESSAGES.OPENAI_NOT_FOUND)
|
|
|
|
|
|
- headers = {}
|
|
|
- headers["Authorization"] = f"Bearer {app.state.OPENAI_API_KEY}"
|
|
|
- headers["Content-Type"] = "application/json"
|
|
|
|
|
|
+async def fetch_url(url, key):
     try:
-        print("openai")
-        r = requests.post(
-            url=target_url,
-            data=body,
-            headers=headers,
-            stream=True,
+        headers = {"Authorization": f"Bearer {key}"}
+        async with aiohttp.ClientSession() as session:
+            async with session.get(url, headers=headers) as response:
+                return await response.json()
+    except Exception as e:
+        # Handle connection error here
+        print(f"Connection error: {e}")
+        return None
+
+
+def merge_models_lists(model_lists):
+    merged_list = []
+
+    for idx, models in enumerate(model_lists):
+        merged_list.extend(
+            [
+                {**model, "urlIdx": idx}
+                for model in models
+                if "api.openai.com" not in app.state.OPENAI_API_BASE_URLS[idx]
+                or "gpt" in model["id"]
+            ]
         )
 
-        r.raise_for_status()
+    return merged_list
 
-        # Save the streaming content to a file
-        with open(file_path, "wb") as f:
-            for chunk in r.iter_content(chunk_size=8192):
-                f.write(chunk)
 
-        with open(file_body_path, "w") as f:
-            json.dump(json.loads(body.decode("utf-8")), f)
+async def get_all_models():
+    print("get_all_models")
+    tasks = [
+        fetch_url(f"{url}/models", app.state.OPENAI_API_KEYS[idx])
+        for idx, url in enumerate(app.state.OPENAI_API_BASE_URLS)
+    ]
+    responses = await asyncio.gather(*tasks)
+    responses = list(filter(lambda x: x is not None and "error" not in x, responses))
+    models = {
+        "data": merge_models_lists(
+            list(map(lambda response: response["data"], responses))
+        )
+    }
+    app.state.MODELS = {model["id"]: model for model in models["data"]}
 
-        # Return the saved file
-        return FileResponse(file_path)
+    return models
 
-    except Exception as e:
-        print(e)
-        error_detail = "Open WebUI: Server Connection Error"
-        if r is not None:
-            try:
-                res = r.json()
-                if "error" in res:
-                    error_detail = f"External: {res['error']}"
-            except:
-                error_detail = f"External: {e}"
 
-        raise HTTPException(status_code=r.status_code, detail=error_detail)
+# , user=Depends(get_current_user)
+@app.get("/models")
+@app.get("/models/{url_idx}")
+async def get_models(url_idx: Optional[int] = None):
+    if url_idx == None:
+        return await get_all_models()
+    else:
+        url = app.state.OPENAI_API_BASE_URLS[url_idx]
+        try:
+            r = requests.request(method="GET", url=f"{url}/models")
+            r.raise_for_status()
+
+            response_data = r.json()
+            if "api.openai.com" in url:
+                response_data["data"] = list(
+                    filter(lambda model: "gpt" in model["id"], response_data["data"])
+                )
+
+            return response_data
+        except Exception as e:
+            print(e)
+            error_detail = "Open WebUI: Server Connection Error"
+            if r is not None:
+                try:
+                    res = r.json()
+                    if "error" in res:
+                        error_detail = f"External: {res['error']}"
+                except:
+                    error_detail = f"External: {e}"
+
+            raise HTTPException(
+                status_code=r.status_code if r else 500,
+                detail=error_detail,
+            )
 
 
 @app.api_route("/{path:path}", methods=["GET", "POST", "PUT", "DELETE"])
 async def proxy(path: str, request: Request, user=Depends(get_verified_user)):
-    target_url = f"{app.state.OPENAI_API_BASE_URL}/{path}"
-    print(target_url, app.state.OPENAI_API_KEY)
-
-    if app.state.OPENAI_API_KEY == "":
-        raise HTTPException(status_code=401, detail=ERROR_MESSAGES.API_KEY_NOT_FOUND)
+    idx = 0
 
     body = await request.body()
-
     # TODO: Remove below after gpt-4-vision fix from Open AI
     # Try to decode the body of the request from bytes to a UTF-8 string (Require add max_token to fix gpt-4-vision)
     try:
         body = body.decode("utf-8")
         body = json.loads(body)
 
+        idx = app.state.MODELS[body.get("model")]["urlIdx"]
+
         # Check if the model is "gpt-4-vision-preview" and set "max_tokens" to 4000
         # This is a workaround until OpenAI fixes the issue with this model
         if body.get("model") == "gpt-4-vision-preview":
@@ -158,8 +254,16 @@ async def proxy(path: str, request: Request, user=Depends(get_verified_user)):
     except json.JSONDecodeError as e:
         print("Error loading request body into a dictionary:", e)
 
+    url = app.state.OPENAI_API_BASE_URLS[idx]
+    key = app.state.OPENAI_API_KEYS[idx]
+
+    target_url = f"{url}/{path}"
+
+    if key == "":
+        raise HTTPException(status_code=401, detail=ERROR_MESSAGES.API_KEY_NOT_FOUND)
+
     headers = {}
-    headers["Authorization"] = f"Bearer {app.state.OPENAI_API_KEY}"
+    headers["Authorization"] = f"Bearer {key}"
     headers["Content-Type"] = "application/json"
 
     try:
@@ -181,21 +285,7 @@ async def proxy(path: str, request: Request, user=Depends(get_verified_user)):
                 headers=dict(r.headers),
             )
         else:
-            # For non-SSE, read the response and return it
-            # response_data = (
-            #     r.json()
-            #     if r.headers.get("Content-Type", "")
-            #     == "application/json"
-            #     else r.text
-            # )
-
             response_data = r.json()
-
-            if "api.openai.com" in app.state.OPENAI_API_BASE_URL and path == "models":
-                response_data["data"] = list(
-                    filter(lambda model: "gpt" in model["id"], response_data["data"])
-                )
-
             return response_data
     except Exception as e:
         print(e)