@@ -10,9 +10,18 @@ from fastapi import (
     File,
     Form,
 )
+
+from fastapi.responses import StreamingResponse, JSONResponse, FileResponse
+
 from fastapi.middleware.cors import CORSMiddleware
 from faster_whisper import WhisperModel
 
+import requests
+import hashlib
+from pathlib import Path
+import json
+
+
 from constants import ERROR_MESSAGES
 from utils.utils import (
     decode_token,
@@ -30,6 +39,8 @@ from config import (
     WHISPER_MODEL_DIR,
     WHISPER_MODEL_AUTO_UPDATE,
     DEVICE_TYPE,
+    OPENAI_API_BASE_URL,
+    OPENAI_API_KEY,
 )
 
 log = logging.getLogger(__name__)
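
For context, `OPENAI_API_BASE_URL` and `OPENAI_API_KEY` are new names pulled in from `config`; their definitions are not part of this diff. A minimal sketch of how they could be declared in `config.py`, assuming the same environment-variable pattern as the module's other settings (the variable names and defaults below are assumptions, not taken from this patch):

```python
# config.py (sketch, not part of this diff): environment-driven settings.
import os

# Base URL of the OpenAI-compatible API that the new /speech endpoint proxies to.
OPENAI_API_BASE_URL = os.environ.get("OPENAI_API_BASE_URL", "https://api.openai.com/v1")

# API key forwarded as a Bearer token when requesting audio/speech.
OPENAI_API_KEY = os.environ.get("OPENAI_API_KEY", "")
```
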
@@ -44,12 +55,78 @@ app.add_middleware(
     allow_headers=["*"],
 )
 
+
+app.state.OPENAI_API_BASE_URL = OPENAI_API_BASE_URL
+app.state.OPENAI_API_KEY = OPENAI_API_KEY
+
 # setting device type for whisper model
 whisper_device_type = DEVICE_TYPE if DEVICE_TYPE and DEVICE_TYPE == "cuda" else "cpu"
 log.info(f"whisper_device_type: {whisper_device_type}")
 
+SPEECH_CACHE_DIR = Path(CACHE_DIR).joinpath("./audio/speech/")
+SPEECH_CACHE_DIR.mkdir(parents=True, exist_ok=True)
+
+
+@app.post("/speech")
+async def speech(request: Request, user=Depends(get_verified_user)):
+    idx = None
+    try:
+        body = await request.body()
+        name = hashlib.sha256(body).hexdigest()
+
+        file_path = SPEECH_CACHE_DIR.joinpath(f"{name}.mp3")
+        file_body_path = SPEECH_CACHE_DIR.joinpath(f"{name}.json")
+
+        # Check if the file already exists in the cache
+        if file_path.is_file():
+            return FileResponse(file_path)
+
+        headers = {}
+        headers["Authorization"] = f"Bearer {app.state.OPENAI_API_KEY}"
+        headers["Content-Type"] = "application/json"
+
+        r = None
+        try:
+            r = requests.post(
+                url=f"{app.state.OPENAI_API_BASE_URL}/audio/speech",
+                data=body,
+                headers=headers,
+                stream=True,
+            )
+
+            r.raise_for_status()
+
+            # Save the streaming content to a file
+            with open(file_path, "wb") as f:
+                for chunk in r.iter_content(chunk_size=8192):
+                    f.write(chunk)
+
+            with open(file_body_path, "w") as f:
+                json.dump(json.loads(body.decode("utf-8")), f)
+
+            # Return the saved file
+            return FileResponse(file_path)
+
+        except Exception as e:
+            log.exception(e)
+            error_detail = "Open WebUI: Server Connection Error"
+            if r is not None:
+                try:
+                    res = r.json()
+                    if "error" in res:
+                        error_detail = f"External: {res['error']}"
+                except:
+                    error_detail = f"External: {e}"
+
+            raise HTTPException(
+                status_code=r.status_code if r else 500, detail=error_detail
+            )
+
+    except ValueError:
+        raise HTTPException(status_code=401, detail=ERROR_MESSAGES.OPENAI_NOT_FOUND)
+
 
-@app.post("/transcribe")
+@app.post("/transcriptions")
 def transcribe(
     file: UploadFile = File(...),
     user=Depends(get_current_user),
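
Taken together, the new `/speech` route acts as a caching proxy: the raw JSON body is hashed with SHA-256, an existing `<hash>.mp3` in `SPEECH_CACHE_DIR` is returned straight from disk, and otherwise the body is forwarded verbatim to `{OPENAI_API_BASE_URL}/audio/speech`, streamed into the cache file, and then returned. A rough client-side sketch of calling it with an OpenAI-style payload (the host, mount prefix, and token below are assumptions about a typical deployment, not part of this patch):

```python
# Sketch: call the new /speech endpoint and save the returned MP3.
# "/audio/api/v1" is an assumed mount prefix; adjust to the actual deployment.
import hashlib
import json

import requests

payload = {"model": "tts-1", "input": "Hello from Open WebUI!", "voice": "alloy"}
body = json.dumps(payload).encode("utf-8")

# The server caches by SHA-256 of the exact request bytes, so resending the
# same body is served from SPEECH_CACHE_DIR without another upstream call.
print("cache key:", hashlib.sha256(body).hexdigest())

r = requests.post(
    "http://localhost:8080/audio/api/v1/speech",
    data=body,
    headers={
        "Authorization": "Bearer <open-webui token>",
        "Content-Type": "application/json",
    },
)
r.raise_for_status()

with open("speech.mp3", "wb") as f:
    f.write(r.content)
```

Because the cache key is derived from the exact request bytes, any change in whitespace, key order, or voice parameters produces a new cache entry rather than reusing an existing one.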