# main.py

import hashlib
import json
import logging
import os
import uuid
from functools import lru_cache
from pathlib import Path

from pydub import AudioSegment
from pydub.silence import split_on_silence
import requests

from open_webui.config import (
    AUDIO_STT_ENGINE,
    AUDIO_STT_MODEL,
    AUDIO_STT_OPENAI_API_BASE_URL,
    AUDIO_STT_OPENAI_API_KEY,
    AUDIO_TTS_API_KEY,
    AUDIO_TTS_ENGINE,
    AUDIO_TTS_MODEL,
    AUDIO_TTS_OPENAI_API_BASE_URL,
    AUDIO_TTS_OPENAI_API_KEY,
    AUDIO_TTS_SPLIT_ON,
    AUDIO_TTS_VOICE,
    AUDIO_TTS_AZURE_SPEECH_REGION,
    AUDIO_TTS_AZURE_SPEECH_OUTPUT_FORMAT,
    CACHE_DIR,
    CORS_ALLOW_ORIGIN,
    WHISPER_MODEL,
    WHISPER_MODEL_AUTO_UPDATE,
    WHISPER_MODEL_DIR,
    AppConfig,
)
from open_webui.constants import ERROR_MESSAGES
from open_webui.env import ENV, SRC_LOG_LEVELS, DEVICE_TYPE
from fastapi import Depends, FastAPI, File, HTTPException, Request, UploadFile, status
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import FileResponse
from pydantic import BaseModel
from open_webui.utils.utils import get_admin_user, get_verified_user

# Constants
MAX_FILE_SIZE_MB = 25
MAX_FILE_SIZE = MAX_FILE_SIZE_MB * 1024 * 1024  # Convert MB to bytes

log = logging.getLogger(__name__)
log.setLevel(SRC_LOG_LEVELS["AUDIO"])

app = FastAPI(
    docs_url="/docs" if ENV == "dev" else None,
    openapi_url="/openapi.json" if ENV == "dev" else None,
    redoc_url=None,
)

app.add_middleware(
    CORSMiddleware,
    allow_origins=CORS_ALLOW_ORIGIN,
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

app.state.config = AppConfig()

app.state.config.STT_OPENAI_API_BASE_URL = AUDIO_STT_OPENAI_API_BASE_URL
app.state.config.STT_OPENAI_API_KEY = AUDIO_STT_OPENAI_API_KEY
app.state.config.STT_ENGINE = AUDIO_STT_ENGINE
app.state.config.STT_MODEL = AUDIO_STT_MODEL

app.state.config.WHISPER_MODEL = WHISPER_MODEL
app.state.faster_whisper_model = None

app.state.config.TTS_OPENAI_API_BASE_URL = AUDIO_TTS_OPENAI_API_BASE_URL
app.state.config.TTS_OPENAI_API_KEY = AUDIO_TTS_OPENAI_API_KEY
app.state.config.TTS_ENGINE = AUDIO_TTS_ENGINE
app.state.config.TTS_MODEL = AUDIO_TTS_MODEL
app.state.config.TTS_VOICE = AUDIO_TTS_VOICE
app.state.config.TTS_API_KEY = AUDIO_TTS_API_KEY
app.state.config.TTS_SPLIT_ON = AUDIO_TTS_SPLIT_ON

app.state.config.TTS_AZURE_SPEECH_REGION = AUDIO_TTS_AZURE_SPEECH_REGION
app.state.config.TTS_AZURE_SPEECH_OUTPUT_FORMAT = AUDIO_TTS_AZURE_SPEECH_OUTPUT_FORMAT

# setting device type for whisper model
whisper_device_type = DEVICE_TYPE if DEVICE_TYPE and DEVICE_TYPE == "cuda" else "cpu"
log.info(f"whisper_device_type: {whisper_device_type}")

SPEECH_CACHE_DIR = Path(CACHE_DIR).joinpath("./audio/speech/")
SPEECH_CACHE_DIR.mkdir(parents=True, exist_ok=True)


def set_faster_whisper_model(model: str, auto_update: bool = False):
    if model and app.state.config.STT_ENGINE == "":
        from faster_whisper import WhisperModel

        faster_whisper_kwargs = {
            "model_size_or_path": model,
            "device": whisper_device_type,
            "compute_type": "int8",
            "download_root": WHISPER_MODEL_DIR,
            "local_files_only": not auto_update,
        }

        try:
            app.state.faster_whisper_model = WhisperModel(**faster_whisper_kwargs)
        except Exception:
            log.warning(
                "WhisperModel initialization failed, attempting download with local_files_only=False"
            )
            faster_whisper_kwargs["local_files_only"] = False
            app.state.faster_whisper_model = WhisperModel(**faster_whisper_kwargs)
    else:
        app.state.faster_whisper_model = None
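
# Note: set_faster_whisper_model() is invoked from the /config/update handler and
# lazily from transcribe() below when no model has been loaded yet; passing
# auto_update=True allows the model to be downloaded from the hub instead of
# requiring local files only (local_files_only=not auto_update).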


class TTSConfigForm(BaseModel):
    OPENAI_API_BASE_URL: str
    OPENAI_API_KEY: str
    API_KEY: str
    ENGINE: str
    MODEL: str
    VOICE: str
    SPLIT_ON: str
    AZURE_SPEECH_REGION: str
    AZURE_SPEECH_OUTPUT_FORMAT: str


class STTConfigForm(BaseModel):
    OPENAI_API_BASE_URL: str
    OPENAI_API_KEY: str
    ENGINE: str
    MODEL: str
    WHISPER_MODEL: str


class AudioConfigUpdateForm(BaseModel):
    tts: TTSConfigForm
    stt: STTConfigForm


from pydub.utils import mediainfo


def is_mp4_audio(file_path):
    """Check if the given file is an MP4 audio file."""
    if not os.path.isfile(file_path):
        print(f"File not found: {file_path}")
        return False

    info = mediainfo(file_path)
    if (
        info.get("codec_name") == "aac"
        and info.get("codec_type") == "audio"
        and info.get("codec_tag_string") == "mp4a"
    ):
        return True
    return False


def convert_mp4_to_wav(file_path, output_path):
    """Convert MP4 audio file to WAV format."""
    audio = AudioSegment.from_file(file_path, format="mp4")
    audio.export(output_path, format="wav")
    print(f"Converted {file_path} to {output_path}")


@app.get("/config")
async def get_audio_config(user=Depends(get_admin_user)):
    return {
        "tts": {
            "OPENAI_API_BASE_URL": app.state.config.TTS_OPENAI_API_BASE_URL,
            "OPENAI_API_KEY": app.state.config.TTS_OPENAI_API_KEY,
            "API_KEY": app.state.config.TTS_API_KEY,
            "ENGINE": app.state.config.TTS_ENGINE,
            "MODEL": app.state.config.TTS_MODEL,
            "VOICE": app.state.config.TTS_VOICE,
            "SPLIT_ON": app.state.config.TTS_SPLIT_ON,
            "AZURE_SPEECH_REGION": app.state.config.TTS_AZURE_SPEECH_REGION,
            "AZURE_SPEECH_OUTPUT_FORMAT": app.state.config.TTS_AZURE_SPEECH_OUTPUT_FORMAT,
        },
        "stt": {
            "OPENAI_API_BASE_URL": app.state.config.STT_OPENAI_API_BASE_URL,
            "OPENAI_API_KEY": app.state.config.STT_OPENAI_API_KEY,
            "ENGINE": app.state.config.STT_ENGINE,
            "MODEL": app.state.config.STT_MODEL,
            "WHISPER_MODEL": app.state.config.WHISPER_MODEL,
        },
    }


@app.post("/config/update")
async def update_audio_config(
    form_data: AudioConfigUpdateForm, user=Depends(get_admin_user)
):
    app.state.config.TTS_OPENAI_API_BASE_URL = form_data.tts.OPENAI_API_BASE_URL
    app.state.config.TTS_OPENAI_API_KEY = form_data.tts.OPENAI_API_KEY
    app.state.config.TTS_API_KEY = form_data.tts.API_KEY
    app.state.config.TTS_ENGINE = form_data.tts.ENGINE
    app.state.config.TTS_MODEL = form_data.tts.MODEL
    app.state.config.TTS_VOICE = form_data.tts.VOICE
    app.state.config.TTS_SPLIT_ON = form_data.tts.SPLIT_ON
    app.state.config.TTS_AZURE_SPEECH_REGION = form_data.tts.AZURE_SPEECH_REGION
    app.state.config.TTS_AZURE_SPEECH_OUTPUT_FORMAT = (
        form_data.tts.AZURE_SPEECH_OUTPUT_FORMAT
    )

    app.state.config.STT_OPENAI_API_BASE_URL = form_data.stt.OPENAI_API_BASE_URL
    app.state.config.STT_OPENAI_API_KEY = form_data.stt.OPENAI_API_KEY
    app.state.config.STT_ENGINE = form_data.stt.ENGINE
    app.state.config.STT_MODEL = form_data.stt.MODEL
    app.state.config.WHISPER_MODEL = form_data.stt.WHISPER_MODEL
    set_faster_whisper_model(form_data.stt.WHISPER_MODEL, WHISPER_MODEL_AUTO_UPDATE)

    return {
        "tts": {
            "OPENAI_API_BASE_URL": app.state.config.TTS_OPENAI_API_BASE_URL,
            "OPENAI_API_KEY": app.state.config.TTS_OPENAI_API_KEY,
            "API_KEY": app.state.config.TTS_API_KEY,
            "ENGINE": app.state.config.TTS_ENGINE,
            "MODEL": app.state.config.TTS_MODEL,
            "VOICE": app.state.config.TTS_VOICE,
            "SPLIT_ON": app.state.config.TTS_SPLIT_ON,
            "AZURE_SPEECH_REGION": app.state.config.TTS_AZURE_SPEECH_REGION,
            "AZURE_SPEECH_OUTPUT_FORMAT": app.state.config.TTS_AZURE_SPEECH_OUTPUT_FORMAT,
        },
        "stt": {
            "OPENAI_API_BASE_URL": app.state.config.STT_OPENAI_API_BASE_URL,
            "OPENAI_API_KEY": app.state.config.STT_OPENAI_API_KEY,
            "ENGINE": app.state.config.STT_ENGINE,
            "MODEL": app.state.config.STT_MODEL,
            "WHISPER_MODEL": app.state.config.WHISPER_MODEL,
        },
    }


@app.post("/speech")
async def speech(request: Request, user=Depends(get_verified_user)):
    body = await request.body()
    name = hashlib.sha256(body).hexdigest()

    file_path = SPEECH_CACHE_DIR.joinpath(f"{name}.mp3")
    file_body_path = SPEECH_CACHE_DIR.joinpath(f"{name}.json")

    # Check if the file already exists in the cache
    if file_path.is_file():
        return FileResponse(file_path)

    if app.state.config.TTS_ENGINE == "openai":
        headers = {}
        headers["Authorization"] = f"Bearer {app.state.config.TTS_OPENAI_API_KEY}"
        headers["Content-Type"] = "application/json"

        try:
            body = body.decode("utf-8")
            body = json.loads(body)
            body["model"] = app.state.config.TTS_MODEL
            body = json.dumps(body).encode("utf-8")
        except Exception:
            pass

        r = None
        try:
            r = requests.post(
                url=f"{app.state.config.TTS_OPENAI_API_BASE_URL}/audio/speech",
                data=body,
                headers=headers,
                stream=True,
            )

            r.raise_for_status()

            # Save the streaming content to a file
            with open(file_path, "wb") as f:
                for chunk in r.iter_content(chunk_size=8192):
                    f.write(chunk)

            with open(file_body_path, "w") as f:
                json.dump(json.loads(body.decode("utf-8")), f)

            # Return the saved file
            return FileResponse(file_path)

        except Exception as e:
            log.exception(e)
            error_detail = "Open WebUI: Server Connection Error"
            if r is not None:
                try:
                    res = r.json()
                    if "error" in res:
                        error_detail = f"External: {res['error']['message']}"
                except Exception:
                    error_detail = f"External: {e}"

            raise HTTPException(
                status_code=r.status_code if r is not None else 500,
                detail=error_detail,
            )

    elif app.state.config.TTS_ENGINE == "elevenlabs":
        payload = None
        try:
            payload = json.loads(body.decode("utf-8"))
        except Exception as e:
            log.exception(e)
            raise HTTPException(status_code=400, detail="Invalid JSON payload")

        voice_id = payload.get("voice", "")
        if voice_id not in get_available_voices():
            raise HTTPException(
                status_code=400,
                detail="Invalid voice id",
            )

        url = f"https://api.elevenlabs.io/v1/text-to-speech/{voice_id}"

        headers = {
            "Accept": "audio/mpeg",
            "Content-Type": "application/json",
            "xi-api-key": app.state.config.TTS_API_KEY,
        }

        data = {
            "text": payload["input"],
            "model_id": app.state.config.TTS_MODEL,
            "voice_settings": {"stability": 0.5, "similarity_boost": 0.5},
        }

        r = None
        try:
            r = requests.post(url, json=data, headers=headers)

            r.raise_for_status()

            # Save the streaming content to a file
            with open(file_path, "wb") as f:
                for chunk in r.iter_content(chunk_size=8192):
                    f.write(chunk)

            with open(file_body_path, "w") as f:
                json.dump(json.loads(body.decode("utf-8")), f)

            # Return the saved file
            return FileResponse(file_path)

        except Exception as e:
            log.exception(e)
            error_detail = "Open WebUI: Server Connection Error"
            if r is not None:
                try:
                    res = r.json()
                    if "error" in res:
                        error_detail = f"External: {res['error']['message']}"
                except Exception:
                    error_detail = f"External: {e}"

            raise HTTPException(
                status_code=r.status_code if r is not None else 500,
                detail=error_detail,
            )

    elif app.state.config.TTS_ENGINE == "azure":
        payload = None
        try:
            payload = json.loads(body.decode("utf-8"))
        except Exception as e:
            log.exception(e)
            raise HTTPException(status_code=400, detail="Invalid JSON payload")

        region = app.state.config.TTS_AZURE_SPEECH_REGION
        language = app.state.config.TTS_VOICE
        locale = "-".join(app.state.config.TTS_VOICE.split("-")[:1])
        output_format = app.state.config.TTS_AZURE_SPEECH_OUTPUT_FORMAT
        url = f"https://{region}.tts.speech.microsoft.com/cognitiveservices/v1"

        headers = {
            "Ocp-Apim-Subscription-Key": app.state.config.TTS_API_KEY,
            "Content-Type": "application/ssml+xml",
            "X-Microsoft-OutputFormat": output_format,
        }

        data = f"""<speak version="1.0" xmlns="http://www.w3.org/2001/10/synthesis" xml:lang="{locale}">
            <voice name="{language}">{payload["input"]}</voice>
        </speak>"""

        response = requests.post(url, headers=headers, data=data)

        if response.status_code == 200:
            with open(file_path, "wb") as f:
                f.write(response.content)
            return FileResponse(file_path)
        else:
            log.error(f"Error synthesizing speech - {response.reason}")
            raise HTTPException(
                status_code=500, detail=f"Error synthesizing speech - {response.reason}"
            )
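
# Illustrative request body for /speech (inferred from the handler above; the fields
# shown are not a formal schema):
#
#   {"input": "Hello from Open WebUI", "voice": "alloy"}
#
# The OpenAI engine forwards the body to {TTS_OPENAI_API_BASE_URL}/audio/speech with
# "model" overwritten by the configured TTS_MODEL; ElevenLabs uses "voice" as the
# voice_id and "input" as the text; Azure uses "input" with the voice taken from the
# configured TTS_VOICE rather than the payload.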


def transcribe(file_path):
    print("transcribe", file_path)
    filename = os.path.basename(file_path)
    file_dir = os.path.dirname(file_path)
    id = filename.split(".")[0]

    if app.state.config.STT_ENGINE == "":
        if app.state.faster_whisper_model is None:
            set_faster_whisper_model(app.state.config.WHISPER_MODEL)

        model = app.state.faster_whisper_model
        segments, info = model.transcribe(file_path, beam_size=5)
        log.info(
            "Detected language '%s' with probability %f"
            % (info.language, info.language_probability)
        )

        transcript = "".join([segment.text for segment in list(segments)])
        data = {"text": transcript.strip()}

        # save the transcript to a json file
        transcript_file = f"{file_dir}/{id}.json"
        with open(transcript_file, "w") as f:
            json.dump(data, f)

        log.debug(data)
        return data
    elif app.state.config.STT_ENGINE == "openai":
        if is_mp4_audio(file_path):
            print("is_mp4_audio")
            os.rename(file_path, file_path.replace(".wav", ".mp4"))
            # Convert MP4 audio file to WAV format
            convert_mp4_to_wav(file_path.replace(".wav", ".mp4"), file_path)

        headers = {"Authorization": f"Bearer {app.state.config.STT_OPENAI_API_KEY}"}

        files = {"file": (filename, open(file_path, "rb"))}
        data = {"model": app.state.config.STT_MODEL}

        log.debug(f"files: {files}, data: {data}")

        r = None
        try:
            r = requests.post(
                url=f"{app.state.config.STT_OPENAI_API_BASE_URL}/audio/transcriptions",
                headers=headers,
                files=files,
                data=data,
            )

            r.raise_for_status()

            data = r.json()

            # save the transcript to a json file
            transcript_file = f"{file_dir}/{id}.json"
            with open(transcript_file, "w") as f:
                json.dump(data, f)

            print(data)
            return data
        except Exception as e:
            log.exception(e)
            error_detail = "Open WebUI: Server Connection Error"
            if r is not None:
                try:
                    res = r.json()
                    if "error" in res:
                        error_detail = f"External: {res['error']['message']}"
                except Exception:
                    error_detail = f"External: {e}"

            raise Exception(error_detail)


@app.post("/transcriptions")
def transcription(
    file: UploadFile = File(...),
    user=Depends(get_verified_user),
):
    log.info(f"file.content_type: {file.content_type}")

    if file.content_type not in ["audio/mpeg", "audio/wav", "audio/ogg", "audio/x-m4a"]:
        raise HTTPException(
            status_code=status.HTTP_400_BAD_REQUEST,
            detail=ERROR_MESSAGES.FILE_NOT_SUPPORTED,
        )

    try:
        ext = file.filename.split(".")[-1]
        id = uuid.uuid4()

        filename = f"{id}.{ext}"
        contents = file.file.read()

        file_dir = f"{CACHE_DIR}/audio/transcriptions"
        os.makedirs(file_dir, exist_ok=True)
        file_path = f"{file_dir}/{filename}"

        with open(file_path, "wb") as f:
            f.write(contents)

        try:
            if os.path.getsize(file_path) > MAX_FILE_SIZE:  # file is bigger than 25MB
                log.debug(f"File size is larger than {MAX_FILE_SIZE_MB}MB")
                audio = AudioSegment.from_file(file_path)
                audio = audio.set_frame_rate(16000).set_channels(1)  # Compress audio
                compressed_path = f"{file_dir}/{id}_compressed.opus"
                audio.export(compressed_path, format="opus", bitrate="32k")
                log.debug(f"Compressed audio to {compressed_path}")
                file_path = compressed_path

                if (
                    os.path.getsize(file_path) > MAX_FILE_SIZE
                ):  # Still larger than 25MB after compression
                    log.debug(
                        f"Compressed file size is still larger than {MAX_FILE_SIZE_MB}MB: {os.path.getsize(file_path)}"
                    )
                    raise HTTPException(
                        status_code=status.HTTP_400_BAD_REQUEST,
                        detail=ERROR_MESSAGES.FILE_TOO_LARGE(
                            size=f"{MAX_FILE_SIZE_MB}MB"
                        ),
                    )

                data = transcribe(file_path)
            else:
                data = transcribe(file_path)

            file_path = file_path.split("/")[-1]
            return {**data, "filename": file_path}
        except Exception as e:
            log.exception(e)
            raise HTTPException(
                status_code=status.HTTP_400_BAD_REQUEST,
                detail=ERROR_MESSAGES.DEFAULT(e),
            )

    except Exception as e:
        log.exception(e)
        raise HTTPException(
            status_code=status.HTTP_400_BAD_REQUEST,
            detail=ERROR_MESSAGES.DEFAULT(e),
        )
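
# Illustrative client call for /transcriptions (a sketch only: the mount path of this
# router and the auth token format depend on the deployment and are assumptions here):
#
#   import requests
#
#   with open("sample.wav", "rb") as f:
#       resp = requests.post(
#           "http://localhost:8080/audio/api/v1/transcriptions",  # assumed URL prefix
#           headers={"Authorization": "Bearer <token>"},
#           files={"file": ("sample.wav", f, "audio/wav")},
#       )
#   print(resp.json())  # e.g. {"text": "...", "filename": "<uuid>.wav"}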


def get_available_models() -> list[dict]:
    if app.state.config.TTS_ENGINE == "openai":
        return [{"id": "tts-1"}, {"id": "tts-1-hd"}]
    elif app.state.config.TTS_ENGINE == "elevenlabs":
        headers = {
            "xi-api-key": app.state.config.TTS_API_KEY,
            "Content-Type": "application/json",
        }

        try:
            response = requests.get(
                "https://api.elevenlabs.io/v1/models", headers=headers, timeout=5
            )
            response.raise_for_status()
            models = response.json()
            return [
                {"name": model["name"], "id": model["model_id"]} for model in models
            ]
        except requests.RequestException as e:
            log.error(f"Error fetching models: {str(e)}")
    return []


@app.get("/models")
async def get_models(user=Depends(get_verified_user)):
    return {"models": get_available_models()}


def get_available_voices() -> dict:
    """Returns {voice_id: voice_name} dict"""
    ret = {}
    if app.state.config.TTS_ENGINE == "openai":
        ret = {
            "alloy": "alloy",
            "echo": "echo",
            "fable": "fable",
            "onyx": "onyx",
            "nova": "nova",
            "shimmer": "shimmer",
        }
    elif app.state.config.TTS_ENGINE == "elevenlabs":
        try:
            ret = get_elevenlabs_voices()
        except Exception:
            # get_elevenlabs_voices() raises on failure so errors are not cached by @lru_cache
            pass
    elif app.state.config.TTS_ENGINE == "azure":
        try:
            region = app.state.config.TTS_AZURE_SPEECH_REGION
            url = f"https://{region}.tts.speech.microsoft.com/cognitiveservices/voices/list"
            headers = {"Ocp-Apim-Subscription-Key": app.state.config.TTS_API_KEY}

            response = requests.get(url, headers=headers)
            response.raise_for_status()
            voices = response.json()

            for voice in voices:
                ret[voice["ShortName"]] = (
                    f"{voice['DisplayName']} ({voice['ShortName']})"
                )
        except requests.RequestException as e:
            log.error(f"Error fetching voices: {str(e)}")

    return ret


@lru_cache
def get_elevenlabs_voices() -> dict:
    """
    Note: set the following in your .env file to use ElevenLabs:

    AUDIO_TTS_ENGINE=elevenlabs
    AUDIO_TTS_API_KEY=sk_...  # Your ElevenLabs API key
    AUDIO_TTS_VOICE=EXAVITQu4vr4xnSDxMaL  # From https://api.elevenlabs.io/v1/voices
    AUDIO_TTS_MODEL=eleven_multilingual_v2
    """
    headers = {
        "xi-api-key": app.state.config.TTS_API_KEY,
        "Content-Type": "application/json",
    }

    try:
        # TODO: Add retries
        response = requests.get("https://api.elevenlabs.io/v1/voices", headers=headers)
        response.raise_for_status()
        voices_data = response.json()

        voices = {}
        for voice in voices_data.get("voices", []):
            voices[voice["voice_id"]] = voice["name"]
    except requests.RequestException as e:
        # Raise instead of returning a value so the failure is not cached by @lru_cache
        log.error(f"Error fetching voices: {str(e)}")
        raise RuntimeError(f"Error fetching voices: {str(e)}")

    return voices


@app.get("/voices")
async def get_voices(user=Depends(get_verified_user)):
    return {"voices": [{"id": k, "name": v} for k, v in get_available_voices().items()]}