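"""OpenAI-compatible API router for Open WebUI.

Proxies chat completions, model listings, audio/speech, and arbitrary
OpenAI-style routes to one or more configured OpenAI-compatible endpoints,
with admin-managed base URLs and API keys, optional model filtering, and
on-disk caching for generated speech.
"""
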
from fastapi import FastAPI, Request, Response, HTTPException, Depends
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import StreamingResponse, JSONResponse, FileResponse

import requests
import aiohttp
import asyncio
import json
import logging

from pydantic import BaseModel
from starlette.background import BackgroundTask

from apps.webui.models.models import Models
from apps.webui.models.users import Users
from constants import ERROR_MESSAGES
from utils.utils import (
    decode_token,
    get_current_user,
    get_verified_user,
    get_admin_user,
)
from config import (
    SRC_LOG_LEVELS,
    ENABLE_OPENAI_API,
    OPENAI_API_BASE_URLS,
    OPENAI_API_KEYS,
    CACHE_DIR,
    ENABLE_MODEL_FILTER,
    MODEL_FILTER_LIST,
    AppConfig,
)
from typing import List, Optional
import hashlib
from pathlib import Path

log = logging.getLogger(__name__)
log.setLevel(SRC_LOG_LEVELS["OPENAI"])

app = FastAPI()
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

app.state.config = AppConfig()

app.state.config.ENABLE_MODEL_FILTER = ENABLE_MODEL_FILTER
app.state.config.MODEL_FILTER_LIST = MODEL_FILTER_LIST

app.state.config.ENABLE_OPENAI_API = ENABLE_OPENAI_API
app.state.config.OPENAI_API_BASE_URLS = OPENAI_API_BASE_URLS
app.state.config.OPENAI_API_KEYS = OPENAI_API_KEYS

app.state.MODELS = {}


@app.middleware("http")
async def check_url(request: Request, call_next):
    # Lazily populate the model cache on the first request.
    if len(app.state.MODELS) == 0:
        await get_all_models()

    response = await call_next(request)
    return response


@app.get("/config")
async def get_config(user=Depends(get_admin_user)):
    return {"ENABLE_OPENAI_API": app.state.config.ENABLE_OPENAI_API}


class OpenAIConfigForm(BaseModel):
    enable_openai_api: Optional[bool] = None


@app.post("/config/update")
async def update_config(form_data: OpenAIConfigForm, user=Depends(get_admin_user)):
    app.state.config.ENABLE_OPENAI_API = form_data.enable_openai_api
    return {"ENABLE_OPENAI_API": app.state.config.ENABLE_OPENAI_API}


class UrlsUpdateForm(BaseModel):
    urls: List[str]


class KeysUpdateForm(BaseModel):
    keys: List[str]


@app.get("/urls")
async def get_openai_urls(user=Depends(get_admin_user)):
    return {"OPENAI_API_BASE_URLS": app.state.config.OPENAI_API_BASE_URLS}


@app.post("/urls/update")
async def update_openai_urls(form_data: UrlsUpdateForm, user=Depends(get_admin_user)):
    app.state.config.OPENAI_API_BASE_URLS = form_data.urls
    # Refresh the model cache against the newly configured endpoints.
    await get_all_models()
    return {"OPENAI_API_BASE_URLS": app.state.config.OPENAI_API_BASE_URLS}


@app.get("/keys")
async def get_openai_keys(user=Depends(get_admin_user)):
    return {"OPENAI_API_KEYS": app.state.config.OPENAI_API_KEYS}


@app.post("/keys/update")
async def update_openai_key(form_data: KeysUpdateForm, user=Depends(get_admin_user)):
    app.state.config.OPENAI_API_KEYS = form_data.keys
    return {"OPENAI_API_KEYS": app.state.config.OPENAI_API_KEYS}


@app.post("/audio/speech")
async def speech(request: Request, user=Depends(get_verified_user)):
    idx = None
    try:
        idx = app.state.config.OPENAI_API_BASE_URLS.index("https://api.openai.com/v1")

        body = await request.body()
        name = hashlib.sha256(body).hexdigest()

        SPEECH_CACHE_DIR = Path(CACHE_DIR).joinpath("./audio/speech/")
        SPEECH_CACHE_DIR.mkdir(parents=True, exist_ok=True)
        file_path = SPEECH_CACHE_DIR.joinpath(f"{name}.mp3")
        file_body_path = SPEECH_CACHE_DIR.joinpath(f"{name}.json")

        # Check if the file already exists in the cache
        if file_path.is_file():
            return FileResponse(file_path)

        headers = {}
        headers["Authorization"] = f"Bearer {app.state.config.OPENAI_API_KEYS[idx]}"
        headers["Content-Type"] = "application/json"
        if "openrouter.ai" in app.state.config.OPENAI_API_BASE_URLS[idx]:
            headers["HTTP-Referer"] = "https://openwebui.com/"
            headers["X-Title"] = "Open WebUI"

        r = None
        try:
            r = requests.post(
                url=f"{app.state.config.OPENAI_API_BASE_URLS[idx]}/audio/speech",
                data=body,
                headers=headers,
                stream=True,
            )
            r.raise_for_status()

            # Save the streaming content to a file
            with open(file_path, "wb") as f:
                for chunk in r.iter_content(chunk_size=8192):
                    f.write(chunk)

            # Keep the request body alongside the audio for cache bookkeeping
            with open(file_body_path, "w") as f:
                json.dump(json.loads(body.decode("utf-8")), f)

            # Return the saved file
            return FileResponse(file_path)

        except Exception as e:
            log.exception(e)
            error_detail = "Open WebUI: Server Connection Error"
            if r is not None:
                try:
                    res = r.json()
                    if "error" in res:
                        error_detail = f"External: {res['error']}"
                except Exception:
                    error_detail = f"External: {e}"

            # Note: `is not None` matters here; a requests.Response with an
            # error status is falsy, so a bare `if r` would always yield 500.
            raise HTTPException(
                status_code=r.status_code if r is not None else 500,
                detail=error_detail,
            )

    except ValueError:
        raise HTTPException(status_code=401, detail=ERROR_MESSAGES.OPENAI_NOT_FOUND)


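# Helpers for querying every configured endpoint and merging the results into
# a single model list keyed for the UI.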
async def fetch_url(url, key):
    timeout = aiohttp.ClientTimeout(total=5)
    try:
        headers = {"Authorization": f"Bearer {key}"}
        async with aiohttp.ClientSession(timeout=timeout, trust_env=True) as session:
            async with session.get(url, headers=headers) as response:
                return await response.json()
    except Exception as e:
        # Handle connection error here
        log.error(f"Connection error: {e}")
        return None


async def cleanup_response(
    response: Optional[aiohttp.ClientResponse],
    session: Optional[aiohttp.ClientSession],
):
    if response:
        response.close()
    if session:
        await session.close()


def merge_models_lists(model_lists):
    log.debug(f"merge_models_lists {model_lists}")
    merged_list = []

    for idx, models in enumerate(model_lists):
        if models is not None and "error" not in models:
            merged_list.extend(
                [
                    {
                        **model,
                        "name": model.get("name", model["id"]),
                        "owned_by": "openai",
                        "openai": model,
                        "urlIdx": idx,
                    }
                    for model in models
                    # For api.openai.com, expose only GPT models; other
                    # endpoints expose everything they return.
                    if "api.openai.com"
                    not in app.state.config.OPENAI_API_BASE_URLS[idx]
                    or "gpt" in model["id"]
                ]
            )

    return merged_list


async def get_all_models(raw: bool = False):
    log.info("get_all_models()")

    if (
        len(app.state.config.OPENAI_API_KEYS) == 1
        and app.state.config.OPENAI_API_KEYS[0] == ""
    ) or not app.state.config.ENABLE_OPENAI_API:
        models = {"data": []}
    else:
        # Check that the API keys list is the same length as the API URLs list
        if len(app.state.config.OPENAI_API_KEYS) != len(
            app.state.config.OPENAI_API_BASE_URLS
        ):
            # if there are more keys than urls, remove the extra keys
            if len(app.state.config.OPENAI_API_KEYS) > len(
                app.state.config.OPENAI_API_BASE_URLS
            ):
                app.state.config.OPENAI_API_KEYS = app.state.config.OPENAI_API_KEYS[
                    : len(app.state.config.OPENAI_API_BASE_URLS)
                ]
            # if there are more urls than keys, pad with empty keys
            else:
                app.state.config.OPENAI_API_KEYS += [
                    ""
                    for _ in range(
                        len(app.state.config.OPENAI_API_BASE_URLS)
                        - len(app.state.config.OPENAI_API_KEYS)
                    )
                ]

        # Query every configured endpoint concurrently
        tasks = [
            fetch_url(f"{url}/models", app.state.config.OPENAI_API_KEYS[idx])
            for idx, url in enumerate(app.state.config.OPENAI_API_BASE_URLS)
        ]

        responses = await asyncio.gather(*tasks)
        log.debug(f"get_all_models:responses() {responses}")

        if raw:
            return responses

        models = {
            "data": merge_models_lists(
                list(
                    map(
                        lambda response: (
                            response["data"]
                            if (response and "data" in response)
                            else (response if isinstance(response, list) else None)
                        ),
                        responses,
                    )
                )
            )
        }

        log.debug(f"models: {models}")

    app.state.MODELS = {model["id"]: model for model in models["data"]}

    return models


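# OpenAI-compatible surface: /models mirrors the upstream list endpoint,
# either aggregated across all endpoints or scoped to a single url_idx, with
# per-role filtering when ENABLE_MODEL_FILTER is set.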
@app.get("/models")
@app.get("/models/{url_idx}")
async def get_models(url_idx: Optional[int] = None, user=Depends(get_current_user)):
    if url_idx is None:
        models = await get_all_models()
        if app.state.config.ENABLE_MODEL_FILTER:
            if user.role == "user":
                models["data"] = list(
                    filter(
                        lambda model: model["id"] in app.state.config.MODEL_FILTER_LIST,
                        models["data"],
                    )
                )
        return models
    else:
        url = app.state.config.OPENAI_API_BASE_URLS[url_idx]
        key = app.state.config.OPENAI_API_KEYS[url_idx]

        headers = {}
        headers["Authorization"] = f"Bearer {key}"
        headers["Content-Type"] = "application/json"

        r = None
        try:
            r = requests.request(method="GET", url=f"{url}/models", headers=headers)
            r.raise_for_status()

            response_data = r.json()
            if "api.openai.com" in url:
                response_data["data"] = list(
                    filter(lambda model: "gpt" in model["id"], response_data["data"])
                )

            return response_data
        except Exception as e:
            log.exception(e)
            error_detail = "Open WebUI: Server Connection Error"
            if r is not None:
                try:
                    res = r.json()
                    if "error" in res:
                        error_detail = f"External: {res['error']}"
                except Exception:
                    error_detail = f"External: {e}"

            raise HTTPException(
                status_code=r.status_code if r is not None else 500,
                detail=error_detail,
            )


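# /chat/completions applies per-model parameter overrides (temperature, top_p,
# max_tokens, frequency penalty, seed, stop sequences, system prompt) before
# forwarding the request to the endpoint that owns the selected model.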
@app.post("/chat/completions")
@app.post("/chat/completions/{url_idx}")
async def generate_chat_completion(
    form_data: dict,
    url_idx: Optional[int] = None,
    user=Depends(get_verified_user),
):
    idx = 0
    payload = {**form_data}

    model_id = form_data.get("model")
    model_info = Models.get_model_by_id(model_id)

    if model_info:
        if model_info.base_model_id:
            payload["model"] = model_info.base_model_id

        model_info.params = model_info.params.model_dump()

        if model_info.params:
            if model_info.params.get("temperature", None) is not None:
                payload["temperature"] = float(model_info.params.get("temperature"))

            if model_info.params.get("top_p", None):
                # top_p is a nucleus-sampling probability, so it must stay a float
                payload["top_p"] = float(model_info.params.get("top_p", None))

            if model_info.params.get("max_tokens", None):
                payload["max_tokens"] = int(model_info.params.get("max_tokens", None))

            if model_info.params.get("frequency_penalty", None):
                # frequency_penalty ranges over [-2.0, 2.0], so cast to float
                payload["frequency_penalty"] = float(
                    model_info.params.get("frequency_penalty", None)
                )

            if model_info.params.get("seed", None):
                payload["seed"] = model_info.params.get("seed", None)

            if model_info.params.get("stop", None):
                payload["stop"] = (
                    [
                        bytes(stop, "utf-8").decode("unicode_escape")
                        for stop in model_info.params["stop"]
                    ]
                    if model_info.params.get("stop", None)
                    else None
                )

            if model_info.params.get("system", None):
                # Check if the payload already has a system message
                # If not, add a system message to the payload
                if payload.get("messages"):
                    for message in payload["messages"]:
                        if message.get("role") == "system":
                            message["content"] = (
                                model_info.params.get("system", None)
                                + message["content"]
                            )
                            break
                    # for/else: runs only when no system message was found
                    else:
                        payload["messages"].insert(
                            0,
                            {
                                "role": "system",
                                "content": model_info.params.get("system", None),
                            },
                        )

    model = app.state.MODELS[payload.get("model")]
    idx = model["urlIdx"]

    if "pipeline" in model and model.get("pipeline"):
        payload["user"] = {"name": user.name, "id": user.id}

    # Check if the model is "gpt-4-vision-preview" and set "max_tokens" to 4000
    # This is a workaround until OpenAI fixes the issue with this model
    if payload.get("model") == "gpt-4-vision-preview":
        if "max_tokens" not in payload:
            payload["max_tokens"] = 4000
        log.debug(f"Modified payload: {payload}")

    # Convert the modified body back to JSON
    payload = json.dumps(payload)

    url = app.state.config.OPENAI_API_BASE_URLS[idx]
    key = app.state.config.OPENAI_API_KEYS[idx]

    headers = {}
    headers["Authorization"] = f"Bearer {key}"
    headers["Content-Type"] = "application/json"

    r = None
    session = None
    streaming = False

    try:
        session = aiohttp.ClientSession(trust_env=True)
        r = await session.request(
            method="POST",
            url=f"{url}/chat/completions",
            data=payload,
            headers=headers,
        )
        r.raise_for_status()

        # Check if response is SSE
        if "text/event-stream" in r.headers.get("Content-Type", ""):
            streaming = True
            return StreamingResponse(
                r.content,
                status_code=r.status,
                headers=dict(r.headers),
                background=BackgroundTask(
                    cleanup_response, response=r, session=session
                ),
            )
        else:
            response_data = await r.json()
            return response_data
    except Exception as e:
        log.exception(e)
        error_detail = "Open WebUI: Server Connection Error"
        if r is not None:
            try:
                res = await r.json()
                log.error(res)
                if "error" in res:
                    error_detail = f"External: {res['error']['message'] if 'message' in res['error'] else res['error']}"
            except Exception:
                error_detail = f"External: {e}"
        raise HTTPException(
            status_code=r.status if r is not None else 500, detail=error_detail
        )
    finally:
        # Streaming responses clean up via BackgroundTask instead.
        if not streaming and session:
            if r:
                r.close()
            await session.close()


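# Catch-all passthrough: any other OpenAI-style route (e.g. /embeddings) is
# forwarded verbatim to the first configured endpoint.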
@app.api_route("/{path:path}", methods=["GET", "POST", "PUT", "DELETE"])
async def proxy(path: str, request: Request, user=Depends(get_verified_user)):
    idx = 0

    body = await request.body()

    url = app.state.config.OPENAI_API_BASE_URLS[idx]
    key = app.state.config.OPENAI_API_KEYS[idx]

    target_url = f"{url}/{path}"

    headers = {}
    headers["Authorization"] = f"Bearer {key}"
    headers["Content-Type"] = "application/json"

    r = None
    session = None
    streaming = False

    try:
        session = aiohttp.ClientSession(trust_env=True)
        r = await session.request(
            method=request.method,
            url=target_url,
            data=body,
            headers=headers,
        )
        r.raise_for_status()

        # Check if response is SSE
        if "text/event-stream" in r.headers.get("Content-Type", ""):
            streaming = True
            return StreamingResponse(
                r.content,
                status_code=r.status,
                headers=dict(r.headers),
                background=BackgroundTask(
                    cleanup_response, response=r, session=session
                ),
            )
        else:
            response_data = await r.json()
            return response_data
    except Exception as e:
        log.exception(e)
        error_detail = "Open WebUI: Server Connection Error"
        if r is not None:
            try:
                res = await r.json()
                log.error(res)
                if "error" in res:
                    error_detail = f"External: {res['error']['message'] if 'message' in res['error'] else res['error']}"
            except Exception:
                error_detail = f"External: {e}"
        raise HTTPException(
            status_code=r.status if r is not None else 500, detail=error_detail
        )
    finally:
        # Streaming responses clean up via BackgroundTask instead.
        if not streaming and session:
            if r:
                r.close()
            await session.close()
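

# A minimal sketch of how this sub-app might be mounted from the main server
# (the module path and the "/openai" prefix are assumptions for illustration,
# not taken from this file):
#
#     from fastapi import FastAPI
#     from apps.openai.main import app as openai_app
#
#     server = FastAPI()
#     server.mount("/openai", openai_app)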