main.py

from fastapi import FastAPI, Request, Response, HTTPException, Depends
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import StreamingResponse, JSONResponse, FileResponse

import requests
import aiohttp
import asyncio
import json
import logging

from pydantic import BaseModel
from starlette.background import BackgroundTask

from apps.webui.models.models import Models
from apps.webui.models.users import Users
from constants import ERROR_MESSAGES
from utils.utils import (
    decode_token,
    get_current_user,
    get_verified_user,
    get_admin_user,
)
from utils.task import prompt_template

from config import (
    SRC_LOG_LEVELS,
    ENABLE_OPENAI_API,
    OPENAI_API_BASE_URLS,
    OPENAI_API_KEYS,
    CACHE_DIR,
    ENABLE_MODEL_FILTER,
    MODEL_FILTER_LIST,
    AppConfig,
)

from typing import List, Optional
import hashlib
from pathlib import Path

log = logging.getLogger(__name__)
log.setLevel(SRC_LOG_LEVELS["OPENAI"])

app = FastAPI()
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

app.state.config = AppConfig()

app.state.config.ENABLE_MODEL_FILTER = ENABLE_MODEL_FILTER
app.state.config.MODEL_FILTER_LIST = MODEL_FILTER_LIST

app.state.config.ENABLE_OPENAI_API = ENABLE_OPENAI_API
app.state.config.OPENAI_API_BASE_URLS = OPENAI_API_BASE_URLS
app.state.config.OPENAI_API_KEYS = OPENAI_API_KEYS

app.state.MODELS = {}
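

# Populate the model registry lazily: the first request to reach the app
# triggers a fetch of every upstream model list, so later handlers can
# resolve a model id to the endpoint that serves it.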
@app.middleware("http")
async def check_url(request: Request, call_next):
    if len(app.state.MODELS) == 0:
        await get_all_models()

    response = await call_next(request)
    return response


@app.get("/config")
async def get_config(user=Depends(get_admin_user)):
    return {"ENABLE_OPENAI_API": app.state.config.ENABLE_OPENAI_API}


class OpenAIConfigForm(BaseModel):
    enable_openai_api: Optional[bool] = None


@app.post("/config/update")
async def update_config(form_data: OpenAIConfigForm, user=Depends(get_admin_user)):
    # The field is optional; only update when a value was actually
    # supplied, so a bare request cannot overwrite the flag with None.
    if form_data.enable_openai_api is not None:
        app.state.config.ENABLE_OPENAI_API = form_data.enable_openai_api
    return {"ENABLE_OPENAI_API": app.state.config.ENABLE_OPENAI_API}


class UrlsUpdateForm(BaseModel):
    urls: List[str]


class KeysUpdateForm(BaseModel):
    keys: List[str]


@app.get("/urls")
async def get_openai_urls(user=Depends(get_admin_user)):
    return {"OPENAI_API_BASE_URLS": app.state.config.OPENAI_API_BASE_URLS}


@app.post("/urls/update")
async def update_openai_urls(form_data: UrlsUpdateForm, user=Depends(get_admin_user)):
    # Apply the new URLs first, then refresh the model registry so it
    # reflects the updated endpoints rather than the old ones.
    app.state.config.OPENAI_API_BASE_URLS = form_data.urls
    await get_all_models()
    return {"OPENAI_API_BASE_URLS": app.state.config.OPENAI_API_BASE_URLS}


@app.get("/keys")
async def get_openai_keys(user=Depends(get_admin_user)):
    return {"OPENAI_API_KEYS": app.state.config.OPENAI_API_KEYS}


@app.post("/keys/update")
async def update_openai_key(form_data: KeysUpdateForm, user=Depends(get_admin_user)):
    app.state.config.OPENAI_API_KEYS = form_data.keys
    return {"OPENAI_API_KEYS": app.state.config.OPENAI_API_KEYS}
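

# /audio/speech proxies OpenAI's text-to-speech endpoint and caches the
# generated audio on disk, keyed by a SHA-256 hash of the request body,
# so identical requests are served from the cache without an upstream call.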
@app.post("/audio/speech")
async def speech(request: Request, user=Depends(get_verified_user)):
    idx = None
    try:
        idx = app.state.config.OPENAI_API_BASE_URLS.index("https://api.openai.com/v1")

        body = await request.body()
        name = hashlib.sha256(body).hexdigest()

        SPEECH_CACHE_DIR = Path(CACHE_DIR).joinpath("./audio/speech/")
        SPEECH_CACHE_DIR.mkdir(parents=True, exist_ok=True)
        file_path = SPEECH_CACHE_DIR.joinpath(f"{name}.mp3")
        file_body_path = SPEECH_CACHE_DIR.joinpath(f"{name}.json")

        # Check if the file already exists in the cache
        if file_path.is_file():
            return FileResponse(file_path)

        headers = {}
        headers["Authorization"] = f"Bearer {app.state.config.OPENAI_API_KEYS[idx]}"
        headers["Content-Type"] = "application/json"
        if "openrouter.ai" in app.state.config.OPENAI_API_BASE_URLS[idx]:
            headers["HTTP-Referer"] = "https://openwebui.com/"
            headers["X-Title"] = "Open WebUI"

        r = None
        try:
            r = requests.post(
                url=f"{app.state.config.OPENAI_API_BASE_URLS[idx]}/audio/speech",
                data=body,
                headers=headers,
                stream=True,
            )
            r.raise_for_status()

            # Save the streaming content to a file
            with open(file_path, "wb") as f:
                for chunk in r.iter_content(chunk_size=8192):
                    f.write(chunk)

            with open(file_body_path, "w") as f:
                json.dump(json.loads(body.decode("utf-8")), f)

            # Return the saved file
            return FileResponse(file_path)
        except Exception as e:
            log.exception(e)
            error_detail = "Open WebUI: Server Connection Error"
            if r is not None:
                try:
                    res = r.json()
                    if "error" in res:
                        error_detail = f"External: {res['error']}"
                except Exception:
                    error_detail = f"External: {e}"

            raise HTTPException(
                status_code=r.status_code if r else 500, detail=error_detail
            )
    except ValueError:
        # Raised by .index() when no official OpenAI base URL is configured
        raise HTTPException(status_code=401, detail=ERROR_MESSAGES.OPENAI_NOT_FOUND)
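

# Fetch a JSON document from a single upstream endpoint, returning None
# on any error so that one unreachable endpoint does not fail the whole
# model listing.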
async def fetch_url(url, key):
    timeout = aiohttp.ClientTimeout(total=5)
    try:
        headers = {"Authorization": f"Bearer {key}"}
        async with aiohttp.ClientSession(timeout=timeout, trust_env=True) as session:
            async with session.get(url, headers=headers) as response:
                return await response.json()
    except Exception as e:
        # Handle connection error here
        log.error(f"Connection error: {e}")
        return None
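

# Close the upstream response and session once a StreamingResponse has
# finished sending; registered below as a Starlette BackgroundTask.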
async def cleanup_response(
    response: Optional[aiohttp.ClientResponse],
    session: Optional[aiohttp.ClientSession],
):
    if response:
        response.close()
    if session:
        await session.close()
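

# Flatten the per-endpoint model lists into one list, tagging each model
# with the index of the endpoint that serves it ("urlIdx"). For the
# official OpenAI API, only models whose id contains "gpt" are kept.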
def merge_models_lists(model_lists):
    log.debug(f"merge_models_lists {model_lists}")
    merged_list = []

    for idx, models in enumerate(model_lists):
        if models is not None and "error" not in models:
            merged_list.extend(
                [
                    {
                        **model,
                        "name": model.get("name", model["id"]),
                        "owned_by": "openai",
                        "openai": model,
                        "urlIdx": idx,
                    }
                    for model in models
                    if "api.openai.com"
                    not in app.state.config.OPENAI_API_BASE_URLS[idx]
                    or "gpt" in model["id"]
                ]
            )

    return merged_list
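

# Query every configured endpoint for its /models list concurrently,
# reconcile the key list with the URL list, and cache the merged result
# in app.state.MODELS keyed by model id.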
async def get_all_models(raw: bool = False):
    log.info("get_all_models()")

    if (
        len(app.state.config.OPENAI_API_KEYS) == 1
        and app.state.config.OPENAI_API_KEYS[0] == ""
    ) or not app.state.config.ENABLE_OPENAI_API:
        models = {"data": []}
    else:
        # Check that the number of API keys matches the number of API URLs
        if len(app.state.config.OPENAI_API_KEYS) != len(
            app.state.config.OPENAI_API_BASE_URLS
        ):
            # If there are more keys than URLs, drop the extra keys
            if len(app.state.config.OPENAI_API_KEYS) > len(
                app.state.config.OPENAI_API_BASE_URLS
            ):
                app.state.config.OPENAI_API_KEYS = app.state.config.OPENAI_API_KEYS[
                    : len(app.state.config.OPENAI_API_BASE_URLS)
                ]
            # If there are more URLs than keys, pad with empty keys
            else:
                app.state.config.OPENAI_API_KEYS += [
                    ""
                    for _ in range(
                        len(app.state.config.OPENAI_API_BASE_URLS)
                        - len(app.state.config.OPENAI_API_KEYS)
                    )
                ]

        tasks = [
            fetch_url(f"{url}/models", app.state.config.OPENAI_API_KEYS[idx])
            for idx, url in enumerate(app.state.config.OPENAI_API_BASE_URLS)
        ]

        responses = await asyncio.gather(*tasks)
        log.debug(f"get_all_models:responses() {responses}")

        if raw:
            return responses

        models = {
            "data": merge_models_lists(
                list(
                    map(
                        lambda response: (
                            response["data"]
                            if (response and "data" in response)
                            else (response if isinstance(response, list) else None)
                        ),
                        responses,
                    )
                )
            )
        }

    log.debug(f"models: {models}")
    app.state.MODELS = {model["id"]: model for model in models["data"]}
    return models
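

# GET /models returns the merged list from every endpoint; with a
# url_idx it queries that single endpoint directly. When the model
# filter is enabled, non-admin users only see whitelisted models.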
@app.get("/models")
@app.get("/models/{url_idx}")
async def get_models(url_idx: Optional[int] = None, user=Depends(get_current_user)):
    if url_idx is None:
        models = await get_all_models()
        if app.state.config.ENABLE_MODEL_FILTER:
            if user.role == "user":
                models["data"] = list(
                    filter(
                        lambda model: model["id"] in app.state.config.MODEL_FILTER_LIST,
                        models["data"],
                    )
                )
        return models
    else:
        url = app.state.config.OPENAI_API_BASE_URLS[url_idx]
        key = app.state.config.OPENAI_API_KEYS[url_idx]

        headers = {}
        headers["Authorization"] = f"Bearer {key}"
        headers["Content-Type"] = "application/json"

        r = None
        try:
            r = requests.request(method="GET", url=f"{url}/models", headers=headers)
            r.raise_for_status()

            response_data = r.json()
            if "api.openai.com" in url:
                response_data["data"] = list(
                    filter(lambda model: "gpt" in model["id"], response_data["data"])
                )

            return response_data
        except Exception as e:
            log.exception(e)
            error_detail = "Open WebUI: Server Connection Error"
            if r is not None:
                try:
                    res = r.json()
                    if "error" in res:
                        error_detail = f"External: {res['error']}"
                except Exception:
                    error_detail = f"External: {e}"

            raise HTTPException(
                status_code=r.status_code if r else 500,
                detail=error_detail,
            )
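

# POST /chat/completions applies per-model overrides (base model,
# sampling parameters, system prompt) from the model registry, then
# forwards the request to the matching endpoint, streaming the reply
# back when the upstream answers with server-sent events.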
@app.post("/chat/completions")
@app.post("/chat/completions/{url_idx}")
async def generate_chat_completion(
    form_data: dict,
    url_idx: Optional[int] = None,
    user=Depends(get_verified_user),
):
    idx = 0
    payload = {**form_data}

    model_id = form_data.get("model")
    model_info = Models.get_model_by_id(model_id)

    if model_info:
        if model_info.base_model_id:
            payload["model"] = model_info.base_model_id

        model_info.params = model_info.params.model_dump()

        if model_info.params:
            if model_info.params.get("temperature", None) is not None:
                payload["temperature"] = float(model_info.params.get("temperature"))

            if model_info.params.get("top_p", None):
                # top_p is a probability mass in [0, 1]; coerce to float
                payload["top_p"] = float(model_info.params.get("top_p", None))

            if model_info.params.get("max_tokens", None):
                payload["max_tokens"] = int(model_info.params.get("max_tokens", None))

            if model_info.params.get("frequency_penalty", None):
                # frequency_penalty is a float in [-2.0, 2.0], not an int
                payload["frequency_penalty"] = float(
                    model_info.params.get("frequency_penalty", None)
                )

            if model_info.params.get("seed", None):
                payload["seed"] = model_info.params.get("seed", None)

            if model_info.params.get("stop", None):
                payload["stop"] = (
                    [
                        bytes(stop, "utf-8").decode("unicode_escape")
                        for stop in model_info.params["stop"]
                    ]
                    if model_info.params.get("stop", None)
                    else None
                )

            system = model_info.params.get("system", None)
            if system:
                system = prompt_template(
                    system,
                    **(
                        {
                            "user_name": user.name,
                            "user_location": (
                                user.info.get("location") if user.info else None
                            ),
                        }
                        if user
                        else {}
                    ),
                )
                # Check if the payload already has a system message
                # If not, add a system message to the payload
                if payload.get("messages"):
                    for message in payload["messages"]:
                        if message.get("role") == "system":
                            message["content"] = system + message["content"]
                            break
                    else:
                        payload["messages"].insert(
                            0,
                            {
                                "role": "system",
                                "content": system,
                            },
                        )

    model = app.state.MODELS[payload.get("model")]
    idx = model["urlIdx"]

    if "pipeline" in model and model.get("pipeline"):
        payload["user"] = {"name": user.name, "id": user.id}

    # Check if the model is "gpt-4-vision-preview" and set "max_tokens" to 4000
    # This is a workaround until OpenAI fixes the issue with this model
    if payload.get("model") == "gpt-4-vision-preview":
        if "max_tokens" not in payload:
            payload["max_tokens"] = 4000
        log.debug(f"Modified payload: {payload}")

    # Convert the modified body back to JSON
    payload = json.dumps(payload)
    log.debug(payload)

    url = app.state.config.OPENAI_API_BASE_URLS[idx]
    key = app.state.config.OPENAI_API_KEYS[idx]

    headers = {}
    headers["Authorization"] = f"Bearer {key}"
    headers["Content-Type"] = "application/json"

    r = None
    session = None
    streaming = False

    try:
        session = aiohttp.ClientSession(trust_env=True)
        r = await session.request(
            method="POST",
            url=f"{url}/chat/completions",
            data=payload,
            headers=headers,
        )
        r.raise_for_status()

        # Check if response is SSE
        if "text/event-stream" in r.headers.get("Content-Type", ""):
            streaming = True
            return StreamingResponse(
                r.content,
                status_code=r.status,
                headers=dict(r.headers),
                background=BackgroundTask(
                    cleanup_response, response=r, session=session
                ),
            )
        else:
            response_data = await r.json()
            return response_data
    except Exception as e:
        log.exception(e)
        error_detail = "Open WebUI: Server Connection Error"
        if r is not None:
            try:
                res = await r.json()
                log.error(res)
                if "error" in res:
                    error_detail = f"External: {res['error']['message'] if 'message' in res['error'] else res['error']}"
            except Exception:
                error_detail = f"External: {e}"
        raise HTTPException(status_code=r.status if r else 500, detail=error_detail)
    finally:
        if not streaming and session:
            if r:
                r.close()
            await session.close()
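

# Catch-all proxy: any other path is forwarded verbatim to the first
# configured endpoint (idx = 0), with the same SSE streaming handling
# and cleanup as the chat completions route above.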
@app.api_route("/{path:path}", methods=["GET", "POST", "PUT", "DELETE"])
async def proxy(path: str, request: Request, user=Depends(get_verified_user)):
    idx = 0

    body = await request.body()

    url = app.state.config.OPENAI_API_BASE_URLS[idx]
    key = app.state.config.OPENAI_API_KEYS[idx]

    target_url = f"{url}/{path}"

    headers = {}
    headers["Authorization"] = f"Bearer {key}"
    headers["Content-Type"] = "application/json"

    r = None
    session = None
    streaming = False

    try:
        session = aiohttp.ClientSession(trust_env=True)
        r = await session.request(
            method=request.method,
            url=target_url,
            data=body,
            headers=headers,
        )
        r.raise_for_status()

        # Check if response is SSE
        if "text/event-stream" in r.headers.get("Content-Type", ""):
            streaming = True
            return StreamingResponse(
                r.content,
                status_code=r.status,
                headers=dict(r.headers),
                background=BackgroundTask(
                    cleanup_response, response=r, session=session
                ),
            )
        else:
            response_data = await r.json()
            return response_data
    except Exception as e:
        log.exception(e)
        error_detail = "Open WebUI: Server Connection Error"
        if r is not None:
            try:
                res = await r.json()
                log.error(res)
                if "error" in res:
                    error_detail = f"External: {res['error']['message'] if 'message' in res['error'] else res['error']}"
            except Exception:
                error_detail = f"External: {e}"
        raise HTTPException(status_code=r.status if r else 500, detail=error_detail)
    finally:
        if not streaming and session:
            if r:
                r.close()
            await session.close()
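

# A minimal sketch of how this sub-application might be mounted from a
# parent FastAPI app; the import path and the "/openai" mount prefix are
# illustrative assumptions, not taken from this file:
#
#     from fastapi import FastAPI
#     from apps.openai.main import app as openai_app  # assumed module path
#
#     root = FastAPI()
#     root.mount("/openai", openai_app)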