# main.py

from fastapi import FastAPI, Depends, HTTPException, Request, status
from fastapi.middleware.cors import CORSMiddleware
from starlette.responses import StreamingResponse
from pydantic import BaseModel
from typing import Optional, List

import asyncio
import logging
import subprocess
import time

import requests
import yaml

from utils.utils import get_verified_user, get_current_user, get_admin_user
from config import (
    SRC_LOG_LEVELS,
    DATA_DIR,
    MODEL_FILTER_ENABLED,
    MODEL_FILTER_LIST,
)
from constants import MESSAGES

log = logging.getLogger(__name__)
log.setLevel(SRC_LOG_LEVELS["LITELLM"])

app = FastAPI()

origins = ["*"]

app.add_middleware(
    CORSMiddleware,
    allow_origins=origins,
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

LITELLM_CONFIG_DIR = f"{DATA_DIR}/litellm/config.yaml"

with open(LITELLM_CONFIG_DIR, "r") as file:
    litellm_config = yaml.safe_load(file)

app.state.CONFIG = litellm_config

# Global variable to store the subprocess reference
background_process = None
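

# The routes below manage a LiteLLM proxy as a child process: it is started on
# app startup, restarted whenever the config is updated, and every unmatched
# route is forwarded to it via the catch-all `proxy` handler at the bottom.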
async def run_background_process(command):
    global background_process
    log.info("run_background_process")
    try:
        # Log the command to be executed
        log.info(f"Executing command: {command}")
        # Execute the command and create a subprocess
        process = await asyncio.create_subprocess_exec(
            *command.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE
        )
        background_process = process
        log.info("Subprocess started successfully.")

        # Capture STDERR for debugging purposes
        stderr_output = await process.stderr.read()
        stderr_text = stderr_output.decode().strip()
        if stderr_text:
            log.info(f"Subprocess STDERR: {stderr_text}")
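
        # NOTE: `stderr.read()` with no size argument waits until the child
        # closes its stderr, so the stdout loop below only starts logging after
        # that happens; if the child writes heavily to both pipes this could
        # stall. Kept as-is here to preserve the original behavior.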

        # Log the subprocess's stdout line by line
        async for line in process.stdout:
            log.info(line.decode().strip())

        # Wait for the process to finish
        returncode = await process.wait()
        log.info(f"Subprocess exited with return code {returncode}")
    except Exception as e:
        log.error(f"Failed to start subprocess: {e}")
        raise  # Re-raise so the failure propagates to the caller


async def start_litellm_background():
    log.info("start_litellm_background")
    # Command to run in the background
    command = (
        "litellm --port 14365 --telemetry False --config ./data/litellm/config.yaml"
    )

    await run_background_process(command)


async def shutdown_litellm_background():
    log.info("shutdown_litellm_background")
    global background_process
    if background_process:
        background_process.terminate()
        await background_process.wait()  # Ensure the process has terminated
        log.info("Subprocess terminated")
        background_process = None


@app.on_event("startup")
async def startup_event():
    log.info("startup_event")
    # TODO: Check config.yaml file and create one
    asyncio.create_task(start_litellm_background())


app.state.MODEL_FILTER_ENABLED = MODEL_FILTER_ENABLED
app.state.MODEL_FILTER_LIST = MODEL_FILTER_LIST


@app.get("/")
async def get_status():
    return {"status": True}


async def restart_litellm():
    """
    Restart the litellm background service.
    """
    log.info("Requested restart of litellm service.")
    try:
        # Shut down the existing process if it is running
        await shutdown_litellm_background()
        log.info("litellm service shutdown complete.")

        # Restart the background service
        asyncio.create_task(start_litellm_background())
        log.info("litellm service restart complete.")

        return {
            "status": "success",
            "message": "litellm service restarted successfully.",
        }
    except Exception as e:
        log.error(f"Error restarting litellm service: {e}")
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail=str(e)
        )


@app.get("/restart")
async def restart_litellm_handler(user=Depends(get_admin_user)):
    return await restart_litellm()
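
# For example (hypothetical token and port, depending on how get_admin_user
# authenticates and where this app is served):
#   curl -H "Authorization: Bearer <admin-token>" http://localhost:8080/restart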


@app.get("/config")
async def get_config(user=Depends(get_admin_user)):
    return app.state.CONFIG


class LiteLLMConfigForm(BaseModel):
    general_settings: Optional[dict] = None
    litellm_settings: Optional[dict] = None
    model_list: Optional[List[dict]] = None
    router_settings: Optional[dict] = None


@app.post("/config/update")
async def update_config(form_data: LiteLLMConfigForm, user=Depends(get_admin_user)):
    app.state.CONFIG = form_data.model_dump(exclude_none=True)

    with open(LITELLM_CONFIG_DIR, "w") as file:
        yaml.dump(app.state.CONFIG, file)

    await restart_litellm()
    return app.state.CONFIG
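
# A sketch of the kind of body /config/update accepts, mirroring the optional
# fields on LiteLLMConfigForm (the model names and settings are placeholders):
#   {
#     "model_list": [
#       {"model_name": "my-gpt", "litellm_params": {"model": "gpt-3.5-turbo"}}
#     ],
#     "litellm_settings": {"drop_params": true}
#   }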


@app.get("/models")
@app.get("/v1/models")
async def get_models(user=Depends(get_current_user)):
    # Wait until the litellm subprocess has been started
    while not background_process:
        await asyncio.sleep(0.1)

    url = "http://localhost:14365/v1"
    r = None
    try:
        r = requests.request(method="GET", url=f"{url}/models")
        r.raise_for_status()

        data = r.json()

        if app.state.MODEL_FILTER_ENABLED:
            if user and user.role == "user":
                data["data"] = list(
                    filter(
                        lambda model: model["id"] in app.state.MODEL_FILTER_LIST,
                        data["data"],
                    )
                )

        return data
    except Exception as e:
        log.exception(e)
        error_detail = "Open WebUI: Server Connection Error"

        if r is not None:
            try:
                res = r.json()
                if "error" in res:
                    error_detail = f"External: {res['error']}"
            except Exception:
                error_detail = f"External: {e}"
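
        # Fall back to the locally configured model list so clients can still
        # see the models defined in config.yaml even when the litellm
        # subprocess is unreachable.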
        return {
            "data": [
                {
                    "id": model["model_name"],
                    "object": "model",
                    "created": int(time.time()),
                    "owned_by": "openai",
                }
                for model in app.state.CONFIG["model_list"]
            ],
            "object": "list",
        }


@app.get("/model/info")
async def get_model_list(user=Depends(get_admin_user)):
    return {"data": app.state.CONFIG["model_list"]}


class AddLiteLLMModelForm(BaseModel):
    model_name: str
    litellm_params: dict


@app.post("/model/new")
async def add_model_to_config(
    form_data: AddLiteLLMModelForm, user=Depends(get_admin_user)
):
    # TODO: Validate model form
    app.state.CONFIG["model_list"].append(form_data.model_dump())

    with open(LITELLM_CONFIG_DIR, "w") as file:
        yaml.dump(app.state.CONFIG, file)

    await restart_litellm()

    return {"message": MESSAGES.MODEL_ADDED(form_data.model_name)}
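
# An illustrative /model/new body (the litellm_params keys follow LiteLLM's
# config conventions; the values here are placeholders):
#   {
#     "model_name": "my-gpt",
#     "litellm_params": {"model": "gpt-3.5-turbo", "api_key": "sk-..."}
#   }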


class DeleteLiteLLMModelForm(BaseModel):
    id: str


@app.post("/model/delete")
async def delete_model_from_config(
    form_data: DeleteLiteLLMModelForm, user=Depends(get_admin_user)
):
    # `id` is matched against `model_name` in the config's model_list
    app.state.CONFIG["model_list"] = [
        model
        for model in app.state.CONFIG["model_list"]
        if model["model_name"] != form_data.id
    ]

    with open(LITELLM_CONFIG_DIR, "w") as file:
        yaml.dump(app.state.CONFIG, file)

    await restart_litellm()

    return {"message": MESSAGES.MODEL_DELETED(form_data.id)}
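
# Correspondingly, an illustrative /model/delete body (placeholder name):
#   {"id": "my-gpt"}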


@app.api_route("/{path:path}", methods=["GET", "POST", "PUT", "DELETE"])
async def proxy(path: str, request: Request, user=Depends(get_verified_user)):
    body = await request.body()

    url = "http://localhost:14365"
    target_url = f"{url}/{path}"

    headers = {}
    # headers["Authorization"] = f"Bearer {key}"
    headers["Content-Type"] = "application/json"

    r = None
    try:
        r = requests.request(
            method=request.method,
            url=target_url,
            data=body,
            headers=headers,
            stream=True,
        )
        r.raise_for_status()

        # Check if the response is SSE; if so, stream it through unchanged
        if "text/event-stream" in r.headers.get("Content-Type", ""):
            return StreamingResponse(
                r.iter_content(chunk_size=8192),
                status_code=r.status_code,
                headers=dict(r.headers),
            )
        else:
            response_data = r.json()
            return response_data
    except Exception as e:
        log.exception(e)
        error_detail = "Open WebUI: Server Connection Error"

        if r is not None:
            try:
                res = r.json()
                if "error" in res:
                    error_detail = f"External: {res['error']['message'] if 'message' in res['error'] else res['error']}"
            except Exception:
                error_detail = f"External: {e}"

        raise HTTPException(
            status_code=r.status_code if r else 500, detail=error_detail
        )
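

# A minimal local entry point for running this app directly; a sketch assuming
# uvicorn is installed, and the host/port here are arbitrary examples rather
# than anything this file specifies.
if __name__ == "__main__":
    import uvicorn

    uvicorn.run(app, host="0.0.0.0", port=8080)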