# main.py
from fastapi import (
    FastAPI,
    Request,
    Response,
    HTTPException,
    Depends,
    status,
    UploadFile,
    File,
    BackgroundTasks,
)
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import StreamingResponse
from fastapi.concurrency import run_in_threadpool
from pydantic import BaseModel, ConfigDict

import os
import re
import copy
import random
import requests
import json
import uuid
import aiohttp
import asyncio
import logging
from urllib.parse import urlparse
from typing import Optional, List, Union

from apps.web.models.users import Users
from constants import ERROR_MESSAGES
from utils.utils import decode_token, get_current_user, get_admin_user
from config import (
    SRC_LOG_LEVELS,
    OLLAMA_BASE_URLS,
    ENABLE_MODEL_FILTER,
    MODEL_FILTER_LIST,
    UPLOAD_DIR,
)
from utils.misc import calculate_sha256

log = logging.getLogger(__name__)
log.setLevel(SRC_LOG_LEVELS["OLLAMA"])
app = FastAPI()
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

app.state.ENABLE_MODEL_FILTER = ENABLE_MODEL_FILTER
app.state.MODEL_FILTER_LIST = MODEL_FILTER_LIST
app.state.OLLAMA_BASE_URLS = OLLAMA_BASE_URLS
app.state.MODELS = {}

REQUEST_POOL = []
# TODO: Implement a more intelligent load balancing mechanism for distributing
# requests among multiple backend instances. The current implementation simply
# picks a backend at random (random.choice); consider algorithms like weighted
# round-robin, least connections, or least response time for better resource
# utilization and performance (a sketch follows below).
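# A minimal, illustrative sketch (not wired in) of one such alternative: a
# least-connections picker over the candidate backends for a model.
# ACTIVE_REQUESTS is a hypothetical counter that each handler would increment
# and decrement around its proxied call.
#
# ACTIVE_REQUESTS = {idx: 0 for idx in range(len(OLLAMA_BASE_URLS))}
#
# def pick_least_busy(url_idxs: List[int]) -> int:
#     # Prefer the backend with the fewest in-flight requests; unseen indices
#     # default to zero.
#     return min(url_idxs, key=lambda idx: ACTIVE_REQUESTS.get(idx, 0))
#
# Handlers would then call pick_least_busy(...) where they currently call
# random.choice(app.state.MODELS[model]["urls"]).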
@app.middleware("http")
async def check_url(request: Request, call_next):
    if len(app.state.MODELS) == 0:
        await get_all_models()

    response = await call_next(request)
    return response
@app.head("/")
@app.get("/")
async def get_status():
    return {"status": True}


@app.get("/urls")
async def get_ollama_api_urls(user=Depends(get_admin_user)):
    return {"OLLAMA_BASE_URLS": app.state.OLLAMA_BASE_URLS}


class UrlUpdateForm(BaseModel):
    urls: List[str]


@app.post("/urls/update")
async def update_ollama_api_url(form_data: UrlUpdateForm, user=Depends(get_admin_user)):
    app.state.OLLAMA_BASE_URLS = form_data.urls
    log.info(f"app.state.OLLAMA_BASE_URLS: {app.state.OLLAMA_BASE_URLS}")
    return {"OLLAMA_BASE_URLS": app.state.OLLAMA_BASE_URLS}


@app.get("/cancel/{request_id}")
async def cancel_ollama_request(request_id: str, user=Depends(get_current_user)):
    if user:
        if request_id in REQUEST_POOL:
            REQUEST_POOL.remove(request_id)
        return True
    else:
        raise HTTPException(status_code=401, detail=ERROR_MESSAGES.ACCESS_PROHIBITED)
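# Cancellation flow (for reference): each streaming handler below generates a
# uuid, appends it to REQUEST_POOL, and emits it as the first NDJSON line of
# the response, e.g.
#   {"id": "8f14e45f-...", "done": false}
# A client aborts the stream by calling GET /cancel/{request_id}; the handler's
# generator then sees the id missing from REQUEST_POOL and stops yielding.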
async def fetch_url(url):
    try:
        async with aiohttp.ClientSession() as session:
            async with session.get(url) as response:
                return await response.json()
    except Exception as e:
        # Handle connection error here
        log.error(f"Connection error: {e}")
        return None


def merge_models_lists(model_lists):
    merged_models = {}

    for idx, model_list in enumerate(model_lists):
        if model_list is not None:
            for model in model_list:
                digest = model["digest"]
                if digest not in merged_models:
                    model["urls"] = [idx]
                    merged_models[digest] = model
                else:
                    merged_models[digest]["urls"].append(idx)

    return list(merged_models.values())
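# Illustrative example: given tag lists from two backends where the same
# "llama2:latest" digest appears on both and "mistral:latest" exists only on
# the second, the merged result lists each model once, annotated with the
# indices of the backends that serve it:
#   [{"model": "llama2:latest", ..., "urls": [0, 1]},
#    {"model": "mistral:latest", ..., "urls": [1]}]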
# user=Depends(get_current_user)
async def get_all_models():
    log.info("get_all_models()")

    tasks = [fetch_url(f"{url}/api/tags") for url in app.state.OLLAMA_BASE_URLS]
    responses = await asyncio.gather(*tasks)

    models = {
        "models": merge_models_lists(
            map(lambda response: response["models"] if response else None, responses)
        )
    }

    app.state.MODELS = {model["model"]: model for model in models["models"]}

    return models
@app.get("/api/tags")
@app.get("/api/tags/{url_idx}")
async def get_ollama_tags(
    url_idx: Optional[int] = None, user=Depends(get_current_user)
):
    if url_idx is None:
        models = await get_all_models()

        if app.state.ENABLE_MODEL_FILTER:
            if user.role == "user":
                models["models"] = list(
                    filter(
                        lambda model: model["name"] in app.state.MODEL_FILTER_LIST,
                        models["models"],
                    )
                )
                return models
        return models
    else:
        url = app.state.OLLAMA_BASE_URLS[url_idx]

        r = None
        try:
            r = requests.request(method="GET", url=f"{url}/api/tags")
            r.raise_for_status()

            return r.json()
        except Exception as e:
            log.exception(e)
            error_detail = "Open WebUI: Server Connection Error"

            if r is not None:
                try:
                    res = r.json()
                    if "error" in res:
                        error_detail = f"Ollama: {res['error']}"
                except Exception:
                    error_detail = f"Ollama: {e}"

            raise HTTPException(
                status_code=r.status_code if r else 500,
                detail=error_detail,
            )
@app.get("/api/version")
@app.get("/api/version/{url_idx}")
async def get_ollama_versions(url_idx: Optional[int] = None):
    if url_idx is None:
        # returns lowest version
        tasks = [fetch_url(f"{url}/api/version") for url in app.state.OLLAMA_BASE_URLS]
        responses = await asyncio.gather(*tasks)
        responses = list(filter(lambda x: x is not None, responses))

        if len(responses) > 0:
            lowest_version = min(
                responses,
                key=lambda x: tuple(
                    map(int, re.sub(r"^v|-.*", "", x["version"]).split("."))
                ),
            )

            return {"version": lowest_version["version"]}
        else:
            raise HTTPException(
                status_code=500,
                detail=ERROR_MESSAGES.OLLAMA_NOT_FOUND,
            )
    else:
        url = app.state.OLLAMA_BASE_URLS[url_idx]

        r = None
        try:
            r = requests.request(method="GET", url=f"{url}/api/version")
            r.raise_for_status()

            return r.json()
        except Exception as e:
            log.exception(e)
            error_detail = "Open WebUI: Server Connection Error"

            if r is not None:
                try:
                    res = r.json()
                    if "error" in res:
                        error_detail = f"Ollama: {res['error']}"
                except Exception:
                    error_detail = f"Ollama: {e}"

            raise HTTPException(
                status_code=r.status_code if r else 500,
                detail=error_detail,
            )
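# Note on the version sort key above: re.sub(r"^v|-.*", "", ...) strips a
# leading "v" and any "-suffix" before the numeric compare, e.g.
#   "v0.1.30-rc2" -> "0.1.30" -> (0, 1, 30)
# so (0, 1, 9) < (0, 1, 30) compares correctly, where a plain string
# comparison would not.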
class ModelNameForm(BaseModel):
    name: str


@app.post("/api/pull")
@app.post("/api/pull/{url_idx}")
async def pull_model(
    form_data: ModelNameForm, url_idx: int = 0, user=Depends(get_admin_user)
):
    url = app.state.OLLAMA_BASE_URLS[url_idx]
    log.info(f"url: {url}")

    r = None

    def get_request():
        nonlocal url
        nonlocal r

        request_id = str(uuid.uuid4())
        try:
            REQUEST_POOL.append(request_id)

            def stream_content():
                try:
                    yield json.dumps({"id": request_id, "done": False}) + "\n"

                    for chunk in r.iter_content(chunk_size=8192):
                        if request_id in REQUEST_POOL:
                            yield chunk
                        else:
                            log.warning("User: canceled request")
                            break
                finally:
                    if hasattr(r, "close"):
                        r.close()
                    if request_id in REQUEST_POOL:
                        REQUEST_POOL.remove(request_id)

            r = requests.request(
                method="POST",
                url=f"{url}/api/pull",
                data=form_data.model_dump_json(exclude_none=True).encode(),
                stream=True,
            )

            r.raise_for_status()

            return StreamingResponse(
                stream_content(),
                status_code=r.status_code,
                headers=dict(r.headers),
            )
        except Exception as e:
            raise e

    try:
        return await run_in_threadpool(get_request)
    except Exception as e:
        log.exception(e)
        error_detail = "Open WebUI: Server Connection Error"

        if r is not None:
            try:
                res = r.json()
                if "error" in res:
                    error_detail = f"Ollama: {res['error']}"
            except Exception:
                error_detail = f"Ollama: {e}"

        raise HTTPException(
            status_code=r.status_code if r else 500,
            detail=error_detail,
        )
class PushModelForm(BaseModel):
    name: str
    insecure: Optional[bool] = None
    stream: Optional[bool] = None


@app.delete("/api/push")
@app.delete("/api/push/{url_idx}")
async def push_model(
    form_data: PushModelForm,
    url_idx: Optional[int] = None,
    user=Depends(get_admin_user),
):
    if url_idx is None:
        if form_data.name in app.state.MODELS:
            url_idx = app.state.MODELS[form_data.name]["urls"][0]
        else:
            raise HTTPException(
                status_code=400,
                detail=ERROR_MESSAGES.MODEL_NOT_FOUND(form_data.name),
            )

    url = app.state.OLLAMA_BASE_URLS[url_idx]
    log.debug(f"url: {url}")

    r = None

    def get_request():
        nonlocal url
        nonlocal r
        try:

            def stream_content():
                for chunk in r.iter_content(chunk_size=8192):
                    yield chunk

            r = requests.request(
                method="POST",
                url=f"{url}/api/push",
                data=form_data.model_dump_json(exclude_none=True).encode(),
            )

            r.raise_for_status()

            return StreamingResponse(
                stream_content(),
                status_code=r.status_code,
                headers=dict(r.headers),
            )
        except Exception as e:
            raise e

    try:
        return await run_in_threadpool(get_request)
    except Exception as e:
        log.exception(e)
        error_detail = "Open WebUI: Server Connection Error"

        if r is not None:
            try:
                res = r.json()
                if "error" in res:
                    error_detail = f"Ollama: {res['error']}"
            except Exception:
                error_detail = f"Ollama: {e}"

        raise HTTPException(
            status_code=r.status_code if r else 500,
            detail=error_detail,
        )
class CreateModelForm(BaseModel):
    name: str
    modelfile: Optional[str] = None
    stream: Optional[bool] = None
    path: Optional[str] = None


@app.post("/api/create")
@app.post("/api/create/{url_idx}")
async def create_model(
    form_data: CreateModelForm, url_idx: int = 0, user=Depends(get_admin_user)
):
    log.debug(f"form_data: {form_data}")
    url = app.state.OLLAMA_BASE_URLS[url_idx]
    log.info(f"url: {url}")

    r = None

    def get_request():
        nonlocal url
        nonlocal r
        try:

            def stream_content():
                for chunk in r.iter_content(chunk_size=8192):
                    yield chunk

            r = requests.request(
                method="POST",
                url=f"{url}/api/create",
                data=form_data.model_dump_json(exclude_none=True).encode(),
                stream=True,
            )

            r.raise_for_status()

            log.debug(f"r: {r}")

            return StreamingResponse(
                stream_content(),
                status_code=r.status_code,
                headers=dict(r.headers),
            )
        except Exception as e:
            raise e

    try:
        return await run_in_threadpool(get_request)
    except Exception as e:
        log.exception(e)
        error_detail = "Open WebUI: Server Connection Error"

        if r is not None:
            try:
                res = r.json()
                if "error" in res:
                    error_detail = f"Ollama: {res['error']}"
            except Exception:
                error_detail = f"Ollama: {e}"

        raise HTTPException(
            status_code=r.status_code if r else 500,
            detail=error_detail,
        )
class CopyModelForm(BaseModel):
    source: str
    destination: str


@app.post("/api/copy")
@app.post("/api/copy/{url_idx}")
async def copy_model(
    form_data: CopyModelForm,
    url_idx: Optional[int] = None,
    user=Depends(get_admin_user),
):
    if url_idx is None:
        if form_data.source in app.state.MODELS:
            url_idx = app.state.MODELS[form_data.source]["urls"][0]
        else:
            raise HTTPException(
                status_code=400,
                detail=ERROR_MESSAGES.MODEL_NOT_FOUND(form_data.source),
            )

    url = app.state.OLLAMA_BASE_URLS[url_idx]
    log.info(f"url: {url}")

    r = None
    try:
        r = requests.request(
            method="POST",
            url=f"{url}/api/copy",
            data=form_data.model_dump_json(exclude_none=True).encode(),
        )
        r.raise_for_status()

        log.debug(f"r.text: {r.text}")

        return True
    except Exception as e:
        log.exception(e)
        error_detail = "Open WebUI: Server Connection Error"

        if r is not None:
            try:
                res = r.json()
                if "error" in res:
                    error_detail = f"Ollama: {res['error']}"
            except Exception:
                error_detail = f"Ollama: {e}"

        raise HTTPException(
            status_code=r.status_code if r else 500,
            detail=error_detail,
        )
@app.delete("/api/delete")
@app.delete("/api/delete/{url_idx}")
async def delete_model(
    form_data: ModelNameForm,
    url_idx: Optional[int] = None,
    user=Depends(get_admin_user),
):
    if url_idx is None:
        if form_data.name in app.state.MODELS:
            url_idx = app.state.MODELS[form_data.name]["urls"][0]
        else:
            raise HTTPException(
                status_code=400,
                detail=ERROR_MESSAGES.MODEL_NOT_FOUND(form_data.name),
            )

    url = app.state.OLLAMA_BASE_URLS[url_idx]
    log.info(f"url: {url}")

    r = None
    try:
        r = requests.request(
            method="DELETE",
            url=f"{url}/api/delete",
            data=form_data.model_dump_json(exclude_none=True).encode(),
        )
        r.raise_for_status()

        log.debug(f"r.text: {r.text}")

        return True
    except Exception as e:
        log.exception(e)
        error_detail = "Open WebUI: Server Connection Error"

        if r is not None:
            try:
                res = r.json()
                if "error" in res:
                    error_detail = f"Ollama: {res['error']}"
            except Exception:
                error_detail = f"Ollama: {e}"

        raise HTTPException(
            status_code=r.status_code if r else 500,
            detail=error_detail,
        )
@app.post("/api/show")
async def show_model_info(form_data: ModelNameForm, user=Depends(get_current_user)):
    if form_data.name not in app.state.MODELS:
        raise HTTPException(
            status_code=400,
            detail=ERROR_MESSAGES.MODEL_NOT_FOUND(form_data.name),
        )

    url_idx = random.choice(app.state.MODELS[form_data.name]["urls"])
    url = app.state.OLLAMA_BASE_URLS[url_idx]
    log.info(f"url: {url}")

    r = None
    try:
        r = requests.request(
            method="POST",
            url=f"{url}/api/show",
            data=form_data.model_dump_json(exclude_none=True).encode(),
        )
        r.raise_for_status()

        return r.json()
    except Exception as e:
        log.exception(e)
        error_detail = "Open WebUI: Server Connection Error"

        if r is not None:
            try:
                res = r.json()
                if "error" in res:
                    error_detail = f"Ollama: {res['error']}"
            except Exception:
                error_detail = f"Ollama: {e}"

        raise HTTPException(
            status_code=r.status_code if r else 500,
            detail=error_detail,
        )
class GenerateEmbeddingsForm(BaseModel):
    model: str
    prompt: str
    options: Optional[dict] = None
    keep_alive: Optional[Union[int, str]] = None


@app.post("/api/embeddings")
@app.post("/api/embeddings/{url_idx}")
async def generate_embeddings(
    form_data: GenerateEmbeddingsForm,
    url_idx: Optional[int] = None,
    user=Depends(get_current_user),
):
    if url_idx is None:
        model = form_data.model

        if ":" not in model:
            model = f"{model}:latest"

        if model in app.state.MODELS:
            url_idx = random.choice(app.state.MODELS[model]["urls"])
        else:
            raise HTTPException(
                status_code=400,
                detail=ERROR_MESSAGES.MODEL_NOT_FOUND(form_data.model),
            )

    url = app.state.OLLAMA_BASE_URLS[url_idx]
    log.info(f"url: {url}")

    r = None
    try:
        r = requests.request(
            method="POST",
            url=f"{url}/api/embeddings",
            data=form_data.model_dump_json(exclude_none=True).encode(),
        )
        r.raise_for_status()

        return r.json()
    except Exception as e:
        log.exception(e)
        error_detail = "Open WebUI: Server Connection Error"

        if r is not None:
            try:
                res = r.json()
                if "error" in res:
                    error_detail = f"Ollama: {res['error']}"
            except Exception:
                error_detail = f"Ollama: {e}"

        raise HTTPException(
            status_code=r.status_code if r else 500,
            detail=error_detail,
        )
def generate_ollama_embeddings(
    form_data: GenerateEmbeddingsForm,
    url_idx: Optional[int] = None,
):
    log.info(f"generate_ollama_embeddings {form_data}")

    if url_idx is None:
        model = form_data.model

        if ":" not in model:
            model = f"{model}:latest"

        if model in app.state.MODELS:
            url_idx = random.choice(app.state.MODELS[model]["urls"])
        else:
            raise HTTPException(
                status_code=400,
                detail=ERROR_MESSAGES.MODEL_NOT_FOUND(form_data.model),
            )

    url = app.state.OLLAMA_BASE_URLS[url_idx]
    log.info(f"url: {url}")

    r = None
    try:
        r = requests.request(
            method="POST",
            url=f"{url}/api/embeddings",
            data=form_data.model_dump_json(exclude_none=True).encode(),
        )
        r.raise_for_status()

        data = r.json()

        log.info(f"generate_ollama_embeddings {data}")

        if "embedding" in data:
            return data["embedding"]
        else:
            raise ValueError("Something went wrong :/")
    except Exception as e:
        log.exception(e)
        error_detail = "Open WebUI: Server Connection Error"

        if r is not None:
            try:
                res = r.json()
                if "error" in res:
                    error_detail = f"Ollama: {res['error']}"
            except Exception:
                error_detail = f"Ollama: {e}"

        raise Exception(error_detail)
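# Usage sketch (illustrative): other backend modules can call this helper
# synchronously. The model tag "all-minilm" below is just an example and must
# exist on one of the configured backends.
#
#   embedding = generate_ollama_embeddings(
#       GenerateEmbeddingsForm(model="all-minilm", prompt="Hello, world!")
#   )
#   # -> the raw embedding vector, a list of floats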
class GenerateCompletionForm(BaseModel):
    model: str
    prompt: str
    images: Optional[List[str]] = None
    format: Optional[str] = None
    options: Optional[dict] = None
    system: Optional[str] = None
    template: Optional[str] = None
    context: Optional[str] = None
    stream: Optional[bool] = True
    raw: Optional[bool] = None
    keep_alive: Optional[Union[int, str]] = None


@app.post("/api/generate")
@app.post("/api/generate/{url_idx}")
async def generate_completion(
    form_data: GenerateCompletionForm,
    url_idx: Optional[int] = None,
    user=Depends(get_current_user),
):
    if url_idx is None:
        model = form_data.model

        if ":" not in model:
            model = f"{model}:latest"

        if model in app.state.MODELS:
            url_idx = random.choice(app.state.MODELS[model]["urls"])
        else:
            raise HTTPException(
                status_code=400,
                detail=ERROR_MESSAGES.MODEL_NOT_FOUND(form_data.model),
            )

    url = app.state.OLLAMA_BASE_URLS[url_idx]
    log.info(f"url: {url}")

    r = None

    def get_request():
        nonlocal form_data
        nonlocal r

        request_id = str(uuid.uuid4())
        try:
            REQUEST_POOL.append(request_id)

            def stream_content():
                try:
                    if form_data.stream:
                        yield json.dumps({"id": request_id, "done": False}) + "\n"

                    for chunk in r.iter_content(chunk_size=8192):
                        if request_id in REQUEST_POOL:
                            yield chunk
                        else:
                            log.warning("User: canceled request")
                            break
                finally:
                    if hasattr(r, "close"):
                        r.close()
                    if request_id in REQUEST_POOL:
                        REQUEST_POOL.remove(request_id)

            r = requests.request(
                method="POST",
                url=f"{url}/api/generate",
                data=form_data.model_dump_json(exclude_none=True).encode(),
                stream=True,
            )

            r.raise_for_status()

            return StreamingResponse(
                stream_content(),
                status_code=r.status_code,
                headers=dict(r.headers),
            )
        except Exception as e:
            raise e

    try:
        return await run_in_threadpool(get_request)
    except Exception as e:
        error_detail = "Open WebUI: Server Connection Error"

        if r is not None:
            try:
                res = r.json()
                if "error" in res:
                    error_detail = f"Ollama: {res['error']}"
            except Exception:
                error_detail = f"Ollama: {e}"

        raise HTTPException(
            status_code=r.status_code if r else 500,
            detail=error_detail,
        )
class ChatMessage(BaseModel):
    role: str
    content: str
    images: Optional[List[str]] = None


class GenerateChatCompletionForm(BaseModel):
    model: str
    messages: List[ChatMessage]
    format: Optional[str] = None
    options: Optional[dict] = None
    template: Optional[str] = None
    stream: Optional[bool] = None
    keep_alive: Optional[Union[int, str]] = None


@app.post("/api/chat")
@app.post("/api/chat/{url_idx}")
async def generate_chat_completion(
    form_data: GenerateChatCompletionForm,
    url_idx: Optional[int] = None,
    user=Depends(get_current_user),
):
    if url_idx is None:
        model = form_data.model

        if ":" not in model:
            model = f"{model}:latest"

        if model in app.state.MODELS:
            url_idx = random.choice(app.state.MODELS[model]["urls"])
        else:
            raise HTTPException(
                status_code=400,
                detail=ERROR_MESSAGES.MODEL_NOT_FOUND(form_data.model),
            )

    url = app.state.OLLAMA_BASE_URLS[url_idx]
    log.info(f"url: {url}")

    r = None

    log.debug(
        "form_data.model_dump_json(exclude_none=True).encode(): {0} ".format(
            form_data.model_dump_json(exclude_none=True).encode()
        )
    )

    def get_request():
        nonlocal form_data
        nonlocal r

        request_id = str(uuid.uuid4())
        try:
            REQUEST_POOL.append(request_id)

            def stream_content():
                try:
                    if form_data.stream:
                        yield json.dumps({"id": request_id, "done": False}) + "\n"

                    for chunk in r.iter_content(chunk_size=8192):
                        if request_id in REQUEST_POOL:
                            yield chunk
                        else:
                            log.warning("User: canceled request")
                            break
                finally:
                    if hasattr(r, "close"):
                        r.close()
                    if request_id in REQUEST_POOL:
                        REQUEST_POOL.remove(request_id)

            r = requests.request(
                method="POST",
                url=f"{url}/api/chat",
                data=form_data.model_dump_json(exclude_none=True).encode(),
                stream=True,
            )

            r.raise_for_status()

            return StreamingResponse(
                stream_content(),
                status_code=r.status_code,
                headers=dict(r.headers),
            )
        except Exception as e:
            log.exception(e)
            raise e

    try:
        return await run_in_threadpool(get_request)
    except Exception as e:
        error_detail = "Open WebUI: Server Connection Error"

        if r is not None:
            try:
                res = r.json()
                if "error" in res:
                    error_detail = f"Ollama: {res['error']}"
            except Exception:
                error_detail = f"Ollama: {e}"

        raise HTTPException(
            status_code=r.status_code if r else 500,
            detail=error_detail,
        )
# TODO: we should update this part once Ollama supports other types
class OpenAIChatMessage(BaseModel):
    role: str
    content: str

    model_config = ConfigDict(extra="allow")


class OpenAIChatCompletionForm(BaseModel):
    model: str
    messages: List[OpenAIChatMessage]

    model_config = ConfigDict(extra="allow")


@app.post("/v1/chat/completions")
@app.post("/v1/chat/completions/{url_idx}")
async def generate_openai_chat_completion(
    form_data: OpenAIChatCompletionForm,
    url_idx: Optional[int] = None,
    user=Depends(get_current_user),
):
    if url_idx is None:
        model = form_data.model

        if ":" not in model:
            model = f"{model}:latest"

        if model in app.state.MODELS:
            url_idx = random.choice(app.state.MODELS[model]["urls"])
        else:
            raise HTTPException(
                status_code=400,
                detail=ERROR_MESSAGES.MODEL_NOT_FOUND(form_data.model),
            )

    url = app.state.OLLAMA_BASE_URLS[url_idx]
    log.info(f"url: {url}")

    r = None

    def get_request():
        nonlocal form_data
        nonlocal r

        request_id = str(uuid.uuid4())
        try:
            REQUEST_POOL.append(request_id)

            def stream_content():
                try:
                    if form_data.stream:
                        yield json.dumps(
                            {"request_id": request_id, "done": False}
                        ) + "\n"

                    for chunk in r.iter_content(chunk_size=8192):
                        if request_id in REQUEST_POOL:
                            yield chunk
                        else:
                            log.warning("User: canceled request")
                            break
                finally:
                    if hasattr(r, "close"):
                        r.close()
                    if request_id in REQUEST_POOL:
                        REQUEST_POOL.remove(request_id)

            r = requests.request(
                method="POST",
                url=f"{url}/v1/chat/completions",
                data=form_data.model_dump_json(exclude_none=True).encode(),
                stream=True,
            )

            r.raise_for_status()

            return StreamingResponse(
                stream_content(),
                status_code=r.status_code,
                headers=dict(r.headers),
            )
        except Exception as e:
            raise e

    try:
        return await run_in_threadpool(get_request)
    except Exception as e:
        error_detail = "Open WebUI: Server Connection Error"

        if r is not None:
            try:
                res = r.json()
                if "error" in res:
                    error_detail = f"Ollama: {res['error']}"
            except Exception:
                error_detail = f"Ollama: {e}"

        raise HTTPException(
            status_code=r.status_code if r else 500,
            detail=error_detail,
        )
class UrlForm(BaseModel):
    url: str


class UploadBlobForm(BaseModel):
    filename: str


def parse_huggingface_url(hf_url):
    try:
        # Parse the URL
        parsed_url = urlparse(hf_url)

        # Get the path and split it into components
        path_components = parsed_url.path.split("/")

        # Extract the desired output
        user_repo = "/".join(path_components[1:3])
        model_file = path_components[-1]

        return model_file
    except ValueError:
        return None
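# Example: for the stablelm-zephyr URL referenced further below,
#   parse_huggingface_url(
#       "https://huggingface.co/TheBloke/stablelm-zephyr-3b-GGUF/resolve/main/stablelm-zephyr-3b.Q2_K.gguf"
#   )
# returns the last path component, "stablelm-zephyr-3b.Q2_K.gguf".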
async def download_file_stream(
    ollama_url, file_url, file_path, file_name, chunk_size=1024 * 1024
):
    done = False

    if os.path.exists(file_path):
        current_size = os.path.getsize(file_path)
    else:
        current_size = 0

    headers = {"Range": f"bytes={current_size}-"} if current_size > 0 else {}

    timeout = aiohttp.ClientTimeout(total=600)  # Set the timeout

    async with aiohttp.ClientSession(timeout=timeout) as session:
        async with session.get(file_url, headers=headers) as response:
            total_size = int(response.headers.get("content-length", 0)) + current_size

            with open(file_path, "ab+") as file:
                async for data in response.content.iter_chunked(chunk_size):
                    current_size += len(data)
                    file.write(data)

                    done = current_size == total_size
                    progress = round((current_size / total_size) * 100, 2)

                    yield f'data: {{"progress": {progress}, "completed": {current_size}, "total": {total_size}}}\n\n'

                if done:
                    file.seek(0)
                    hashed = calculate_sha256(file)
                    file.seek(0)

                    url = f"{ollama_url}/api/blobs/sha256:{hashed}"
                    response = requests.post(url, data=file)

                    if response.ok:
                        res = {
                            "done": done,
                            "blob": f"sha256:{hashed}",
                            "name": file_name,
                        }
                        os.remove(file_path)

                        yield f"data: {json.dumps(res)}\n\n"
                    else:
                        raise Exception(
                            "Ollama: Could not create blob, Please try again."
                        )
# def number_generator():
#     for i in range(1, 101):
#         yield f"data: {i}\n"


# url = "https://huggingface.co/TheBloke/stablelm-zephyr-3b-GGUF/resolve/main/stablelm-zephyr-3b.Q2_K.gguf"
@app.post("/models/download")
@app.post("/models/download/{url_idx}")
async def download_model(
    form_data: UrlForm,
    url_idx: Optional[int] = None,
):
    allowed_hosts = ["https://huggingface.co/", "https://github.com/"]

    if not any(form_data.url.startswith(host) for host in allowed_hosts):
        raise HTTPException(
            status_code=400,
            detail="Invalid file_url. Only URLs from allowed hosts are permitted.",
        )

    if url_idx is None:
        url_idx = 0
    url = app.state.OLLAMA_BASE_URLS[url_idx]

    file_name = parse_huggingface_url(form_data.url)

    if file_name:
        file_path = f"{UPLOAD_DIR}/{file_name}"

        return StreamingResponse(
            download_file_stream(url, form_data.url, file_path, file_name),
        )
    else:
        return None
@app.post("/models/upload")
@app.post("/models/upload/{url_idx}")
def upload_model(file: UploadFile = File(...), url_idx: Optional[int] = None):
    if url_idx is None:
        url_idx = 0
    ollama_url = app.state.OLLAMA_BASE_URLS[url_idx]

    file_path = f"{UPLOAD_DIR}/{file.filename}"

    # Save file in chunks
    with open(file_path, "wb+") as f:
        for chunk in file.file:
            f.write(chunk)

    def file_process_stream():
        nonlocal ollama_url
        total_size = os.path.getsize(file_path)
        chunk_size = 1024 * 1024
        try:
            with open(file_path, "rb") as f:
                total = 0
                done = False

                while not done:
                    chunk = f.read(chunk_size)
                    if not chunk:
                        done = True
                        continue

                    total += len(chunk)
                    progress = round((total / total_size) * 100, 2)

                    res = {
                        "progress": progress,
                        "total": total_size,
                        "completed": total,
                    }
                    yield f"data: {json.dumps(res)}\n\n"

                if done:
                    f.seek(0)
                    hashed = calculate_sha256(f)
                    f.seek(0)

                    url = f"{ollama_url}/api/blobs/sha256:{hashed}"
                    response = requests.post(url, data=f)

                    if response.ok:
                        res = {
                            "done": done,
                            "blob": f"sha256:{hashed}",
                            "name": file.filename,
                        }
                        os.remove(file_path)

                        yield f"data: {json.dumps(res)}\n\n"
                    else:
                        raise Exception(
                            "Ollama: Could not create blob, Please try again."
                        )

        except Exception as e:
            res = {"error": str(e)}
            yield f"data: {json.dumps(res)}\n\n"

    return StreamingResponse(file_process_stream(), media_type="text/event-stream")
# async def upload_model(file: UploadFile = File(), url_idx: Optional[int] = None):
#     if url_idx == None:
#         url_idx = 0
#     url = app.state.OLLAMA_BASE_URLS[url_idx]

#     file_location = os.path.join(UPLOAD_DIR, file.filename)
#     total_size = file.size

#     async def file_upload_generator(file):
#         print(file)
#         try:
#             async with aiofiles.open(file_location, "wb") as f:
#                 completed_size = 0
#                 while True:
#                     chunk = await file.read(1024 * 1024)
#                     if not chunk:
#                         break
#                     await f.write(chunk)
#                     completed_size += len(chunk)
#                     progress = (completed_size / total_size) * 100

#                     print(progress)
#                     yield f'data: {json.dumps({"status": "uploading", "percentage": progress, "total": total_size, "completed": completed_size, "done": False})}\n'
#         except Exception as e:
#             print(e)
#             yield f"data: {json.dumps({'status': 'error', 'message': str(e)})}\n"
#         finally:
#             await file.close()
#             print("done")
#             yield f'data: {json.dumps({"status": "completed", "percentage": 100, "total": total_size, "completed": completed_size, "done": True})}\n'

#     return StreamingResponse(
#         file_upload_generator(copy.deepcopy(file)), media_type="text/event-stream"
#     )
@app.api_route("/{path:path}", methods=["GET", "POST", "PUT", "DELETE"])
async def deprecated_proxy(path: str, request: Request, user=Depends(get_current_user)):
    url = app.state.OLLAMA_BASE_URLS[0]
    target_url = f"{url}/{path}"

    body = await request.body()
    headers = dict(request.headers)

    if user.role in ["user", "admin"]:
        if path in ["pull", "delete", "push", "copy", "create"]:
            if user.role != "admin":
                raise HTTPException(
                    status_code=status.HTTP_401_UNAUTHORIZED,
                    detail=ERROR_MESSAGES.ACCESS_PROHIBITED,
                )
    else:
        raise HTTPException(
            status_code=status.HTTP_401_UNAUTHORIZED,
            detail=ERROR_MESSAGES.ACCESS_PROHIBITED,
        )

    headers.pop("host", None)
    headers.pop("authorization", None)
    headers.pop("origin", None)
    headers.pop("referer", None)

    r = None

    def get_request():
        nonlocal r

        request_id = str(uuid.uuid4())
        try:
            REQUEST_POOL.append(request_id)

            def stream_content():
                try:
                    if path == "generate":
                        data = json.loads(body.decode("utf-8"))

                        if not ("stream" in data and data["stream"] == False):
                            yield json.dumps({"id": request_id, "done": False}) + "\n"

                    elif path == "chat":
                        yield json.dumps({"id": request_id, "done": False}) + "\n"

                    for chunk in r.iter_content(chunk_size=8192):
                        if request_id in REQUEST_POOL:
                            yield chunk
                        else:
                            log.warning("User: canceled request")
                            break
                finally:
                    if hasattr(r, "close"):
                        r.close()
                    if request_id in REQUEST_POOL:
                        REQUEST_POOL.remove(request_id)

            r = requests.request(
                method=request.method,
                url=target_url,
                data=body,
                headers=headers,
                stream=True,
            )

            r.raise_for_status()

            # r.close()

            return StreamingResponse(
                stream_content(),
                status_code=r.status_code,
                headers=dict(r.headers),
            )
        except Exception as e:
            raise e

    try:
        return await run_in_threadpool(get_request)
    except Exception as e:
        error_detail = "Open WebUI: Server Connection Error"

        if r is not None:
            try:
                res = r.json()
                if "error" in res:
                    error_detail = f"Ollama: {res['error']}"
            except Exception:
                error_detail = f"Ollama: {e}"

        raise HTTPException(
            status_code=r.status_code if r else 500,
            detail=error_detail,
        )