from fastapi import (
    FastAPI,
    Request,
    Response,
    HTTPException,
    Depends,
    status,
    UploadFile,
    File,
    BackgroundTasks,
)
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import StreamingResponse
from fastapi.concurrency import run_in_threadpool
from pydantic import BaseModel, ConfigDict

import os
import re
import copy
import random
import requests
import json
import uuid
import aiohttp
import asyncio
import logging
import time

from urllib.parse import urlparse
from typing import Optional, List, Union

from apps.web.models.users import Users
from constants import ERROR_MESSAGES
from utils.utils import (
    decode_token,
    get_current_user,
    get_verified_user,
    get_admin_user,
)
from config import (
    SRC_LOG_LEVELS,
    OLLAMA_BASE_URLS,
    ENABLE_OLLAMA_API,
    ENABLE_MODEL_FILTER,
    MODEL_FILTER_LIST,
    UPLOAD_DIR,
    AppConfig,
)
from utils.misc import calculate_sha256

log = logging.getLogger(__name__)
log.setLevel(SRC_LOG_LEVELS["OLLAMA"])

app = FastAPI()
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

app.state.config = AppConfig()

app.state.config.ENABLE_MODEL_FILTER = ENABLE_MODEL_FILTER
app.state.config.MODEL_FILTER_LIST = MODEL_FILTER_LIST

app.state.config.ENABLE_OLLAMA_API = ENABLE_OLLAMA_API
app.state.config.OLLAMA_BASE_URLS = OLLAMA_BASE_URLS

app.state.MODELS = {}

# Ids of in-flight streaming requests; removing an id cancels its stream.
REQUEST_POOL = []


# TODO: Implement a more intelligent load-balancing mechanism for distributing
# requests among multiple backend instances. The current implementation simply
# picks a backend at random (random.choice). Consider algorithms like weighted
# round-robin, least connections, or least response time for better resource
# utilization and performance.
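# Illustrative sketch only, not wired into the handlers below: a
# least-connections selector that could replace random.choice once per-backend
# in-flight counts are tracked. ACTIVE_REQUESTS and pick_least_busy are
# hypothetical names, not part of the existing code.
ACTIVE_REQUESTS = {}  # url_idx -> number of in-flight requests


def pick_least_busy(url_idxs):
    # Backends we have not seen yet count as having zero in-flight requests.
    return min(url_idxs, key=lambda idx: ACTIVE_REQUESTS.get(idx, 0))
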
@app.middleware("http")
async def check_url(request: Request, call_next):
    if len(app.state.MODELS) == 0:
        await get_all_models()

    response = await call_next(request)
    return response


@app.head("/")
@app.get("/")
async def get_status():
    return {"status": True}

@app.get("/config")
async def get_config(user=Depends(get_admin_user)):
    return {"ENABLE_OLLAMA_API": app.state.config.ENABLE_OLLAMA_API}


class OllamaConfigForm(BaseModel):
    enable_ollama_api: Optional[bool] = None


@app.post("/config/update")
async def update_config(form_data: OllamaConfigForm, user=Depends(get_admin_user)):
    app.state.config.ENABLE_OLLAMA_API = form_data.enable_ollama_api
    return {"ENABLE_OLLAMA_API": app.state.config.ENABLE_OLLAMA_API}


@app.get("/urls")
async def get_ollama_api_urls(user=Depends(get_admin_user)):
    return {"OLLAMA_BASE_URLS": app.state.config.OLLAMA_BASE_URLS}


class UrlUpdateForm(BaseModel):
    urls: List[str]


@app.post("/urls/update")
async def update_ollama_api_url(form_data: UrlUpdateForm, user=Depends(get_admin_user)):
    app.state.config.OLLAMA_BASE_URLS = form_data.urls
    log.info(f"app.state.config.OLLAMA_BASE_URLS: {app.state.config.OLLAMA_BASE_URLS}")
    return {"OLLAMA_BASE_URLS": app.state.config.OLLAMA_BASE_URLS}

@app.get("/cancel/{request_id}")
async def cancel_ollama_request(request_id: str, user=Depends(get_current_user)):
    if user:
        if request_id in REQUEST_POOL:
            REQUEST_POOL.remove(request_id)
        return True
    else:
        raise HTTPException(status_code=401, detail=ERROR_MESSAGES.ACCESS_PROHIBITED)

async def fetch_url(url):
    timeout = aiohttp.ClientTimeout(total=5)
    try:
        async with aiohttp.ClientSession(timeout=timeout) as session:
            async with session.get(url) as response:
                return await response.json()
    except Exception as e:
        # Treat an unreachable backend as missing rather than failing the caller.
        log.error(f"Connection error: {e}")
        return None

def merge_models_lists(model_lists):
    merged_models = {}

    for idx, model_list in enumerate(model_lists):
        if model_list is not None:
            for model in model_list:
                digest = model["digest"]
                if digest not in merged_models:
                    model["urls"] = [idx]
                    merged_models[digest] = model
                else:
                    merged_models[digest]["urls"].append(idx)

    return list(merged_models.values())
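# For example, if backends 0 and 1 both report a model with the same digest,
# the merged list contains a single entry such as
#   {"name": "llama2:7b", "digest": "...", "urls": [0, 1]}
# so any listed backend can serve that model.
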
async def get_all_models():
    log.info("get_all_models()")

    if app.state.config.ENABLE_OLLAMA_API:
        tasks = [
            fetch_url(f"{url}/api/tags") for url in app.state.config.OLLAMA_BASE_URLS
        ]
        responses = await asyncio.gather(*tasks)

        models = {
            "models": merge_models_lists(
                map(
                    lambda response: response["models"] if response else None, responses
                )
            )
        }
    else:
        models = {"models": []}

    app.state.MODELS = {model["model"]: model for model in models["models"]}
    return models

@app.get("/api/tags")
@app.get("/api/tags/{url_idx}")
async def get_ollama_tags(
    url_idx: Optional[int] = None, user=Depends(get_verified_user)
):
    if url_idx is None:
        models = await get_all_models()

        if app.state.config.ENABLE_MODEL_FILTER:
            if user.role == "user":
                models["models"] = list(
                    filter(
                        lambda model: model["name"]
                        in app.state.config.MODEL_FILTER_LIST,
                        models["models"],
                    )
                )
                return models

        return models
    else:
        url = app.state.config.OLLAMA_BASE_URLS[url_idx]

        # Initialize r so the exception handler can inspect it safely.
        r = None
        try:
            r = requests.request(method="GET", url=f"{url}/api/tags")
            r.raise_for_status()

            return r.json()
        except Exception as e:
            log.exception(e)
            error_detail = "Open WebUI: Server Connection Error"
            if r is not None:
                try:
                    res = r.json()
                    if "error" in res:
                        error_detail = f"Ollama: {res['error']}"
                except Exception:
                    error_detail = f"Ollama: {e}"

            raise HTTPException(
                status_code=r.status_code if r else 500,
                detail=error_detail,
            )

@app.get("/api/version")
@app.get("/api/version/{url_idx}")
async def get_ollama_versions(url_idx: Optional[int] = None):
    if url_idx is None:
        # Query every backend and return the lowest version among them.
        tasks = [
            fetch_url(f"{url}/api/version") for url in app.state.config.OLLAMA_BASE_URLS
        ]
        responses = await asyncio.gather(*tasks)
        responses = list(filter(lambda x: x is not None, responses))

        if len(responses) > 0:
            lowest_version = min(
                responses,
                key=lambda x: tuple(
                    map(int, re.sub(r"^v|-.*", "", x["version"]).split("."))
                ),
            )

            return {"version": lowest_version["version"]}
        else:
            raise HTTPException(
                status_code=500,
                detail=ERROR_MESSAGES.OLLAMA_NOT_FOUND,
            )
    else:
        url = app.state.config.OLLAMA_BASE_URLS[url_idx]

        r = None
        try:
            r = requests.request(method="GET", url=f"{url}/api/version")
            r.raise_for_status()

            return r.json()
        except Exception as e:
            log.exception(e)
            error_detail = "Open WebUI: Server Connection Error"
            if r is not None:
                try:
                    res = r.json()
                    if "error" in res:
                        error_detail = f"Ollama: {res['error']}"
                except Exception:
                    error_detail = f"Ollama: {e}"

            raise HTTPException(
                status_code=r.status_code if r else 500,
                detail=error_detail,
            )

class ModelNameForm(BaseModel):
    name: str

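# Example request body for /api/pull (same shape as Ollama's native API; the
# model name is illustrative):
#   {"name": "llama2:7b"}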
@app.post("/api/pull")
@app.post("/api/pull/{url_idx}")
async def pull_model(
    form_data: ModelNameForm, url_idx: int = 0, user=Depends(get_admin_user)
):
    url = app.state.config.OLLAMA_BASE_URLS[url_idx]
    log.info(f"url: {url}")

    r = None

    def get_request():
        nonlocal url
        nonlocal r

        request_id = str(uuid.uuid4())
        try:
            REQUEST_POOL.append(request_id)

            def stream_content():
                try:
                    yield json.dumps({"id": request_id, "done": False}) + "\n"

                    for chunk in r.iter_content(chunk_size=8192):
                        if request_id in REQUEST_POOL:
                            yield chunk
                        else:
                            log.warning("User: canceled request")
                            break
                finally:
                    if hasattr(r, "close"):
                        r.close()
                    if request_id in REQUEST_POOL:
                        REQUEST_POOL.remove(request_id)

            r = requests.request(
                method="POST",
                url=f"{url}/api/pull",
                data=form_data.model_dump_json(exclude_none=True).encode(),
                stream=True,
            )

            r.raise_for_status()

            return StreamingResponse(
                stream_content(),
                status_code=r.status_code,
                headers=dict(r.headers),
            )
        except Exception as e:
            # Don't leak the id into the pool if the upstream call fails
            # before streaming starts.
            if request_id in REQUEST_POOL:
                REQUEST_POOL.remove(request_id)
            raise e

    try:
        return await run_in_threadpool(get_request)
    except Exception as e:
        log.exception(e)
        error_detail = "Open WebUI: Server Connection Error"
        if r is not None:
            try:
                res = r.json()
                if "error" in res:
                    error_detail = f"Ollama: {res['error']}"
            except Exception:
                error_detail = f"Ollama: {e}"

        raise HTTPException(
            status_code=r.status_code if r else 500,
            detail=error_detail,
        )

class PushModelForm(BaseModel):
    name: str
    insecure: Optional[bool] = None
    stream: Optional[bool] = None

@app.delete("/api/push")
@app.delete("/api/push/{url_idx}")
async def push_model(
    form_data: PushModelForm,
    url_idx: Optional[int] = None,
    user=Depends(get_admin_user),
):
    if url_idx is None:
        if form_data.name in app.state.MODELS:
            url_idx = app.state.MODELS[form_data.name]["urls"][0]
        else:
            raise HTTPException(
                status_code=400,
                detail=ERROR_MESSAGES.MODEL_NOT_FOUND(form_data.name),
            )

    url = app.state.config.OLLAMA_BASE_URLS[url_idx]
    log.debug(f"url: {url}")

    r = None

    def get_request():
        nonlocal url
        nonlocal r

        try:

            def stream_content():
                for chunk in r.iter_content(chunk_size=8192):
                    yield chunk

            r = requests.request(
                method="POST",
                url=f"{url}/api/push",
                data=form_data.model_dump_json(exclude_none=True).encode(),
            )

            r.raise_for_status()

            return StreamingResponse(
                stream_content(),
                status_code=r.status_code,
                headers=dict(r.headers),
            )
        except Exception as e:
            raise e

    try:
        return await run_in_threadpool(get_request)
    except Exception as e:
        log.exception(e)
        error_detail = "Open WebUI: Server Connection Error"
        if r is not None:
            try:
                res = r.json()
                if "error" in res:
                    error_detail = f"Ollama: {res['error']}"
            except Exception:
                error_detail = f"Ollama: {e}"

        raise HTTPException(
            status_code=r.status_code if r else 500,
            detail=error_detail,
        )

class CreateModelForm(BaseModel):
    name: str
    modelfile: Optional[str] = None
    stream: Optional[bool] = None
    path: Optional[str] = None

@app.post("/api/create")
@app.post("/api/create/{url_idx}")
async def create_model(
    form_data: CreateModelForm, url_idx: int = 0, user=Depends(get_admin_user)
):
    log.debug(f"form_data: {form_data}")
    url = app.state.config.OLLAMA_BASE_URLS[url_idx]
    log.info(f"url: {url}")

    r = None

    def get_request():
        nonlocal url
        nonlocal r

        try:

            def stream_content():
                for chunk in r.iter_content(chunk_size=8192):
                    yield chunk

            r = requests.request(
                method="POST",
                url=f"{url}/api/create",
                data=form_data.model_dump_json(exclude_none=True).encode(),
                stream=True,
            )

            r.raise_for_status()
            log.debug(f"r: {r}")

            return StreamingResponse(
                stream_content(),
                status_code=r.status_code,
                headers=dict(r.headers),
            )
        except Exception as e:
            raise e

    try:
        return await run_in_threadpool(get_request)
    except Exception as e:
        log.exception(e)
        error_detail = "Open WebUI: Server Connection Error"
        if r is not None:
            try:
                res = r.json()
                if "error" in res:
                    error_detail = f"Ollama: {res['error']}"
            except Exception:
                error_detail = f"Ollama: {e}"

        raise HTTPException(
            status_code=r.status_code if r else 500,
            detail=error_detail,
        )

class CopyModelForm(BaseModel):
    source: str
    destination: str

@app.post("/api/copy")
@app.post("/api/copy/{url_idx}")
async def copy_model(
    form_data: CopyModelForm,
    url_idx: Optional[int] = None,
    user=Depends(get_admin_user),
):
    if url_idx is None:
        if form_data.source in app.state.MODELS:
            url_idx = app.state.MODELS[form_data.source]["urls"][0]
        else:
            raise HTTPException(
                status_code=400,
                detail=ERROR_MESSAGES.MODEL_NOT_FOUND(form_data.source),
            )

    url = app.state.config.OLLAMA_BASE_URLS[url_idx]
    log.info(f"url: {url}")

    r = None
    try:
        r = requests.request(
            method="POST",
            url=f"{url}/api/copy",
            data=form_data.model_dump_json(exclude_none=True).encode(),
        )
        r.raise_for_status()

        log.debug(f"r.text: {r.text}")

        return True
    except Exception as e:
        log.exception(e)
        error_detail = "Open WebUI: Server Connection Error"
        if r is not None:
            try:
                res = r.json()
                if "error" in res:
                    error_detail = f"Ollama: {res['error']}"
            except Exception:
                error_detail = f"Ollama: {e}"

        raise HTTPException(
            status_code=r.status_code if r else 500,
            detail=error_detail,
        )

@app.delete("/api/delete")
@app.delete("/api/delete/{url_idx}")
async def delete_model(
    form_data: ModelNameForm,
    url_idx: Optional[int] = None,
    user=Depends(get_admin_user),
):
    if url_idx is None:
        if form_data.name in app.state.MODELS:
            url_idx = app.state.MODELS[form_data.name]["urls"][0]
        else:
            raise HTTPException(
                status_code=400,
                detail=ERROR_MESSAGES.MODEL_NOT_FOUND(form_data.name),
            )

    url = app.state.config.OLLAMA_BASE_URLS[url_idx]
    log.info(f"url: {url}")

    r = None
    try:
        r = requests.request(
            method="DELETE",
            url=f"{url}/api/delete",
            data=form_data.model_dump_json(exclude_none=True).encode(),
        )
        r.raise_for_status()

        log.debug(f"r.text: {r.text}")

        return True
    except Exception as e:
        log.exception(e)
        error_detail = "Open WebUI: Server Connection Error"
        if r is not None:
            try:
                res = r.json()
                if "error" in res:
                    error_detail = f"Ollama: {res['error']}"
            except Exception:
                error_detail = f"Ollama: {e}"

        raise HTTPException(
            status_code=r.status_code if r else 500,
            detail=error_detail,
        )

@app.post("/api/show")
async def show_model_info(form_data: ModelNameForm, user=Depends(get_verified_user)):
    if form_data.name not in app.state.MODELS:
        raise HTTPException(
            status_code=400,
            detail=ERROR_MESSAGES.MODEL_NOT_FOUND(form_data.name),
        )

    url_idx = random.choice(app.state.MODELS[form_data.name]["urls"])
    url = app.state.config.OLLAMA_BASE_URLS[url_idx]
    log.info(f"url: {url}")

    r = None
    try:
        r = requests.request(
            method="POST",
            url=f"{url}/api/show",
            data=form_data.model_dump_json(exclude_none=True).encode(),
        )
        r.raise_for_status()

        return r.json()
    except Exception as e:
        log.exception(e)
        error_detail = "Open WebUI: Server Connection Error"
        if r is not None:
            try:
                res = r.json()
                if "error" in res:
                    error_detail = f"Ollama: {res['error']}"
            except Exception:
                error_detail = f"Ollama: {e}"

        raise HTTPException(
            status_code=r.status_code if r else 500,
            detail=error_detail,
        )

class GenerateEmbeddingsForm(BaseModel):
    model: str
    prompt: str
    options: Optional[dict] = None
    keep_alive: Optional[Union[int, str]] = None

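# Example request body for /api/embeddings (the model name is illustrative):
#   {"model": "all-minilm:latest", "prompt": "Why is the sky blue?"}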
@app.post("/api/embeddings")
@app.post("/api/embeddings/{url_idx}")
async def generate_embeddings(
    form_data: GenerateEmbeddingsForm,
    url_idx: Optional[int] = None,
    user=Depends(get_verified_user),
):
    if url_idx is None:
        model = form_data.model

        if ":" not in model:
            model = f"{model}:latest"

        if model in app.state.MODELS:
            url_idx = random.choice(app.state.MODELS[model]["urls"])
        else:
            raise HTTPException(
                status_code=400,
                detail=ERROR_MESSAGES.MODEL_NOT_FOUND(form_data.model),
            )

    url = app.state.config.OLLAMA_BASE_URLS[url_idx]
    log.info(f"url: {url}")

    r = None
    try:
        r = requests.request(
            method="POST",
            url=f"{url}/api/embeddings",
            data=form_data.model_dump_json(exclude_none=True).encode(),
        )
        r.raise_for_status()

        return r.json()
    except Exception as e:
        log.exception(e)
        error_detail = "Open WebUI: Server Connection Error"
        if r is not None:
            try:
                res = r.json()
                if "error" in res:
                    error_detail = f"Ollama: {res['error']}"
            except Exception:
                error_detail = f"Ollama: {e}"

        raise HTTPException(
            status_code=r.status_code if r else 500,
            detail=error_detail,
        )

def generate_ollama_embeddings(
    form_data: GenerateEmbeddingsForm,
    url_idx: Optional[int] = None,
):
    log.info(f"generate_ollama_embeddings {form_data}")

    if url_idx is None:
        model = form_data.model

        if ":" not in model:
            model = f"{model}:latest"

        if model in app.state.MODELS:
            url_idx = random.choice(app.state.MODELS[model]["urls"])
        else:
            raise HTTPException(
                status_code=400,
                detail=ERROR_MESSAGES.MODEL_NOT_FOUND(form_data.model),
            )

    url = app.state.config.OLLAMA_BASE_URLS[url_idx]
    log.info(f"url: {url}")

    r = None
    try:
        r = requests.request(
            method="POST",
            url=f"{url}/api/embeddings",
            data=form_data.model_dump_json(exclude_none=True).encode(),
        )
        r.raise_for_status()

        data = r.json()
        log.info(f"generate_ollama_embeddings {data}")

        if "embedding" in data:
            return data["embedding"]
        else:
            # The upstream response had no "embedding" field.
            raise Exception("Something went wrong :/")
    except Exception as e:
        log.exception(e)
        error_detail = "Open WebUI: Server Connection Error"
        if r is not None:
            try:
                res = r.json()
                if "error" in res:
                    error_detail = f"Ollama: {res['error']}"
            except Exception:
                error_detail = f"Ollama: {e}"

        # Strings cannot be raised in Python 3; wrap the detail in an Exception.
        raise Exception(error_detail)

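# Example synchronous usage from other server-side code (the model name is
# illustrative):
#   embedding = generate_ollama_embeddings(
#       GenerateEmbeddingsForm(model="all-minilm", prompt="Hello")
#   )

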
class GenerateCompletionForm(BaseModel):
    model: str
    prompt: str
    images: Optional[List[str]] = None
    format: Optional[str] = None
    options: Optional[dict] = None
    system: Optional[str] = None
    template: Optional[str] = None
    context: Optional[str] = None
    stream: Optional[bool] = True
    raw: Optional[bool] = None
    keep_alive: Optional[Union[int, str]] = None

@app.post("/api/generate")
@app.post("/api/generate/{url_idx}")
async def generate_completion(
    form_data: GenerateCompletionForm,
    url_idx: Optional[int] = None,
    user=Depends(get_verified_user),
):
    if url_idx is None:
        model = form_data.model

        if ":" not in model:
            model = f"{model}:latest"

        if model in app.state.MODELS:
            url_idx = random.choice(app.state.MODELS[model]["urls"])
        else:
            raise HTTPException(
                status_code=400,
                detail=ERROR_MESSAGES.MODEL_NOT_FOUND(form_data.model),
            )

    url = app.state.config.OLLAMA_BASE_URLS[url_idx]
    log.info(f"url: {url}")

    r = None

    def get_request():
        nonlocal form_data
        nonlocal r

        request_id = str(uuid.uuid4())
        try:
            REQUEST_POOL.append(request_id)

            def stream_content():
                try:
                    if form_data.stream:
                        yield json.dumps({"id": request_id, "done": False}) + "\n"

                    for chunk in r.iter_content(chunk_size=8192):
                        if request_id in REQUEST_POOL:
                            yield chunk
                        else:
                            log.warning("User: canceled request")
                            break
                finally:
                    if hasattr(r, "close"):
                        r.close()
                    if request_id in REQUEST_POOL:
                        REQUEST_POOL.remove(request_id)

            r = requests.request(
                method="POST",
                url=f"{url}/api/generate",
                data=form_data.model_dump_json(exclude_none=True).encode(),
                stream=True,
            )

            r.raise_for_status()

            return StreamingResponse(
                stream_content(),
                status_code=r.status_code,
                headers=dict(r.headers),
            )
        except Exception as e:
            # Don't leak the id into the pool if the upstream call fails.
            if request_id in REQUEST_POOL:
                REQUEST_POOL.remove(request_id)
            raise e

    try:
        return await run_in_threadpool(get_request)
    except Exception as e:
        error_detail = "Open WebUI: Server Connection Error"
        if r is not None:
            try:
                res = r.json()
                if "error" in res:
                    error_detail = f"Ollama: {res['error']}"
            except Exception:
                error_detail = f"Ollama: {e}"

        raise HTTPException(
            status_code=r.status_code if r else 500,
            detail=error_detail,
        )

class ChatMessage(BaseModel):
    role: str
    content: str
    images: Optional[List[str]] = None


class GenerateChatCompletionForm(BaseModel):
    model: str
    messages: List[ChatMessage]
    format: Optional[str] = None
    options: Optional[dict] = None
    template: Optional[str] = None
    stream: Optional[bool] = None
    keep_alive: Optional[Union[int, str]] = None

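# Example request body for /api/chat (the model name is illustrative):
#   {
#       "model": "llama2:7b",
#       "messages": [{"role": "user", "content": "Hello!"}],
#       "stream": true
#   }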
@app.post("/api/chat")
@app.post("/api/chat/{url_idx}")
async def generate_chat_completion(
    form_data: GenerateChatCompletionForm,
    url_idx: Optional[int] = None,
    user=Depends(get_verified_user),
):
    if url_idx is None:
        model = form_data.model

        if ":" not in model:
            model = f"{model}:latest"

        if model in app.state.MODELS:
            url_idx = random.choice(app.state.MODELS[model]["urls"])
        else:
            raise HTTPException(
                status_code=400,
                detail=ERROR_MESSAGES.MODEL_NOT_FOUND(form_data.model),
            )

    url = app.state.config.OLLAMA_BASE_URLS[url_idx]
    log.info(f"url: {url}")

    r = None

    log.debug(
        "form_data.model_dump_json(exclude_none=True).encode(): "
        f"{form_data.model_dump_json(exclude_none=True).encode()}"
    )

    def get_request():
        nonlocal form_data
        nonlocal r

        request_id = str(uuid.uuid4())
        try:
            REQUEST_POOL.append(request_id)

            def stream_content():
                try:
                    if form_data.stream:
                        yield json.dumps({"id": request_id, "done": False}) + "\n"

                    for chunk in r.iter_content(chunk_size=8192):
                        if request_id in REQUEST_POOL:
                            yield chunk
                        else:
                            log.warning("User: canceled request")
                            break
                finally:
                    if hasattr(r, "close"):
                        r.close()
                    if request_id in REQUEST_POOL:
                        REQUEST_POOL.remove(request_id)

            r = requests.request(
                method="POST",
                url=f"{url}/api/chat",
                data=form_data.model_dump_json(exclude_none=True).encode(),
                stream=True,
            )

            r.raise_for_status()

            return StreamingResponse(
                stream_content(),
                status_code=r.status_code,
                headers=dict(r.headers),
            )
        except Exception as e:
            log.exception(e)
            if request_id in REQUEST_POOL:
                REQUEST_POOL.remove(request_id)
            raise e

    try:
        return await run_in_threadpool(get_request)
    except Exception as e:
        error_detail = "Open WebUI: Server Connection Error"
        if r is not None:
            try:
                res = r.json()
                if "error" in res:
                    error_detail = f"Ollama: {res['error']}"
            except Exception:
                error_detail = f"Ollama: {e}"

        raise HTTPException(
            status_code=r.status_code if r else 500,
            detail=error_detail,
        )

# TODO: we should update this part once Ollama supports other types
class OpenAIChatMessage(BaseModel):
    role: str
    content: str

    model_config = ConfigDict(extra="allow")


class OpenAIChatCompletionForm(BaseModel):
    model: str
    messages: List[OpenAIChatMessage]

    model_config = ConfigDict(extra="allow")

@app.post("/v1/chat/completions")
@app.post("/v1/chat/completions/{url_idx}")
async def generate_openai_chat_completion(
    form_data: OpenAIChatCompletionForm,
    url_idx: Optional[int] = None,
    user=Depends(get_verified_user),
):
    if url_idx is None:
        model = form_data.model

        if ":" not in model:
            model = f"{model}:latest"

        if model in app.state.MODELS:
            url_idx = random.choice(app.state.MODELS[model]["urls"])
        else:
            raise HTTPException(
                status_code=400,
                detail=ERROR_MESSAGES.MODEL_NOT_FOUND(form_data.model),
            )

    url = app.state.config.OLLAMA_BASE_URLS[url_idx]
    log.info(f"url: {url}")

    r = None

    def get_request():
        nonlocal form_data
        nonlocal r

        request_id = str(uuid.uuid4())
        try:
            REQUEST_POOL.append(request_id)

            def stream_content():
                try:
                    # "stream" is an extra field on this form, so guard the
                    # lookup in case the client did not send it.
                    if getattr(form_data, "stream", False):
                        yield json.dumps(
                            {"request_id": request_id, "done": False}
                        ) + "\n"

                    for chunk in r.iter_content(chunk_size=8192):
                        if request_id in REQUEST_POOL:
                            yield chunk
                        else:
                            log.warning("User: canceled request")
                            break
                finally:
                    if hasattr(r, "close"):
                        r.close()
                    if request_id in REQUEST_POOL:
                        REQUEST_POOL.remove(request_id)

            r = requests.request(
                method="POST",
                url=f"{url}/v1/chat/completions",
                data=form_data.model_dump_json(exclude_none=True).encode(),
                stream=True,
            )

            r.raise_for_status()

            return StreamingResponse(
                stream_content(),
                status_code=r.status_code,
                headers=dict(r.headers),
            )
        except Exception as e:
            if request_id in REQUEST_POOL:
                REQUEST_POOL.remove(request_id)
            raise e

    try:
        return await run_in_threadpool(get_request)
    except Exception as e:
        error_detail = "Open WebUI: Server Connection Error"
        if r is not None:
            try:
                res = r.json()
                if "error" in res:
                    error_detail = f"Ollama: {res['error']}"
            except Exception:
                error_detail = f"Ollama: {e}"

        raise HTTPException(
            status_code=r.status_code if r else 500,
            detail=error_detail,
        )

@app.get("/v1/models")
@app.get("/v1/models/{url_idx}")
async def get_openai_models(
    url_idx: Optional[int] = None,
    user=Depends(get_verified_user),
):
    if url_idx is None:
        models = await get_all_models()

        if app.state.config.ENABLE_MODEL_FILTER:
            if user.role == "user":
                models["models"] = list(
                    filter(
                        lambda model: model["name"]
                        in app.state.config.MODEL_FILTER_LIST,
                        models["models"],
                    )
                )

        return {
            "data": [
                {
                    "id": model["model"],
                    "object": "model",
                    "created": int(time.time()),
                    "owned_by": "openai",
                }
                for model in models["models"]
            ],
            "object": "list",
        }
    else:
        url = app.state.config.OLLAMA_BASE_URLS[url_idx]

        r = None
        try:
            r = requests.request(method="GET", url=f"{url}/api/tags")
            r.raise_for_status()

            models = r.json()

            return {
                "data": [
                    {
                        "id": model["model"],
                        "object": "model",
                        "created": int(time.time()),
                        "owned_by": "openai",
                    }
                    for model in models["models"]
                ],
                "object": "list",
            }
        except Exception as e:
            log.exception(e)
            error_detail = "Open WebUI: Server Connection Error"
            if r is not None:
                try:
                    res = r.json()
                    if "error" in res:
                        error_detail = f"Ollama: {res['error']}"
                except Exception:
                    error_detail = f"Ollama: {e}"

            raise HTTPException(
                status_code=r.status_code if r else 500,
                detail=error_detail,
            )

class UrlForm(BaseModel):
    url: str


class UploadBlobForm(BaseModel):
    filename: str

def parse_huggingface_url(hf_url):
    """Return the file name (last path component) of a Hugging Face URL."""
    try:
        parsed_url = urlparse(hf_url)

        # Split the path into components; the last one is the file name.
        path_components = parsed_url.path.split("/")
        model_file = path_components[-1]

        return model_file
    except ValueError:
        return None
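# Example: for
#   https://huggingface.co/TheBloke/stablelm-zephyr-3b-GGUF/resolve/main/stablelm-zephyr-3b.Q2_K.gguf
# parse_huggingface_url returns "stablelm-zephyr-3b.Q2_K.gguf".

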
async def download_file_stream(
    ollama_url, file_url, file_path, file_name, chunk_size=1024 * 1024
):
    done = False

    # Resume a partial download, if one exists, via an HTTP Range request.
    if os.path.exists(file_path):
        current_size = os.path.getsize(file_path)
    else:
        current_size = 0

    headers = {"Range": f"bytes={current_size}-"} if current_size > 0 else {}

    timeout = aiohttp.ClientTimeout(total=600)  # Set the timeout

    async with aiohttp.ClientSession(timeout=timeout) as session:
        async with session.get(file_url, headers=headers) as response:
            total_size = int(response.headers.get("content-length", 0)) + current_size

            with open(file_path, "ab+") as file:
                async for data in response.content.iter_chunked(chunk_size):
                    current_size += len(data)
                    file.write(data)

                    done = current_size == total_size
                    progress = round((current_size / total_size) * 100, 2)

                    yield f'data: {{"progress": {progress}, "completed": {current_size}, "total": {total_size}}}\n\n'

                if done:
                    file.seek(0)
                    hashed = calculate_sha256(file)
                    file.seek(0)

                    url = f"{ollama_url}/api/blobs/sha256:{hashed}"
                    response = requests.post(url, data=file)

                    if response.ok:
                        res = {
                            "done": done,
                            "blob": f"sha256:{hashed}",
                            "name": file_name,
                        }
                        os.remove(file_path)

                        yield f"data: {json.dumps(res)}\n\n"
                    else:
                        # Strings cannot be raised; wrap the message instead.
                        raise Exception(
                            "Ollama: Could not create blob, Please try again."
                        )

# Example download URL:
# https://huggingface.co/TheBloke/stablelm-zephyr-3b-GGUF/resolve/main/stablelm-zephyr-3b.Q2_K.gguf
@app.post("/models/download")
@app.post("/models/download/{url_idx}")
async def download_model(
    form_data: UrlForm,
    url_idx: Optional[int] = None,
):
    allowed_hosts = ["https://huggingface.co/", "https://github.com/"]

    if not any(form_data.url.startswith(host) for host in allowed_hosts):
        raise HTTPException(
            status_code=400,
            detail="Invalid file_url. Only URLs from allowed hosts are permitted.",
        )

    if url_idx is None:
        url_idx = 0
    url = app.state.config.OLLAMA_BASE_URLS[url_idx]

    file_name = parse_huggingface_url(form_data.url)

    if file_name:
        file_path = f"{UPLOAD_DIR}/{file_name}"

        return StreamingResponse(
            download_file_stream(url, form_data.url, file_path, file_name),
        )
    else:
        return None

@app.post("/models/upload")
@app.post("/models/upload/{url_idx}")
def upload_model(file: UploadFile = File(...), url_idx: Optional[int] = None):
    if url_idx is None:
        url_idx = 0
    ollama_url = app.state.config.OLLAMA_BASE_URLS[url_idx]

    file_path = f"{UPLOAD_DIR}/{file.filename}"

    # Save the uploaded file in chunks.
    with open(file_path, "wb+") as f:
        for chunk in file.file:
            f.write(chunk)

    def file_process_stream():
        nonlocal ollama_url
        total_size = os.path.getsize(file_path)
        chunk_size = 1024 * 1024
        try:
            with open(file_path, "rb") as f:
                total = 0
                done = False

                while not done:
                    chunk = f.read(chunk_size)
                    if not chunk:
                        done = True
                        continue

                    total += len(chunk)
                    progress = round((total / total_size) * 100, 2)

                    res = {
                        "progress": progress,
                        "total": total_size,
                        "completed": total,
                    }
                    yield f"data: {json.dumps(res)}\n\n"

                if done:
                    f.seek(0)
                    hashed = calculate_sha256(f)
                    f.seek(0)

                    url = f"{ollama_url}/api/blobs/sha256:{hashed}"
                    response = requests.post(url, data=f)

                    if response.ok:
                        res = {
                            "done": done,
                            "blob": f"sha256:{hashed}",
                            "name": file.filename,
                        }
                        os.remove(file_path)
                        yield f"data: {json.dumps(res)}\n\n"
                    else:
                        raise Exception(
                            "Ollama: Could not create blob, Please try again."
                        )
        except Exception as e:
            res = {"error": str(e)}
            yield f"data: {json.dumps(res)}\n\n"

    return StreamingResponse(file_process_stream(), media_type="text/event-stream")

@app.api_route("/{path:path}", methods=["GET", "POST", "PUT", "DELETE"])
async def deprecated_proxy(
    path: str, request: Request, user=Depends(get_verified_user)
):
    url = app.state.config.OLLAMA_BASE_URLS[0]
    target_url = f"{url}/{path}"

    body = await request.body()
    headers = dict(request.headers)

    if user.role in ["user", "admin"]:
        if path in ["pull", "delete", "push", "copy", "create"]:
            if user.role != "admin":
                raise HTTPException(
                    status_code=status.HTTP_401_UNAUTHORIZED,
                    detail=ERROR_MESSAGES.ACCESS_PROHIBITED,
                )
    else:
        raise HTTPException(
            status_code=status.HTTP_401_UNAUTHORIZED,
            detail=ERROR_MESSAGES.ACCESS_PROHIBITED,
        )

    # Strip headers that should not be forwarded upstream.
    headers.pop("host", None)
    headers.pop("authorization", None)
    headers.pop("origin", None)
    headers.pop("referer", None)

    r = None

    def get_request():
        nonlocal r

        request_id = str(uuid.uuid4())
        try:
            REQUEST_POOL.append(request_id)

            def stream_content():
                try:
                    if path == "generate":
                        data = json.loads(body.decode("utf-8"))

                        if not ("stream" in data and data["stream"] == False):
                            yield json.dumps({"id": request_id, "done": False}) + "\n"

                    elif path == "chat":
                        yield json.dumps({"id": request_id, "done": False}) + "\n"

                    for chunk in r.iter_content(chunk_size=8192):
                        if request_id in REQUEST_POOL:
                            yield chunk
                        else:
                            log.warning("User: canceled request")
                            break
                finally:
                    if hasattr(r, "close"):
                        r.close()
                    if request_id in REQUEST_POOL:
                        REQUEST_POOL.remove(request_id)

            r = requests.request(
                method=request.method,
                url=target_url,
                data=body,
                headers=headers,
                stream=True,
            )

            r.raise_for_status()

            return StreamingResponse(
                stream_content(),
                status_code=r.status_code,
                headers=dict(r.headers),
            )
        except Exception as e:
            # Don't leak the id into the pool if the upstream call fails.
            if request_id in REQUEST_POOL:
                REQUEST_POOL.remove(request_id)
            raise e

    try:
        return await run_in_threadpool(get_request)
    except Exception as e:
        error_detail = "Open WebUI: Server Connection Error"
        if r is not None:
            try:
                res = r.json()
                if "error" in res:
                    error_detail = f"Ollama: {res['error']}"
            except Exception:
                error_detail = f"Ollama: {e}"

        raise HTTPException(
            status_code=r.status_code if r else 500,
            detail=error_detail,
        )