from fastapi import (
    FastAPI,
    Request,
    Response,
    HTTPException,
    Depends,
    status,
    UploadFile,
    File,
    BackgroundTasks,
)
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import StreamingResponse
from fastapi.concurrency import run_in_threadpool
from pydantic import BaseModel, ConfigDict

import os
import re
import copy
import random
import requests
import json
import uuid
import aiohttp
import asyncio
import logging
import time
from urllib.parse import urlparse
from typing import Optional, List, Union

from starlette.background import BackgroundTask

from apps.webui.models.models import Models
from apps.webui.models.users import Users
from constants import ERROR_MESSAGES
from utils.utils import (
    decode_token,
    get_current_user,
    get_verified_user,
    get_admin_user,
)

from config import (
    SRC_LOG_LEVELS,
    OLLAMA_BASE_URLS,
    ENABLE_OLLAMA_API,
    OLLAMA_GENERATE_TIMEOUT,
    ENABLE_MODEL_FILTER,
    MODEL_FILTER_LIST,
    UPLOAD_DIR,
    AppConfig,
)
from utils.misc import calculate_sha256

log = logging.getLogger(__name__)
log.setLevel(SRC_LOG_LEVELS["OLLAMA"])

app = FastAPI()
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

app.state.config = AppConfig()

app.state.config.ENABLE_MODEL_FILTER = ENABLE_MODEL_FILTER
app.state.config.MODEL_FILTER_LIST = MODEL_FILTER_LIST

app.state.config.ENABLE_OLLAMA_API = ENABLE_OLLAMA_API
app.state.config.OLLAMA_BASE_URLS = OLLAMA_BASE_URLS

app.state.MODELS = {}

# IDs of in-flight proxied requests; the deprecated proxy route below appends
# an ID per request and removal from this list signals cancellation.
REQUEST_POOL: List[str] = []

# TODO: Implement a more intelligent load-balancing mechanism for distributing
# requests among multiple backend instances. The current implementation picks a
# backend uniformly at random (random.choice). Consider weighted round-robin,
# least connections, or least response time for better resource utilization and
# performance.
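# A minimal sketch of one such alternative, weighted random selection. The
# weights are hypothetical (nothing in this file configures them); this helper
# is illustrative only and is not wired into the routes below.
def weighted_url_choice(num_urls: int, weights: List[float]) -> int:
    # random.choices draws one index with probability proportional to its
    # weight, so a backend with weight 2.0 receives roughly twice the traffic
    # of a backend with weight 1.0.
    return random.choices(range(num_urls), weights=weights, k=1)[0]
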
@app.middleware("http")
async def check_url(request: Request, call_next):
    # Lazily populate the model registry on the first request.
    if len(app.state.MODELS) == 0:
        await get_all_models()

    response = await call_next(request)
    return response


@app.head("/")
@app.get("/")
async def get_status():
    return {"status": True}

@app.get("/config")
async def get_config(user=Depends(get_admin_user)):
    return {"ENABLE_OLLAMA_API": app.state.config.ENABLE_OLLAMA_API}


class OllamaConfigForm(BaseModel):
    enable_ollama_api: Optional[bool] = None


@app.post("/config/update")
async def update_config(form_data: OllamaConfigForm, user=Depends(get_admin_user)):
    app.state.config.ENABLE_OLLAMA_API = form_data.enable_ollama_api
    return {"ENABLE_OLLAMA_API": app.state.config.ENABLE_OLLAMA_API}


@app.get("/urls")
async def get_ollama_api_urls(user=Depends(get_admin_user)):
    return {"OLLAMA_BASE_URLS": app.state.config.OLLAMA_BASE_URLS}


class UrlUpdateForm(BaseModel):
    urls: List[str]


@app.post("/urls/update")
async def update_ollama_api_url(form_data: UrlUpdateForm, user=Depends(get_admin_user)):
    app.state.config.OLLAMA_BASE_URLS = form_data.urls
    log.info(f"app.state.config.OLLAMA_BASE_URLS: {app.state.config.OLLAMA_BASE_URLS}")
    return {"OLLAMA_BASE_URLS": app.state.config.OLLAMA_BASE_URLS}

async def fetch_url(url):
    timeout = aiohttp.ClientTimeout(total=5)
    try:
        async with aiohttp.ClientSession(timeout=timeout, trust_env=True) as session:
            async with session.get(url) as response:
                return await response.json()
    except Exception as e:
        # Handle connection errors here
        log.error(f"Connection error: {e}")
        return None


async def cleanup_response(
    response: Optional[aiohttp.ClientResponse],
    session: Optional[aiohttp.ClientSession],
):
    if response:
        response.close()
    if session:
        await session.close()

async def post_streaming_url(url: str, payload: str):
    r = None
    try:
        session = aiohttp.ClientSession(
            trust_env=True,
            timeout=aiohttp.ClientTimeout(total=OLLAMA_GENERATE_TIMEOUT),
        )
        r = await session.post(url, data=payload)
        r.raise_for_status()

        # The BackgroundTask closes the upstream response and session once the
        # client has finished consuming the stream.
        return StreamingResponse(
            r.content,
            status_code=r.status,
            headers=dict(r.headers),
            background=BackgroundTask(cleanup_response, response=r, session=session),
        )
    except Exception as e:
        error_detail = "Open WebUI: Server Connection Error"
        if r is not None:
            try:
                res = await r.json()
                if "error" in res:
                    error_detail = f"Ollama: {res['error']}"
            except Exception:
                error_detail = f"Ollama: {e}"

        raise HTTPException(
            status_code=r.status if r else 500,
            detail=error_detail,
        )

def merge_models_lists(model_lists):
    merged_models = {}

    for idx, model_list in enumerate(model_lists):
        if model_list is not None:
            for model in model_list:
                digest = model["digest"]
                if digest not in merged_models:
                    model["urls"] = [idx]
                    merged_models[digest] = model
                else:
                    merged_models[digest]["urls"].append(idx)

    return list(merged_models.values())
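# For example, if two backends both report a model with digest "abc", the
# merged result keeps a single record whose "urls" field lists both backend
# indices:
#   merge_models_lists([[{"digest": "abc"}], [{"digest": "abc"}]])
#   -> [{"digest": "abc", "urls": [0, 1]}]
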
async def get_all_models():
    log.info("get_all_models()")

    if app.state.config.ENABLE_OLLAMA_API:
        tasks = [
            fetch_url(f"{url}/api/tags") for url in app.state.config.OLLAMA_BASE_URLS
        ]
        responses = await asyncio.gather(*tasks)

        models = {
            "models": merge_models_lists(
                map(
                    lambda response: response["models"] if response else None, responses
                )
            )
        }
    else:
        models = {"models": []}

    app.state.MODELS = {model["model"]: model for model in models["models"]}

    return models

@app.get("/api/tags")
@app.get("/api/tags/{url_idx}")
async def get_ollama_tags(
    url_idx: Optional[int] = None, user=Depends(get_verified_user)
):
    if url_idx is None:
        models = await get_all_models()

        if app.state.config.ENABLE_MODEL_FILTER:
            if user.role == "user":
                models["models"] = list(
                    filter(
                        lambda model: model["name"]
                        in app.state.config.MODEL_FILTER_LIST,
                        models["models"],
                    )
                )
                return models

        return models
    else:
        url = app.state.config.OLLAMA_BASE_URLS[url_idx]

        r = None
        try:
            r = requests.request(method="GET", url=f"{url}/api/tags")
            r.raise_for_status()

            return r.json()
        except Exception as e:
            log.exception(e)
            error_detail = "Open WebUI: Server Connection Error"

            if r is not None:
                try:
                    res = r.json()
                    if "error" in res:
                        error_detail = f"Ollama: {res['error']}"
                except Exception:
                    error_detail = f"Ollama: {e}"

            raise HTTPException(
                status_code=r.status_code if r else 500,
                detail=error_detail,
            )

@app.get("/api/version")
@app.get("/api/version/{url_idx}")
async def get_ollama_versions(url_idx: Optional[int] = None):
    if app.state.config.ENABLE_OLLAMA_API:
        if url_idx is None:
            # Returns the lowest version across all configured backends.
            tasks = [
                fetch_url(f"{url}/api/version")
                for url in app.state.config.OLLAMA_BASE_URLS
            ]
            responses = await asyncio.gather(*tasks)
            responses = list(filter(lambda x: x is not None, responses))

            if len(responses) > 0:
                lowest_version = min(
                    responses,
                    # Strip a leading "v" and any "-suffix" so versions compare
                    # numerically, e.g. "v0.1.32-rc1" -> (0, 1, 32).
                    key=lambda x: tuple(
                        map(int, re.sub(r"^v|-.*", "", x["version"]).split("."))
                    ),
                )

                return {"version": lowest_version["version"]}
            else:
                raise HTTPException(
                    status_code=500,
                    detail=ERROR_MESSAGES.OLLAMA_NOT_FOUND,
                )
        else:
            url = app.state.config.OLLAMA_BASE_URLS[url_idx]

            r = None
            try:
                r = requests.request(method="GET", url=f"{url}/api/version")
                r.raise_for_status()

                return r.json()
            except Exception as e:
                log.exception(e)
                error_detail = "Open WebUI: Server Connection Error"

                if r is not None:
                    try:
                        res = r.json()
                        if "error" in res:
                            error_detail = f"Ollama: {res['error']}"
                    except Exception:
                        error_detail = f"Ollama: {e}"

                raise HTTPException(
                    status_code=r.status_code if r else 500,
                    detail=error_detail,
                )
    else:
        return {"version": False}

class ModelNameForm(BaseModel):
    name: str


@app.post("/api/pull")
@app.post("/api/pull/{url_idx}")
async def pull_model(
    form_data: ModelNameForm, url_idx: int = 0, user=Depends(get_admin_user)
):
    url = app.state.config.OLLAMA_BASE_URLS[url_idx]
    log.info(f"url: {url}")

    # Admins should be able to pull models from any source.
    payload = {**form_data.model_dump(exclude_none=True), "insecure": True}

    return await post_streaming_url(f"{url}/api/pull", json.dumps(payload))
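# Example request body for POST /api/pull (the model name is illustrative):
#   {"name": "llama3"}
# The endpoint proxies Ollama's pull stream back to the client as
# newline-delimited JSON progress objects.
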
class PushModelForm(BaseModel):
    name: str
    insecure: Optional[bool] = None
    stream: Optional[bool] = None


@app.delete("/api/push")
@app.delete("/api/push/{url_idx}")
async def push_model(
    form_data: PushModelForm,
    url_idx: Optional[int] = None,
    user=Depends(get_admin_user),
):
    if url_idx is None:
        if form_data.name in app.state.MODELS:
            url_idx = app.state.MODELS[form_data.name]["urls"][0]
        else:
            raise HTTPException(
                status_code=400,
                detail=ERROR_MESSAGES.MODEL_NOT_FOUND(form_data.name),
            )

    url = app.state.config.OLLAMA_BASE_URLS[url_idx]
    log.debug(f"url: {url}")

    return await post_streaming_url(
        f"{url}/api/push", form_data.model_dump_json(exclude_none=True).encode()
    )

class CreateModelForm(BaseModel):
    name: str
    modelfile: Optional[str] = None
    stream: Optional[bool] = None
    path: Optional[str] = None


@app.post("/api/create")
@app.post("/api/create/{url_idx}")
async def create_model(
    form_data: CreateModelForm, url_idx: int = 0, user=Depends(get_admin_user)
):
    log.debug(f"form_data: {form_data}")
    url = app.state.config.OLLAMA_BASE_URLS[url_idx]
    log.info(f"url: {url}")

    return await post_streaming_url(
        f"{url}/api/create", form_data.model_dump_json(exclude_none=True).encode()
    )

class CopyModelForm(BaseModel):
    source: str
    destination: str


@app.post("/api/copy")
@app.post("/api/copy/{url_idx}")
async def copy_model(
    form_data: CopyModelForm,
    url_idx: Optional[int] = None,
    user=Depends(get_admin_user),
):
    if url_idx is None:
        if form_data.source in app.state.MODELS:
            url_idx = app.state.MODELS[form_data.source]["urls"][0]
        else:
            raise HTTPException(
                status_code=400,
                detail=ERROR_MESSAGES.MODEL_NOT_FOUND(form_data.source),
            )

    url = app.state.config.OLLAMA_BASE_URLS[url_idx]
    log.info(f"url: {url}")

    r = None  # initialized so the except block can safely inspect it
    try:
        r = requests.request(
            method="POST",
            url=f"{url}/api/copy",
            data=form_data.model_dump_json(exclude_none=True).encode(),
        )
        r.raise_for_status()

        log.debug(f"r.text: {r.text}")

        return True
    except Exception as e:
        log.exception(e)
        error_detail = "Open WebUI: Server Connection Error"

        if r is not None:
            try:
                res = r.json()
                if "error" in res:
                    error_detail = f"Ollama: {res['error']}"
            except Exception:
                error_detail = f"Ollama: {e}"

        raise HTTPException(
            status_code=r.status_code if r else 500,
            detail=error_detail,
        )

@app.delete("/api/delete")
@app.delete("/api/delete/{url_idx}")
async def delete_model(
    form_data: ModelNameForm,
    url_idx: Optional[int] = None,
    user=Depends(get_admin_user),
):
    if url_idx is None:
        if form_data.name in app.state.MODELS:
            url_idx = app.state.MODELS[form_data.name]["urls"][0]
        else:
            raise HTTPException(
                status_code=400,
                detail=ERROR_MESSAGES.MODEL_NOT_FOUND(form_data.name),
            )

    url = app.state.config.OLLAMA_BASE_URLS[url_idx]
    log.info(f"url: {url}")

    r = None
    try:
        r = requests.request(
            method="DELETE",
            url=f"{url}/api/delete",
            data=form_data.model_dump_json(exclude_none=True).encode(),
        )
        r.raise_for_status()

        log.debug(f"r.text: {r.text}")

        return True
    except Exception as e:
        log.exception(e)
        error_detail = "Open WebUI: Server Connection Error"

        if r is not None:
            try:
                res = r.json()
                if "error" in res:
                    error_detail = f"Ollama: {res['error']}"
            except Exception:
                error_detail = f"Ollama: {e}"

        raise HTTPException(
            status_code=r.status_code if r else 500,
            detail=error_detail,
        )

@app.post("/api/show")
async def show_model_info(form_data: ModelNameForm, user=Depends(get_verified_user)):
    if form_data.name not in app.state.MODELS:
        raise HTTPException(
            status_code=400,
            detail=ERROR_MESSAGES.MODEL_NOT_FOUND(form_data.name),
        )

    url_idx = random.choice(app.state.MODELS[form_data.name]["urls"])
    url = app.state.config.OLLAMA_BASE_URLS[url_idx]
    log.info(f"url: {url}")

    r = None
    try:
        r = requests.request(
            method="POST",
            url=f"{url}/api/show",
            data=form_data.model_dump_json(exclude_none=True).encode(),
        )
        r.raise_for_status()

        return r.json()
    except Exception as e:
        log.exception(e)
        error_detail = "Open WebUI: Server Connection Error"

        if r is not None:
            try:
                res = r.json()
                if "error" in res:
                    error_detail = f"Ollama: {res['error']}"
            except Exception:
                error_detail = f"Ollama: {e}"

        raise HTTPException(
            status_code=r.status_code if r else 500,
            detail=error_detail,
        )

class GenerateEmbeddingsForm(BaseModel):
    model: str
    prompt: str
    options: Optional[dict] = None
    keep_alive: Optional[Union[int, str]] = None


@app.post("/api/embeddings")
@app.post("/api/embeddings/{url_idx}")
async def generate_embeddings(
    form_data: GenerateEmbeddingsForm,
    url_idx: Optional[int] = None,
    user=Depends(get_verified_user),
):
    if url_idx is None:
        model = form_data.model

        if ":" not in model:
            model = f"{model}:latest"

        if model in app.state.MODELS:
            url_idx = random.choice(app.state.MODELS[model]["urls"])
        else:
            raise HTTPException(
                status_code=400,
                detail=ERROR_MESSAGES.MODEL_NOT_FOUND(form_data.model),
            )

    url = app.state.config.OLLAMA_BASE_URLS[url_idx]
    log.info(f"url: {url}")

    r = None
    try:
        r = requests.request(
            method="POST",
            url=f"{url}/api/embeddings",
            data=form_data.model_dump_json(exclude_none=True).encode(),
        )
        r.raise_for_status()

        return r.json()
    except Exception as e:
        log.exception(e)
        error_detail = "Open WebUI: Server Connection Error"

        if r is not None:
            try:
                res = r.json()
                if "error" in res:
                    error_detail = f"Ollama: {res['error']}"
            except Exception:
                error_detail = f"Ollama: {e}"

        raise HTTPException(
            status_code=r.status_code if r else 500,
            detail=error_detail,
        )

def generate_ollama_embeddings(
    form_data: GenerateEmbeddingsForm,
    url_idx: Optional[int] = None,
):
    log.info(f"generate_ollama_embeddings {form_data}")

    if url_idx is None:
        model = form_data.model

        if ":" not in model:
            model = f"{model}:latest"

        if model in app.state.MODELS:
            url_idx = random.choice(app.state.MODELS[model]["urls"])
        else:
            raise HTTPException(
                status_code=400,
                detail=ERROR_MESSAGES.MODEL_NOT_FOUND(form_data.model),
            )

    url = app.state.config.OLLAMA_BASE_URLS[url_idx]
    log.info(f"url: {url}")

    r = None
    try:
        r = requests.request(
            method="POST",
            url=f"{url}/api/embeddings",
            data=form_data.model_dump_json(exclude_none=True).encode(),
        )
        r.raise_for_status()

        data = r.json()

        log.info(f"generate_ollama_embeddings {data}")

        if "embedding" in data:
            return data["embedding"]
        else:
            raise Exception("Something went wrong :/")
    except Exception as e:
        log.exception(e)
        error_detail = "Open WebUI: Server Connection Error"

        if r is not None:
            try:
                res = r.json()
                if "error" in res:
                    error_detail = f"Ollama: {res['error']}"
            except Exception:
                error_detail = f"Ollama: {e}"

        raise Exception(error_detail)

class GenerateCompletionForm(BaseModel):
    model: str
    prompt: str
    images: Optional[List[str]] = None
    format: Optional[str] = None
    options: Optional[dict] = None
    system: Optional[str] = None
    template: Optional[str] = None
    context: Optional[str] = None
    stream: Optional[bool] = True
    raw: Optional[bool] = None
    keep_alive: Optional[Union[int, str]] = None


@app.post("/api/generate")
@app.post("/api/generate/{url_idx}")
async def generate_completion(
    form_data: GenerateCompletionForm,
    url_idx: Optional[int] = None,
    user=Depends(get_verified_user),
):
    if url_idx is None:
        model = form_data.model

        if ":" not in model:
            model = f"{model}:latest"

        if model in app.state.MODELS:
            url_idx = random.choice(app.state.MODELS[model]["urls"])
        else:
            raise HTTPException(
                status_code=400,
                detail=ERROR_MESSAGES.MODEL_NOT_FOUND(form_data.model),
            )

    url = app.state.config.OLLAMA_BASE_URLS[url_idx]
    log.info(f"url: {url}")

    return await post_streaming_url(
        f"{url}/api/generate", form_data.model_dump_json(exclude_none=True).encode()
    )


class ChatMessage(BaseModel):
    role: str
    content: str
    images: Optional[List[str]] = None


class GenerateChatCompletionForm(BaseModel):
    model: str
    messages: List[ChatMessage]
    format: Optional[str] = None
    options: Optional[dict] = None
    template: Optional[str] = None
    stream: Optional[bool] = None
    keep_alive: Optional[Union[int, str]] = None

@app.post("/api/chat")
@app.post("/api/chat/{url_idx}")
async def generate_chat_completion(
    form_data: GenerateChatCompletionForm,
    url_idx: Optional[int] = None,
    user=Depends(get_verified_user),
):
    log.debug(
        "form_data.model_dump_json(exclude_none=True).encode(): {0} ".format(
            form_data.model_dump_json(exclude_none=True).encode()
        )
    )

    payload = {
        **form_data.model_dump(exclude_none=True),
    }

    model_id = form_data.model
    model_info = Models.get_model_by_id(model_id)

    if model_info:
        if model_info.base_model_id:
            payload["model"] = model_info.base_model_id

        model_info.params = model_info.params.model_dump()

        if model_info.params:
            payload["options"] = {}

            if model_info.params.get("mirostat", None):
                payload["options"]["mirostat"] = model_info.params.get("mirostat", None)

            if model_info.params.get("mirostat_eta", None):
                payload["options"]["mirostat_eta"] = model_info.params.get(
                    "mirostat_eta", None
                )

            if model_info.params.get("mirostat_tau", None):
                payload["options"]["mirostat_tau"] = model_info.params.get(
                    "mirostat_tau", None
                )

            if model_info.params.get("num_ctx", None):
                payload["options"]["num_ctx"] = model_info.params.get("num_ctx", None)

            if model_info.params.get("repeat_last_n", None):
                payload["options"]["repeat_last_n"] = model_info.params.get(
                    "repeat_last_n", None
                )

            if model_info.params.get("frequency_penalty", None):
                # Ollama's repeat_penalty is the closest equivalent of the
                # OpenAI-style frequency_penalty parameter.
                payload["options"]["repeat_penalty"] = model_info.params.get(
                    "frequency_penalty", None
                )

            if model_info.params.get("temperature", None) is not None:
                payload["options"]["temperature"] = model_info.params.get(
                    "temperature", None
                )

            if model_info.params.get("seed", None):
                payload["options"]["seed"] = model_info.params.get("seed", None)

            if model_info.params.get("stop", None):
                payload["options"]["stop"] = (
                    [
                        bytes(stop, "utf-8").decode("unicode_escape")
                        for stop in model_info.params["stop"]
                    ]
                    if model_info.params.get("stop", None)
                    else None
                )

            if model_info.params.get("tfs_z", None):
                payload["options"]["tfs_z"] = model_info.params.get("tfs_z", None)

            if model_info.params.get("max_tokens", None):
                payload["options"]["num_predict"] = model_info.params.get(
                    "max_tokens", None
                )

            if model_info.params.get("top_k", None):
                payload["options"]["top_k"] = model_info.params.get("top_k", None)

            if model_info.params.get("top_p", None):
                payload["options"]["top_p"] = model_info.params.get("top_p", None)

            if model_info.params.get("use_mmap", None):
                payload["options"]["use_mmap"] = model_info.params.get("use_mmap", None)

            if model_info.params.get("use_mlock", None):
                payload["options"]["use_mlock"] = model_info.params.get(
                    "use_mlock", None
                )

            if model_info.params.get("num_thread", None):
                payload["options"]["num_thread"] = model_info.params.get(
                    "num_thread", None
                )

        if model_info.params.get("system", None):
            # If the payload already has a system message, prepend the model's
            # system prompt to it; otherwise insert a new system message.
            if payload.get("messages"):
                for message in payload["messages"]:
                    if message.get("role") == "system":
                        message["content"] = (
                            model_info.params.get("system", None) + message["content"]
                        )
                        break
                else:
                    payload["messages"].insert(
                        0,
                        {
                            "role": "system",
                            "content": model_info.params.get("system", None),
                        },
                    )

    if url_idx is None:
        if ":" not in payload["model"]:
            payload["model"] = f"{payload['model']}:latest"

        if payload["model"] in app.state.MODELS:
            url_idx = random.choice(app.state.MODELS[payload["model"]]["urls"])
        else:
            raise HTTPException(
                status_code=400,
                detail=ERROR_MESSAGES.MODEL_NOT_FOUND(form_data.model),
            )

    url = app.state.config.OLLAMA_BASE_URLS[url_idx]
    log.info(f"url: {url}")
    log.debug(payload)

    return await post_streaming_url(f"{url}/api/chat", json.dumps(payload))
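# Example request body for POST /api/chat (model and message are illustrative):
#   {"model": "llama3:latest", "messages": [{"role": "user", "content": "Hello"}]}
# Ollama streams the reply back as newline-delimited JSON chunks unless
# "stream" is set to false.
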
# TODO: we should update this part once Ollama supports other types
class OpenAIChatMessageContent(BaseModel):
    type: str
    model_config = ConfigDict(extra="allow")


class OpenAIChatMessage(BaseModel):
    role: str
    content: Union[str, OpenAIChatMessageContent]

    model_config = ConfigDict(extra="allow")


class OpenAIChatCompletionForm(BaseModel):
    model: str
    messages: List[OpenAIChatMessage]

    model_config = ConfigDict(extra="allow")

@app.post("/v1/chat/completions")
@app.post("/v1/chat/completions/{url_idx}")
async def generate_openai_chat_completion(
    form_data: OpenAIChatCompletionForm,
    url_idx: Optional[int] = None,
    user=Depends(get_verified_user),
):
    payload = {
        **form_data.model_dump(exclude_none=True),
    }

    model_id = form_data.model
    model_info = Models.get_model_by_id(model_id)

    if model_info:
        if model_info.base_model_id:
            payload["model"] = model_info.base_model_id

        model_info.params = model_info.params.model_dump()

        if model_info.params:
            payload["temperature"] = model_info.params.get("temperature", None)
            payload["top_p"] = model_info.params.get("top_p", None)
            payload["max_tokens"] = model_info.params.get("max_tokens", None)
            payload["frequency_penalty"] = model_info.params.get(
                "frequency_penalty", None
            )
            payload["seed"] = model_info.params.get("seed", None)
            payload["stop"] = (
                [
                    bytes(stop, "utf-8").decode("unicode_escape")
                    for stop in model_info.params["stop"]
                ]
                if model_info.params.get("stop", None)
                else None
            )

        if model_info.params.get("system", None):
            # If the payload already has a system message, prepend the model's
            # system prompt to it; otherwise insert a new system message.
            if payload.get("messages"):
                for message in payload["messages"]:
                    if message.get("role") == "system":
                        message["content"] = (
                            model_info.params.get("system", None) + message["content"]
                        )
                        break
                else:
                    payload["messages"].insert(
                        0,
                        {
                            "role": "system",
                            "content": model_info.params.get("system", None),
                        },
                    )

    if url_idx is None:
        if ":" not in payload["model"]:
            payload["model"] = f"{payload['model']}:latest"

        if payload["model"] in app.state.MODELS:
            url_idx = random.choice(app.state.MODELS[payload["model"]]["urls"])
        else:
            raise HTTPException(
                status_code=400,
                detail=ERROR_MESSAGES.MODEL_NOT_FOUND(form_data.model),
            )

    url = app.state.config.OLLAMA_BASE_URLS[url_idx]
    log.info(f"url: {url}")

    return await post_streaming_url(f"{url}/v1/chat/completions", json.dumps(payload))

@app.get("/v1/models")
@app.get("/v1/models/{url_idx}")
async def get_openai_models(
    url_idx: Optional[int] = None,
    user=Depends(get_verified_user),
):
    if url_idx is None:
        models = await get_all_models()

        if app.state.config.ENABLE_MODEL_FILTER:
            if user.role == "user":
                models["models"] = list(
                    filter(
                        lambda model: model["name"]
                        in app.state.config.MODEL_FILTER_LIST,
                        models["models"],
                    )
                )

        return {
            "data": [
                {
                    "id": model["model"],
                    "object": "model",
                    "created": int(time.time()),
                    "owned_by": "openai",
                }
                for model in models["models"]
            ],
            "object": "list",
        }
    else:
        url = app.state.config.OLLAMA_BASE_URLS[url_idx]

        r = None
        try:
            r = requests.request(method="GET", url=f"{url}/api/tags")
            r.raise_for_status()

            models = r.json()

            return {
                "data": [
                    {
                        "id": model["model"],
                        "object": "model",
                        "created": int(time.time()),
                        "owned_by": "openai",
                    }
                    for model in models["models"]
                ],
                "object": "list",
            }
        except Exception as e:
            log.exception(e)
            error_detail = "Open WebUI: Server Connection Error"

            if r is not None:
                try:
                    res = r.json()
                    if "error" in res:
                        error_detail = f"Ollama: {res['error']}"
                except Exception:
                    error_detail = f"Ollama: {e}"

            raise HTTPException(
                status_code=r.status_code if r else 500,
                detail=error_detail,
            )

class UrlForm(BaseModel):
    url: str


class UploadBlobForm(BaseModel):
    filename: str


def parse_huggingface_url(hf_url):
    try:
        # Parse the URL
        parsed_url = urlparse(hf_url)

        # Get the path and split it into components
        path_components = parsed_url.path.split("/")

        # The last path component is the model file name
        model_file = path_components[-1]

        return model_file
    except ValueError:
        return None
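# For example:
#   parse_huggingface_url(
#       "https://huggingface.co/TheBloke/stablelm-zephyr-3b-GGUF/resolve/main/stablelm-zephyr-3b.Q2_K.gguf"
#   )
#   -> "stablelm-zephyr-3b.Q2_K.gguf"
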
async def download_file_stream(
    ollama_url, file_url, file_path, file_name, chunk_size=1024 * 1024
):
    done = False

    if os.path.exists(file_path):
        current_size = os.path.getsize(file_path)
    else:
        current_size = 0

    # Resume a partial download if the file already exists locally.
    headers = {"Range": f"bytes={current_size}-"} if current_size > 0 else {}

    timeout = aiohttp.ClientTimeout(total=600)  # Set the timeout

    async with aiohttp.ClientSession(timeout=timeout, trust_env=True) as session:
        async with session.get(file_url, headers=headers) as response:
            total_size = int(response.headers.get("content-length", 0)) + current_size

            with open(file_path, "ab+") as file:
                async for data in response.content.iter_chunked(chunk_size):
                    current_size += len(data)
                    file.write(data)

                    done = current_size == total_size
                    progress = round((current_size / total_size) * 100, 2)

                    yield f'data: {{"progress": {progress}, "completed": {current_size}, "total": {total_size}}}\n\n'

                if done:
                    file.seek(0)
                    hashed = calculate_sha256(file)
                    file.seek(0)

                    url = f"{ollama_url}/api/blobs/sha256:{hashed}"
                    response = requests.post(url, data=file)

                    if response.ok:
                        res = {
                            "done": done,
                            "blob": f"sha256:{hashed}",
                            "name": file_name,
                        }
                        os.remove(file_path)

                        yield f"data: {json.dumps(res)}\n\n"
                    else:
                        raise Exception(
                            "Ollama: Could not create blob, Please try again."
                        )
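# Each yielded line is a server-sent event; the values below are illustrative:
#   data: {"progress": 42.5, "completed": 44564480, "total": 104857600}
# followed, on completion, by a final event naming the created blob:
#   data: {"done": true, "blob": "sha256:<digest>", "name": "<file_name>"}
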
@app.post("/models/download")
@app.post("/models/download/{url_idx}")
async def download_model(
    form_data: UrlForm,
    url_idx: Optional[int] = None,
):
    allowed_hosts = ["https://huggingface.co/", "https://github.com/"]

    if not any(form_data.url.startswith(host) for host in allowed_hosts):
        raise HTTPException(
            status_code=400,
            detail="Invalid file_url. Only URLs from allowed hosts are permitted.",
        )

    if url_idx is None:
        url_idx = 0
    url = app.state.config.OLLAMA_BASE_URLS[url_idx]

    file_name = parse_huggingface_url(form_data.url)

    if file_name:
        file_path = f"{UPLOAD_DIR}/{file_name}"

        return StreamingResponse(
            download_file_stream(url, form_data.url, file_path, file_name),
        )
    else:
        return None

@app.post("/models/upload")
@app.post("/models/upload/{url_idx}")
def upload_model(file: UploadFile = File(...), url_idx: Optional[int] = None):
    if url_idx is None:
        url_idx = 0
    ollama_url = app.state.config.OLLAMA_BASE_URLS[url_idx]

    file_path = f"{UPLOAD_DIR}/{file.filename}"

    # Save the uploaded file in chunks
    with open(file_path, "wb+") as f:
        for chunk in file.file:
            f.write(chunk)

    def file_process_stream():
        nonlocal ollama_url
        total_size = os.path.getsize(file_path)
        chunk_size = 1024 * 1024
        try:
            with open(file_path, "rb") as f:
                total = 0
                done = False

                while not done:
                    chunk = f.read(chunk_size)
                    if not chunk:
                        done = True
                        continue

                    total += len(chunk)
                    progress = round((total / total_size) * 100, 2)

                    res = {
                        "progress": progress,
                        "total": total_size,
                        "completed": total,
                    }
                    yield f"data: {json.dumps(res)}\n\n"

                if done:
                    f.seek(0)
                    hashed = calculate_sha256(f)
                    f.seek(0)

                    url = f"{ollama_url}/api/blobs/sha256:{hashed}"
                    response = requests.post(url, data=f)

                    if response.ok:
                        res = {
                            "done": done,
                            "blob": f"sha256:{hashed}",
                            "name": file.filename,
                        }
                        os.remove(file_path)

                        yield f"data: {json.dumps(res)}\n\n"
                    else:
                        raise Exception(
                            "Ollama: Could not create blob, Please try again."
                        )

        except Exception as e:
            res = {"error": str(e)}
            yield f"data: {json.dumps(res)}\n\n"

    return StreamingResponse(file_process_stream(), media_type="text/event-stream")
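# The upload stream follows the same event format as download_file_stream:
# progress events while the blob is read, then a final event such as
#   data: {"done": true, "blob": "sha256:<digest>", "name": "model.gguf"}
# (file name illustrative), or an {"error": ...} event if the blob upload fails.
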
@app.api_route("/{path:path}", methods=["GET", "POST", "PUT", "DELETE"])
async def deprecated_proxy(
    path: str, request: Request, user=Depends(get_verified_user)
):
    url = app.state.config.OLLAMA_BASE_URLS[0]
    target_url = f"{url}/{path}"

    body = await request.body()
    headers = dict(request.headers)

    if user.role in ["user", "admin"]:
        if path in ["pull", "delete", "push", "copy", "create"]:
            if user.role != "admin":
                raise HTTPException(
                    status_code=status.HTTP_401_UNAUTHORIZED,
                    detail=ERROR_MESSAGES.ACCESS_PROHIBITED,
                )
    else:
        raise HTTPException(
            status_code=status.HTTP_401_UNAUTHORIZED,
            detail=ERROR_MESSAGES.ACCESS_PROHIBITED,
        )

    headers.pop("host", None)
    headers.pop("authorization", None)
    headers.pop("origin", None)
    headers.pop("referer", None)

    r = None

    def get_request():
        nonlocal r

        request_id = str(uuid.uuid4())
        try:
            REQUEST_POOL.append(request_id)

            def stream_content():
                try:
                    if path == "generate":
                        data = json.loads(body.decode("utf-8"))

                        if data.get("stream", True):
                            yield json.dumps({"id": request_id, "done": False}) + "\n"

                    elif path == "chat":
                        yield json.dumps({"id": request_id, "done": False}) + "\n"

                    for chunk in r.iter_content(chunk_size=8192):
                        if request_id in REQUEST_POOL:
                            yield chunk
                        else:
                            log.warning("User: canceled request")
                            break
                finally:
                    if hasattr(r, "close"):
                        r.close()
                    if request_id in REQUEST_POOL:
                        REQUEST_POOL.remove(request_id)

            r = requests.request(
                method=request.method,
                url=target_url,
                data=body,
                headers=headers,
                stream=True,
            )

            r.raise_for_status()

            return StreamingResponse(
                stream_content(),
                status_code=r.status_code,
                headers=dict(r.headers),
            )
        except Exception as e:
            raise e

    try:
        return await run_in_threadpool(get_request)
    except Exception as e:
        error_detail = "Open WebUI: Server Connection Error"
        if r is not None:
            try:
                res = r.json()
                if "error" in res:
                    error_detail = f"Ollama: {res['error']}"
            except Exception:
                error_detail = f"Ollama: {e}"

        raise HTTPException(
            status_code=r.status_code if r else 500,
            detail=error_detail,
        )