from fastapi import (
    FastAPI,
    Request,
    Response,
    HTTPException,
    Depends,
    status,
    UploadFile,
    File,
    BackgroundTasks,
)
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import StreamingResponse
from fastapi.concurrency import run_in_threadpool

from pydantic import BaseModel, ConfigDict

import os
import random
import requests
import json
import uuid
import aiohttp
import asyncio
import logging

from urllib.parse import urlparse
from typing import Optional, List, Union

from apps.web.models.users import Users
from constants import ERROR_MESSAGES
from utils.utils import decode_token, get_current_user, get_admin_user
from utils.misc import calculate_sha256

from config import (
    SRC_LOG_LEVELS,
    OLLAMA_BASE_URLS,
    MODEL_FILTER_ENABLED,
    MODEL_FILTER_LIST,
    UPLOAD_DIR,
)

log = logging.getLogger(__name__)
log.setLevel(SRC_LOG_LEVELS["OLLAMA"])

app = FastAPI()
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

app.state.MODEL_FILTER_ENABLED = MODEL_FILTER_ENABLED
app.state.MODEL_FILTER_LIST = MODEL_FILTER_LIST
app.state.OLLAMA_BASE_URLS = OLLAMA_BASE_URLS
app.state.MODELS = {}

# IDs of in-flight streaming requests; removing an ID from the pool cancels
# the corresponding stream mid-flight (see cancel_ollama_request below).
REQUEST_POOL = []

# TODO: Implement a more intelligent load balancing mechanism for distributing
# requests among multiple backend instances. The current implementation simply
# picks a backend at random (random.choice). Consider algorithms such as
# weighted round-robin, least connections, or least response time for better
# resource utilization and performance. A sketch of one alternative follows.
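

# A minimal sketch of weighted random selection, assuming each backend URL is
# assigned a relative capacity weight. This helper is illustrative only and is
# not wired into the app.
def choose_weighted_url(urls: List[str], weights: List[int]) -> str:
    # Draw one URL with probability proportional to its weight, so a backend
    # with weight 3 receives roughly three times the traffic of one with
    # weight 1.
    return random.choices(urls, weights=weights, k=1)[0]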


@app.middleware("http")
async def check_url(request: Request, call_next):
    # Lazily populate the model registry on the first request.
    if len(app.state.MODELS) == 0:
        await get_all_models()

    response = await call_next(request)
    return response


@app.head("/")
@app.get("/")
async def get_status():
    return {"status": True}


@app.get("/urls")
async def get_ollama_api_urls(user=Depends(get_admin_user)):
    return {"OLLAMA_BASE_URLS": app.state.OLLAMA_BASE_URLS}


class UrlUpdateForm(BaseModel):
    urls: List[str]


@app.post("/urls/update")
async def update_ollama_api_url(form_data: UrlUpdateForm, user=Depends(get_admin_user)):
    app.state.OLLAMA_BASE_URLS = form_data.urls
    log.info(f"app.state.OLLAMA_BASE_URLS: {app.state.OLLAMA_BASE_URLS}")
    return {"OLLAMA_BASE_URLS": app.state.OLLAMA_BASE_URLS}


@app.get("/cancel/{request_id}")
async def cancel_ollama_request(request_id: str, user=Depends(get_current_user)):
    if user:
        if request_id in REQUEST_POOL:
            REQUEST_POOL.remove(request_id)
        return True
    else:
        raise HTTPException(status_code=401, detail=ERROR_MESSAGES.ACCESS_PROHIBITED)


async def fetch_url(url):
    try:
        async with aiohttp.ClientSession() as session:
            async with session.get(url) as response:
                return await response.json()
    except Exception as e:
        # Handle connection errors here
        log.error(f"Connection error: {e}")
        return None


def merge_models_lists(model_lists):
    merged_models = {}

    for idx, model_list in enumerate(model_lists):
        if model_list is not None:
            for model in model_list:
                digest = model["digest"]
                if digest not in merged_models:
                    model["urls"] = [idx]
                    merged_models[digest] = model
                else:
                    merged_models[digest]["urls"].append(idx)

    return list(merged_models.values())
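

# Worked example (digests are hypothetical): if backend 0 and backend 1 both
# report a model with digest "abc", merge_models_lists yields a single entry
# for it with "urls" == [0, 1], so requests can be routed to either backend.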


async def get_all_models():
    log.info("get_all_models()")
    tasks = [fetch_url(f"{url}/api/tags") for url in app.state.OLLAMA_BASE_URLS]
    responses = await asyncio.gather(*tasks)

    models = {
        "models": merge_models_lists(
            map(lambda response: response["models"] if response else None, responses)
        )
    }

    app.state.MODELS = {model["model"]: model for model in models["models"]}

    return models


@app.get("/api/tags")
@app.get("/api/tags/{url_idx}")
async def get_ollama_tags(
    url_idx: Optional[int] = None, user=Depends(get_current_user)
):
    if url_idx is None:
        models = await get_all_models()

        if app.state.MODEL_FILTER_ENABLED and user.role == "user":
            models["models"] = list(
                filter(
                    lambda model: model["name"] in app.state.MODEL_FILTER_LIST,
                    models["models"],
                )
            )

        return models
    else:
        url = app.state.OLLAMA_BASE_URLS[url_idx]

        r = None
        try:
            r = requests.request(method="GET", url=f"{url}/api/tags")
            r.raise_for_status()

            return r.json()
        except Exception as e:
            log.exception(e)
            error_detail = "Open WebUI: Server Connection Error"
            if r is not None:
                try:
                    res = r.json()
                    if "error" in res:
                        error_detail = f"Ollama: {res['error']}"
                except Exception:
                    error_detail = f"Ollama: {e}"

            raise HTTPException(
                status_code=r.status_code if r is not None else 500,
                detail=error_detail,
            )


@app.get("/api/version")
@app.get("/api/version/{url_idx}")
async def get_ollama_versions(url_idx: Optional[int] = None):
    if url_idx is None:
        # Return the lowest version across all backends so clients only rely
        # on features every backend supports. Pre-release suffixes are dropped
        # before comparison, e.g. "0.1.27-rc1" compares as (0, 1, 27).
        tasks = [fetch_url(f"{url}/api/version") for url in app.state.OLLAMA_BASE_URLS]
        responses = await asyncio.gather(*tasks)
        responses = list(filter(lambda x: x is not None, responses))

        if len(responses) > 0:
            lowest_version = min(
                responses,
                key=lambda x: tuple(map(int, x["version"].split("-")[0].split("."))),
            )

            return {"version": lowest_version["version"]}
        else:
            raise HTTPException(
                status_code=500,
                detail=ERROR_MESSAGES.OLLAMA_NOT_FOUND,
            )
    else:
        url = app.state.OLLAMA_BASE_URLS[url_idx]

        r = None
        try:
            r = requests.request(method="GET", url=f"{url}/api/version")
            r.raise_for_status()

            return r.json()
        except Exception as e:
            log.exception(e)
            error_detail = "Open WebUI: Server Connection Error"
            if r is not None:
                try:
                    res = r.json()
                    if "error" in res:
                        error_detail = f"Ollama: {res['error']}"
                except Exception:
                    error_detail = f"Ollama: {e}"

            raise HTTPException(
                status_code=r.status_code if r is not None else 500,
                detail=error_detail,
            )


class ModelNameForm(BaseModel):
    name: str


@app.post("/api/pull")
@app.post("/api/pull/{url_idx}")
async def pull_model(
    form_data: ModelNameForm, url_idx: int = 0, user=Depends(get_admin_user)
):
    url = app.state.OLLAMA_BASE_URLS[url_idx]
    log.info(f"url: {url}")

    r = None

    def get_request():
        nonlocal url
        nonlocal r

        request_id = str(uuid.uuid4())
        try:
            REQUEST_POOL.append(request_id)

            def stream_content():
                try:
                    yield json.dumps({"id": request_id, "done": False}) + "\n"

                    for chunk in r.iter_content(chunk_size=8192):
                        if request_id in REQUEST_POOL:
                            yield chunk
                        else:
                            log.warning("User: canceled request")
                            break
                finally:
                    if hasattr(r, "close"):
                        r.close()
                    if request_id in REQUEST_POOL:
                        REQUEST_POOL.remove(request_id)

            r = requests.request(
                method="POST",
                url=f"{url}/api/pull",
                data=form_data.model_dump_json(exclude_none=True).encode(),
                stream=True,
            )

            r.raise_for_status()

            return StreamingResponse(
                stream_content(),
                status_code=r.status_code,
                headers=dict(r.headers),
            )
        except Exception:
            raise

    try:
        return await run_in_threadpool(get_request)
    except Exception as e:
        log.exception(e)
        error_detail = "Open WebUI: Server Connection Error"
        if r is not None:
            try:
                res = r.json()
                if "error" in res:
                    error_detail = f"Ollama: {res['error']}"
            except Exception:
                error_detail = f"Ollama: {e}"

        raise HTTPException(
            status_code=r.status_code if r is not None else 500,
            detail=error_detail,
        )


class PushModelForm(BaseModel):
    name: str
    insecure: Optional[bool] = None
    stream: Optional[bool] = None


@app.delete("/api/push")
@app.delete("/api/push/{url_idx}")
async def push_model(
    form_data: PushModelForm,
    url_idx: Optional[int] = None,
    user=Depends(get_admin_user),
):
    if url_idx is None:
        if form_data.name in app.state.MODELS:
            url_idx = app.state.MODELS[form_data.name]["urls"][0]
        else:
            raise HTTPException(
                status_code=400,
                detail=ERROR_MESSAGES.MODEL_NOT_FOUND(form_data.name),
            )

    url = app.state.OLLAMA_BASE_URLS[url_idx]
    log.debug(f"url: {url}")

    r = None

    def get_request():
        nonlocal url
        nonlocal r
        try:

            def stream_content():
                for chunk in r.iter_content(chunk_size=8192):
                    yield chunk

            r = requests.request(
                method="POST",
                url=f"{url}/api/push",
                data=form_data.model_dump_json(exclude_none=True).encode(),
            )

            r.raise_for_status()

            return StreamingResponse(
                stream_content(),
                status_code=r.status_code,
                headers=dict(r.headers),
            )
        except Exception:
            raise

    try:
        return await run_in_threadpool(get_request)
    except Exception as e:
        log.exception(e)
        error_detail = "Open WebUI: Server Connection Error"
        if r is not None:
            try:
                res = r.json()
                if "error" in res:
                    error_detail = f"Ollama: {res['error']}"
            except Exception:
                error_detail = f"Ollama: {e}"

        raise HTTPException(
            status_code=r.status_code if r is not None else 500,
            detail=error_detail,
        )


class CreateModelForm(BaseModel):
    name: str
    modelfile: Optional[str] = None
    stream: Optional[bool] = None
    path: Optional[str] = None


@app.post("/api/create")
@app.post("/api/create/{url_idx}")
async def create_model(
    form_data: CreateModelForm, url_idx: int = 0, user=Depends(get_admin_user)
):
    log.debug(f"form_data: {form_data}")
    url = app.state.OLLAMA_BASE_URLS[url_idx]
    log.info(f"url: {url}")

    r = None

    def get_request():
        nonlocal url
        nonlocal r
        try:

            def stream_content():
                for chunk in r.iter_content(chunk_size=8192):
                    yield chunk

            r = requests.request(
                method="POST",
                url=f"{url}/api/create",
                data=form_data.model_dump_json(exclude_none=True).encode(),
                stream=True,
            )

            r.raise_for_status()

            log.debug(f"r: {r}")

            return StreamingResponse(
                stream_content(),
                status_code=r.status_code,
                headers=dict(r.headers),
            )
        except Exception:
            raise

    try:
        return await run_in_threadpool(get_request)
    except Exception as e:
        log.exception(e)
        error_detail = "Open WebUI: Server Connection Error"
        if r is not None:
            try:
                res = r.json()
                if "error" in res:
                    error_detail = f"Ollama: {res['error']}"
            except Exception:
                error_detail = f"Ollama: {e}"

        raise HTTPException(
            status_code=r.status_code if r is not None else 500,
            detail=error_detail,
        )


class CopyModelForm(BaseModel):
    source: str
    destination: str


@app.post("/api/copy")
@app.post("/api/copy/{url_idx}")
async def copy_model(
    form_data: CopyModelForm,
    url_idx: Optional[int] = None,
    user=Depends(get_admin_user),
):
    if url_idx is None:
        if form_data.source in app.state.MODELS:
            url_idx = app.state.MODELS[form_data.source]["urls"][0]
        else:
            raise HTTPException(
                status_code=400,
                detail=ERROR_MESSAGES.MODEL_NOT_FOUND(form_data.source),
            )

    url = app.state.OLLAMA_BASE_URLS[url_idx]
    log.info(f"url: {url}")

    r = None
    try:
        r = requests.request(
            method="POST",
            url=f"{url}/api/copy",
            data=form_data.model_dump_json(exclude_none=True).encode(),
        )
        r.raise_for_status()

        log.debug(f"r.text: {r.text}")

        return True
    except Exception as e:
        log.exception(e)
        error_detail = "Open WebUI: Server Connection Error"
        if r is not None:
            try:
                res = r.json()
                if "error" in res:
                    error_detail = f"Ollama: {res['error']}"
            except Exception:
                error_detail = f"Ollama: {e}"

        raise HTTPException(
            status_code=r.status_code if r is not None else 500,
            detail=error_detail,
        )


@app.delete("/api/delete")
@app.delete("/api/delete/{url_idx}")
async def delete_model(
    form_data: ModelNameForm,
    url_idx: Optional[int] = None,
    user=Depends(get_admin_user),
):
    if url_idx is None:
        if form_data.name in app.state.MODELS:
            url_idx = app.state.MODELS[form_data.name]["urls"][0]
        else:
            raise HTTPException(
                status_code=400,
                detail=ERROR_MESSAGES.MODEL_NOT_FOUND(form_data.name),
            )

    url = app.state.OLLAMA_BASE_URLS[url_idx]
    log.info(f"url: {url}")

    r = None
    try:
        r = requests.request(
            method="DELETE",
            url=f"{url}/api/delete",
            data=form_data.model_dump_json(exclude_none=True).encode(),
        )
        r.raise_for_status()

        log.debug(f"r.text: {r.text}")

        return True
    except Exception as e:
        log.exception(e)
        error_detail = "Open WebUI: Server Connection Error"
        if r is not None:
            try:
                res = r.json()
                if "error" in res:
                    error_detail = f"Ollama: {res['error']}"
            except Exception:
                error_detail = f"Ollama: {e}"

        raise HTTPException(
            status_code=r.status_code if r is not None else 500,
            detail=error_detail,
        )


@app.post("/api/show")
async def show_model_info(form_data: ModelNameForm, user=Depends(get_current_user)):
    if form_data.name not in app.state.MODELS:
        raise HTTPException(
            status_code=400,
            detail=ERROR_MESSAGES.MODEL_NOT_FOUND(form_data.name),
        )

    url_idx = random.choice(app.state.MODELS[form_data.name]["urls"])
    url = app.state.OLLAMA_BASE_URLS[url_idx]
    log.info(f"url: {url}")

    r = None
    try:
        r = requests.request(
            method="POST",
            url=f"{url}/api/show",
            data=form_data.model_dump_json(exclude_none=True).encode(),
        )
        r.raise_for_status()

        return r.json()
    except Exception as e:
        log.exception(e)
        error_detail = "Open WebUI: Server Connection Error"
        if r is not None:
            try:
                res = r.json()
                if "error" in res:
                    error_detail = f"Ollama: {res['error']}"
            except Exception:
                error_detail = f"Ollama: {e}"

        raise HTTPException(
            status_code=r.status_code if r is not None else 500,
            detail=error_detail,
        )


class GenerateEmbeddingsForm(BaseModel):
    model: str
    prompt: str
    options: Optional[dict] = None
    keep_alive: Optional[Union[int, str]] = None


@app.post("/api/embeddings")
@app.post("/api/embeddings/{url_idx}")
async def generate_embeddings(
    form_data: GenerateEmbeddingsForm,
    url_idx: Optional[int] = None,
    user=Depends(get_current_user),
):
    if url_idx is None:
        model = form_data.model

        if ":" not in model:
            model = f"{model}:latest"

        if model in app.state.MODELS:
            url_idx = random.choice(app.state.MODELS[model]["urls"])
        else:
            raise HTTPException(
                status_code=400,
                detail=ERROR_MESSAGES.MODEL_NOT_FOUND(form_data.model),
            )

    url = app.state.OLLAMA_BASE_URLS[url_idx]
    log.info(f"url: {url}")

    r = None
    try:
        r = requests.request(
            method="POST",
            url=f"{url}/api/embeddings",
            data=form_data.model_dump_json(exclude_none=True).encode(),
        )
        r.raise_for_status()

        return r.json()
    except Exception as e:
        log.exception(e)
        error_detail = "Open WebUI: Server Connection Error"
        if r is not None:
            try:
                res = r.json()
                if "error" in res:
                    error_detail = f"Ollama: {res['error']}"
            except Exception:
                error_detail = f"Ollama: {e}"

        raise HTTPException(
            status_code=r.status_code if r is not None else 500,
            detail=error_detail,
        )


def generate_ollama_embeddings(
    form_data: GenerateEmbeddingsForm,
    url_idx: Optional[int] = None,
):
    log.info(f"generate_ollama_embeddings {form_data}")

    if url_idx is None:
        model = form_data.model

        if ":" not in model:
            model = f"{model}:latest"

        if model in app.state.MODELS:
            url_idx = random.choice(app.state.MODELS[model]["urls"])
        else:
            raise HTTPException(
                status_code=400,
                detail=ERROR_MESSAGES.MODEL_NOT_FOUND(form_data.model),
            )

    url = app.state.OLLAMA_BASE_URLS[url_idx]
    log.info(f"url: {url}")

    r = None
    try:
        r = requests.request(
            method="POST",
            url=f"{url}/api/embeddings",
            data=form_data.model_dump_json(exclude_none=True).encode(),
        )
        r.raise_for_status()

        data = r.json()

        log.info(f"generate_ollama_embeddings {data}")

        if "embedding" in data:
            return data["embedding"]
        else:
            raise ValueError("Something went wrong :/")
    except Exception as e:
        log.exception(e)
        error_detail = "Open WebUI: Server Connection Error"
        if r is not None:
            try:
                res = r.json()
                if "error" in res:
                    error_detail = f"Ollama: {res['error']}"
            except Exception:
                error_detail = f"Ollama: {e}"

        raise Exception(error_detail)


class GenerateCompletionForm(BaseModel):
    model: str
    prompt: str
    images: Optional[List[str]] = None
    format: Optional[str] = None
    options: Optional[dict] = None
    system: Optional[str] = None
    template: Optional[str] = None
    # Ollama encodes the conversation context as a list of token ids.
    context: Optional[List[int]] = None
    stream: Optional[bool] = True
    raw: Optional[bool] = None
    keep_alive: Optional[Union[int, str]] = None


@app.post("/api/generate")
@app.post("/api/generate/{url_idx}")
async def generate_completion(
    form_data: GenerateCompletionForm,
    url_idx: Optional[int] = None,
    user=Depends(get_current_user),
):
    if url_idx is None:
        model = form_data.model

        if ":" not in model:
            model = f"{model}:latest"

        if model in app.state.MODELS:
            url_idx = random.choice(app.state.MODELS[model]["urls"])
        else:
            raise HTTPException(
                status_code=400,
                detail=ERROR_MESSAGES.MODEL_NOT_FOUND(form_data.model),
            )

    url = app.state.OLLAMA_BASE_URLS[url_idx]
    log.info(f"url: {url}")

    r = None

    def get_request():
        nonlocal form_data
        nonlocal r

        request_id = str(uuid.uuid4())
        try:
            REQUEST_POOL.append(request_id)

            def stream_content():
                try:
                    if form_data.stream:
                        yield json.dumps({"id": request_id, "done": False}) + "\n"

                    for chunk in r.iter_content(chunk_size=8192):
                        if request_id in REQUEST_POOL:
                            yield chunk
                        else:
                            log.warning("User: canceled request")
                            break
                finally:
                    if hasattr(r, "close"):
                        r.close()
                    if request_id in REQUEST_POOL:
                        REQUEST_POOL.remove(request_id)

            r = requests.request(
                method="POST",
                url=f"{url}/api/generate",
                data=form_data.model_dump_json(exclude_none=True).encode(),
                stream=True,
            )

            r.raise_for_status()

            return StreamingResponse(
                stream_content(),
                status_code=r.status_code,
                headers=dict(r.headers),
            )
        except Exception:
            raise

    try:
        return await run_in_threadpool(get_request)
    except Exception as e:
        log.exception(e)
        error_detail = "Open WebUI: Server Connection Error"
        if r is not None:
            try:
                res = r.json()
                if "error" in res:
                    error_detail = f"Ollama: {res['error']}"
            except Exception:
                error_detail = f"Ollama: {e}"

        raise HTTPException(
            status_code=r.status_code if r is not None else 500,
            detail=error_detail,
        )


class ChatMessage(BaseModel):
    role: str
    content: str
    images: Optional[List[str]] = None


class GenerateChatCompletionForm(BaseModel):
    model: str
    messages: List[ChatMessage]
    format: Optional[str] = None
    options: Optional[dict] = None
    template: Optional[str] = None
    stream: Optional[bool] = None
    keep_alive: Optional[Union[int, str]] = None


@app.post("/api/chat")
@app.post("/api/chat/{url_idx}")
async def generate_chat_completion(
    form_data: GenerateChatCompletionForm,
    url_idx: Optional[int] = None,
    user=Depends(get_current_user),
):
    if url_idx is None:
        model = form_data.model

        if ":" not in model:
            model = f"{model}:latest"

        if model in app.state.MODELS:
            url_idx = random.choice(app.state.MODELS[model]["urls"])
        else:
            raise HTTPException(
                status_code=400,
                detail=ERROR_MESSAGES.MODEL_NOT_FOUND(form_data.model),
            )

    url = app.state.OLLAMA_BASE_URLS[url_idx]
    log.info(f"url: {url}")

    r = None

    log.debug(
        "form_data.model_dump_json(exclude_none=True).encode(): {0} ".format(
            form_data.model_dump_json(exclude_none=True).encode()
        )
    )

    def get_request():
        nonlocal form_data
        nonlocal r

        request_id = str(uuid.uuid4())
        try:
            REQUEST_POOL.append(request_id)

            def stream_content():
                try:
                    if form_data.stream:
                        yield json.dumps({"id": request_id, "done": False}) + "\n"

                    for chunk in r.iter_content(chunk_size=8192):
                        if request_id in REQUEST_POOL:
                            yield chunk
                        else:
                            log.warning("User: canceled request")
                            break
                finally:
                    if hasattr(r, "close"):
                        r.close()
                    if request_id in REQUEST_POOL:
                        REQUEST_POOL.remove(request_id)

            r = requests.request(
                method="POST",
                url=f"{url}/api/chat",
                data=form_data.model_dump_json(exclude_none=True).encode(),
                stream=True,
            )

            r.raise_for_status()

            return StreamingResponse(
                stream_content(),
                status_code=r.status_code,
                headers=dict(r.headers),
            )
        except Exception as e:
            log.exception(e)
            raise

    try:
        return await run_in_threadpool(get_request)
    except Exception as e:
        error_detail = "Open WebUI: Server Connection Error"
        if r is not None:
            try:
                res = r.json()
                if "error" in res:
                    error_detail = f"Ollama: {res['error']}"
            except Exception:
                error_detail = f"Ollama: {e}"

        raise HTTPException(
            status_code=r.status_code if r is not None else 500,
            detail=error_detail,
        )


# TODO: we should update this part once Ollama supports other types
class OpenAIChatMessage(BaseModel):
    role: str
    content: str

    model_config = ConfigDict(extra="allow")


class OpenAIChatCompletionForm(BaseModel):
    model: str
    messages: List[OpenAIChatMessage]

    model_config = ConfigDict(extra="allow")
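

# Example request body accepted by the endpoint below; extra="allow" lets
# OpenAI-style fields such as "stream" or "temperature" pass through to the
# backend unchanged. The model name is illustrative:
#
#   {
#       "model": "llama2:latest",
#       "messages": [{"role": "user", "content": "Hello!"}],
#       "stream": true
#   }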


@app.post("/v1/chat/completions")
@app.post("/v1/chat/completions/{url_idx}")
async def generate_openai_chat_completion(
    form_data: OpenAIChatCompletionForm,
    url_idx: Optional[int] = None,
    user=Depends(get_current_user),
):
    if url_idx is None:
        model = form_data.model

        if ":" not in model:
            model = f"{model}:latest"

        if model in app.state.MODELS:
            url_idx = random.choice(app.state.MODELS[model]["urls"])
        else:
            raise HTTPException(
                status_code=400,
                detail=ERROR_MESSAGES.MODEL_NOT_FOUND(form_data.model),
            )

    url = app.state.OLLAMA_BASE_URLS[url_idx]
    log.info(f"url: {url}")

    r = None

    def get_request():
        nonlocal form_data
        nonlocal r

        request_id = str(uuid.uuid4())
        try:
            REQUEST_POOL.append(request_id)

            def stream_content():
                try:
                    # "stream" is an extra field under extra="allow", so it may
                    # be absent from the payload; default to non-streaming.
                    if getattr(form_data, "stream", False):
                        yield json.dumps(
                            {"request_id": request_id, "done": False}
                        ) + "\n"

                    for chunk in r.iter_content(chunk_size=8192):
                        if request_id in REQUEST_POOL:
                            yield chunk
                        else:
                            log.warning("User: canceled request")
                            break
                finally:
                    if hasattr(r, "close"):
                        r.close()
                    if request_id in REQUEST_POOL:
                        REQUEST_POOL.remove(request_id)

            r = requests.request(
                method="POST",
                url=f"{url}/v1/chat/completions",
                data=form_data.model_dump_json(exclude_none=True).encode(),
                stream=True,
            )

            r.raise_for_status()

            return StreamingResponse(
                stream_content(),
                status_code=r.status_code,
                headers=dict(r.headers),
            )
        except Exception:
            raise

    try:
        return await run_in_threadpool(get_request)
    except Exception as e:
        error_detail = "Open WebUI: Server Connection Error"
        if r is not None:
            try:
                res = r.json()
                if "error" in res:
                    error_detail = f"Ollama: {res['error']}"
            except Exception:
                error_detail = f"Ollama: {e}"

        raise HTTPException(
            status_code=r.status_code if r is not None else 500,
            detail=error_detail,
        )


class UrlForm(BaseModel):
    url: str


class UploadBlobForm(BaseModel):
    filename: str


def parse_huggingface_url(hf_url):
    try:
        # Parse the URL and split its path into components.
        parsed_url = urlparse(hf_url)
        path_components = parsed_url.path.split("/")

        # The model file name is the last path component.
        model_file = path_components[-1]

        return model_file
    except ValueError:
        return None
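

# Example: parse_huggingface_url(
#     "https://huggingface.co/TheBloke/stablelm-zephyr-3b-GGUF/resolve/main/stablelm-zephyr-3b.Q2_K.gguf"
# ) returns "stablelm-zephyr-3b.Q2_K.gguf".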


async def download_file_stream(
    ollama_url, file_url, file_path, file_name, chunk_size=1024 * 1024
):
    done = False

    # Resume a partial download if the file already exists on disk, using an
    # HTTP Range header to request only the remaining bytes.
    if os.path.exists(file_path):
        current_size = os.path.getsize(file_path)
    else:
        current_size = 0

    headers = {"Range": f"bytes={current_size}-"} if current_size > 0 else {}

    timeout = aiohttp.ClientTimeout(total=600)  # Set the timeout

    async with aiohttp.ClientSession(timeout=timeout) as session:
        async with session.get(file_url, headers=headers) as response:
            total_size = int(response.headers.get("content-length", 0)) + current_size

            with open(file_path, "ab+") as file:
                async for data in response.content.iter_chunked(chunk_size):
                    current_size += len(data)
                    file.write(data)

                    done = current_size == total_size
                    progress = round((current_size / total_size) * 100, 2)

                    yield f'data: {{"progress": {progress}, "completed": {current_size}, "total": {total_size}}}\n\n'

                if done:
                    file.seek(0)
                    hashed = calculate_sha256(file)
                    file.seek(0)

                    url = f"{ollama_url}/api/blobs/sha256:{hashed}"
                    response = requests.post(url, data=file)

                    if response.ok:
                        res = {
                            "done": done,
                            "blob": f"sha256:{hashed}",
                            "name": file_name,
                        }
                        os.remove(file_path)

                        yield f"data: {json.dumps(res)}\n\n"
                    else:
                        raise Exception(
                            "Ollama: Could not create blob, Please try again."
                        )


@app.post("/models/download")
@app.post("/models/download/{url_idx}")
async def download_model(
    form_data: UrlForm,
    url_idx: Optional[int] = None,
):
    allowed_hosts = ["https://huggingface.co/", "https://github.com/"]

    if not any(form_data.url.startswith(host) for host in allowed_hosts):
        raise HTTPException(
            status_code=400,
            detail="Invalid file_url. Only URLs from allowed hosts are permitted.",
        )

    if url_idx is None:
        url_idx = 0
    url = app.state.OLLAMA_BASE_URLS[url_idx]

    file_name = parse_huggingface_url(form_data.url)

    if file_name:
        file_path = f"{UPLOAD_DIR}/{file_name}"

        return StreamingResponse(
            download_file_stream(url, form_data.url, file_path, file_name),
        )
    else:
        return None


@app.post("/models/upload")
@app.post("/models/upload/{url_idx}")
def upload_model(file: UploadFile = File(...), url_idx: Optional[int] = None):
    if url_idx is None:
        url_idx = 0
    ollama_url = app.state.OLLAMA_BASE_URLS[url_idx]

    file_path = f"{UPLOAD_DIR}/{file.filename}"

    # Save the uploaded file to disk in chunks
    with open(file_path, "wb+") as f:
        for chunk in file.file:
            f.write(chunk)

    def file_process_stream():
        nonlocal ollama_url
        total_size = os.path.getsize(file_path)
        chunk_size = 1024 * 1024
        try:
            with open(file_path, "rb") as f:
                total = 0
                done = False

                while not done:
                    chunk = f.read(chunk_size)
                    if not chunk:
                        done = True
                        continue

                    total += len(chunk)
                    progress = round((total / total_size) * 100, 2)

                    res = {
                        "progress": progress,
                        "total": total_size,
                        "completed": total,
                    }
                    yield f"data: {json.dumps(res)}\n\n"

                if done:
                    f.seek(0)
                    hashed = calculate_sha256(f)
                    f.seek(0)

                    url = f"{ollama_url}/api/blobs/sha256:{hashed}"
                    response = requests.post(url, data=f)

                    if response.ok:
                        res = {
                            "done": done,
                            "blob": f"sha256:{hashed}",
                            "name": file.filename,
                        }
                        os.remove(file_path)
                        yield f"data: {json.dumps(res)}\n\n"
                    else:
                        raise Exception(
                            "Ollama: Could not create blob, Please try again."
                        )

        except Exception as e:
            res = {"error": str(e)}
            yield f"data: {json.dumps(res)}\n\n"

    return StreamingResponse(file_process_stream(), media_type="text/event-stream")


@app.api_route("/{path:path}", methods=["GET", "POST", "PUT", "DELETE"])
async def deprecated_proxy(path: str, request: Request, user=Depends(get_current_user)):
    url = app.state.OLLAMA_BASE_URLS[0]
    target_url = f"{url}/{path}"

    body = await request.body()
    headers = dict(request.headers)

    if user.role in ["user", "admin"]:
        if path in ["pull", "delete", "push", "copy", "create"]:
            if user.role != "admin":
                raise HTTPException(
                    status_code=status.HTTP_401_UNAUTHORIZED,
                    detail=ERROR_MESSAGES.ACCESS_PROHIBITED,
                )
    else:
        raise HTTPException(
            status_code=status.HTTP_401_UNAUTHORIZED,
            detail=ERROR_MESSAGES.ACCESS_PROHIBITED,
        )

    # Strip headers that should not be forwarded to the backend.
    headers.pop("host", None)
    headers.pop("authorization", None)
    headers.pop("origin", None)
    headers.pop("referer", None)

    r = None

    def get_request():
        nonlocal r

        request_id = str(uuid.uuid4())
        try:
            REQUEST_POOL.append(request_id)

            def stream_content():
                try:
                    if path == "generate":
                        data = json.loads(body.decode("utf-8"))

                        # Emit the request ID first unless the client asked
                        # for a non-streaming response.
                        if data.get("stream", True):
                            yield json.dumps({"id": request_id, "done": False}) + "\n"

                    elif path == "chat":
                        yield json.dumps({"id": request_id, "done": False}) + "\n"

                    for chunk in r.iter_content(chunk_size=8192):
                        if request_id in REQUEST_POOL:
                            yield chunk
                        else:
                            log.warning("User: canceled request")
                            break
                finally:
                    if hasattr(r, "close"):
                        r.close()
                    if request_id in REQUEST_POOL:
                        REQUEST_POOL.remove(request_id)

            r = requests.request(
                method=request.method,
                url=target_url,
                data=body,
                headers=headers,
                stream=True,
            )

            r.raise_for_status()

            return StreamingResponse(
                stream_content(),
                status_code=r.status_code,
                headers=dict(r.headers),
            )
        except Exception:
            raise

    try:
        return await run_in_threadpool(get_request)
    except Exception as e:
        error_detail = "Open WebUI: Server Connection Error"
        if r is not None:
            try:
                res = r.json()
                if "error" in res:
                    error_detail = f"Ollama: {res['error']}"
            except Exception:
                error_detail = f"Ollama: {e}"

        raise HTTPException(
            status_code=r.status_code if r is not None else 500,
            detail=error_detail,
        )