main.py

from contextlib import asynccontextmanager
from bs4 import BeautifulSoup
import json
import markdown
import time
import os
import sys
import logging
import aiohttp
import requests
import mimetypes
import shutil
import uuid
import inspect
import asyncio
from fastapi import FastAPI, Request, Depends, status, UploadFile, File, Form
from fastapi.staticfiles import StaticFiles
from fastapi.responses import JSONResponse
from fastapi import HTTPException
from fastapi.middleware.wsgi import WSGIMiddleware
from fastapi.middleware.cors import CORSMiddleware

from starlette.exceptions import HTTPException as StarletteHTTPException
from starlette.middleware.base import BaseHTTPMiddleware
from starlette.responses import StreamingResponse, Response

from apps.socket.main import app as socket_app
from apps.ollama.main import (
    app as ollama_app,
    OpenAIChatCompletionForm,
    get_all_models as get_ollama_models,
    generate_openai_chat_completion as generate_ollama_chat_completion,
)
from apps.openai.main import (
    app as openai_app,
    get_all_models as get_openai_models,
    generate_chat_completion as generate_openai_chat_completion,
)
from apps.audio.main import app as audio_app
from apps.images.main import app as images_app
from apps.rag.main import app as rag_app
from apps.webui.main import app as webui_app, get_pipe_models

from pydantic import BaseModel
from typing import List, Optional

from apps.webui.models.models import Models, ModelModel
from apps.webui.models.tools import Tools
from apps.webui.models.functions import Functions
from apps.webui.utils import load_toolkit_module_by_id, load_function_module_by_id

from utils.utils import (
    get_admin_user,
    get_verified_user,
    get_current_user,
    get_http_authorization_cred,
)
from utils.task import (
    title_generation_template,
    search_query_generation_template,
    tools_function_calling_generation_template,
)
from utils.misc import get_last_user_message, add_or_update_system_message

from apps.rag.utils import get_rag_context, rag_template

from config import (
    CONFIG_DATA,
    WEBUI_NAME,
    WEBUI_URL,
    WEBUI_AUTH,
    ENV,
    VERSION,
    CHANGELOG,
    FRONTEND_BUILD_DIR,
    UPLOAD_DIR,
    CACHE_DIR,
    STATIC_DIR,
    ENABLE_OPENAI_API,
    ENABLE_OLLAMA_API,
    ENABLE_MODEL_FILTER,
    MODEL_FILTER_LIST,
    GLOBAL_LOG_LEVEL,
    SRC_LOG_LEVELS,
    WEBHOOK_URL,
    ENABLE_ADMIN_EXPORT,
    WEBUI_BUILD_HASH,
    TASK_MODEL,
    TASK_MODEL_EXTERNAL,
    TITLE_GENERATION_PROMPT_TEMPLATE,
    SEARCH_QUERY_GENERATION_PROMPT_TEMPLATE,
    SEARCH_QUERY_PROMPT_LENGTH_THRESHOLD,
    TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE,
    AppConfig,
)
from constants import ERROR_MESSAGES

logging.basicConfig(stream=sys.stdout, level=GLOBAL_LOG_LEVEL)
log = logging.getLogger(__name__)
log.setLevel(SRC_LOG_LEVELS["MAIN"])
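

# The frontend is a single-page app, so any path StaticFiles cannot resolve
# (a 404) falls back to index.html and is handled by the client-side router.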
class SPAStaticFiles(StaticFiles):
    async def get_response(self, path: str, scope):
        try:
            return await super().get_response(path, scope)
        except (HTTPException, StarletteHTTPException) as ex:
            if ex.status_code == 404:
                return await super().get_response("index.html", scope)
            else:
                raise ex

print(
    rf"""
  ___                    __        __   _     _   _ ___
 / _ \ _ __   ___ _ __   \ \      / /__| |__ | | | |_ _|
| | | | '_ \ / _ \ '_ \   \ \ /\ / / _ \ '_ \| | | || |
| |_| | |_) |  __/ | | |   \ V  V /  __/ |_) | |_| || |
 \___/| .__/ \___|_| |_|    \_/\_/ \___|_.__/ \___/|___|
      |_|

v{VERSION} - building the best open-source AI user interface.
{f"Commit: {WEBUI_BUILD_HASH}" if WEBUI_BUILD_HASH != "dev-build" else ""}
https://github.com/open-webui/open-webui
"""
)


@asynccontextmanager
async def lifespan(app: FastAPI):
    yield


app = FastAPI(
    docs_url="/docs" if ENV == "dev" else None, redoc_url=None, lifespan=lifespan
)

app.state.config = AppConfig()

app.state.config.ENABLE_OPENAI_API = ENABLE_OPENAI_API
app.state.config.ENABLE_OLLAMA_API = ENABLE_OLLAMA_API

app.state.config.ENABLE_MODEL_FILTER = ENABLE_MODEL_FILTER
app.state.config.MODEL_FILTER_LIST = MODEL_FILTER_LIST

app.state.config.WEBHOOK_URL = WEBHOOK_URL

app.state.config.TASK_MODEL = TASK_MODEL
app.state.config.TASK_MODEL_EXTERNAL = TASK_MODEL_EXTERNAL
app.state.config.TITLE_GENERATION_PROMPT_TEMPLATE = TITLE_GENERATION_PROMPT_TEMPLATE
app.state.config.SEARCH_QUERY_GENERATION_PROMPT_TEMPLATE = (
    SEARCH_QUERY_GENERATION_PROMPT_TEMPLATE
)
app.state.config.SEARCH_QUERY_PROMPT_LENGTH_THRESHOLD = (
    SEARCH_QUERY_PROMPT_LENGTH_THRESHOLD
)
app.state.config.TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE = (
    TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE
)

app.state.MODELS = {}

origins = ["*"]


##################################
#
# ChatCompletion Middleware
#
##################################
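

# get_function_call_response builds a function-calling prompt from the tool's
# JSON specs plus the last few chat messages, asks the task model for a JSON
# object of the form {"name": ..., "parameters": ...}, and then invokes the
# matching function from the toolkit module, passing the reserved parameters
# (__user__, __messages__, __files__, __model__, __id__) when the function
# signature declares them.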
async def get_function_call_response(
    messages, files, tool_id, template, task_model_id, user
):
    tool = Tools.get_tool_by_id(tool_id)
    tools_specs = json.dumps(tool.specs, indent=2)
    content = tools_function_calling_generation_template(template, tools_specs)

    user_message = get_last_user_message(messages)
    prompt = (
        "History:\n"
        + "\n".join(
            [
                f"{message['role'].upper()}: \"\"\"{message['content']}\"\"\""
                for message in messages[::-1][:4]
            ]
        )
        + f"\nQuery: {user_message}"
    )

    print(prompt)

    payload = {
        "model": task_model_id,
        "messages": [
            {"role": "system", "content": content},
            {"role": "user", "content": f"Query: {prompt}"},
        ],
        "stream": False,
    }

    payload = filter_pipeline(payload, user)

    model = app.state.MODELS[task_model_id]

    response = None
    try:
        if model["owned_by"] == "ollama":
            response = await generate_ollama_chat_completion(payload, user=user)
        else:
            response = await generate_openai_chat_completion(payload, user=user)

        content = None

        if hasattr(response, "body_iterator"):
            async for chunk in response.body_iterator:
                data = json.loads(chunk.decode("utf-8"))
                content = data["choices"][0]["message"]["content"]

            # Clean up any remaining background tasks if necessary
            if response.background is not None:
                await response.background()
        else:
            content = response["choices"][0]["message"]["content"]

        # Parse the function response
        if content is not None:
            print(f"content: {content}")
            result = json.loads(content)
            print(result)

            # Call the function
            if "name" in result:
                if tool_id in webui_app.state.TOOLS:
                    toolkit_module = webui_app.state.TOOLS[tool_id]
                else:
                    toolkit_module = load_toolkit_module_by_id(tool_id)
                    webui_app.state.TOOLS[tool_id] = toolkit_module

                # Check if the toolkit module declares a file_handler attribute
                file_handler = False
                if hasattr(toolkit_module, "file_handler"):
                    file_handler = True

                print("file_handler: ", file_handler)

                function = getattr(toolkit_module, result["name"])
                function_result = None
                try:
                    # Inspect the function signature for reserved parameters
                    sig = inspect.signature(function)
                    params = result["parameters"]

                    if "__user__" in sig.parameters:
                        # Call the function with the '__user__' parameter included
                        params = {
                            **params,
                            "__user__": {
                                "id": user.id,
                                "email": user.email,
                                "name": user.name,
                                "role": user.role,
                            },
                        }

                    if "__messages__" in sig.parameters:
                        # Call the function with the '__messages__' parameter included
                        params = {
                            **params,
                            "__messages__": messages,
                        }

                    if "__files__" in sig.parameters:
                        # Call the function with the '__files__' parameter included
                        params = {
                            **params,
                            "__files__": files,
                        }

                    if "__model__" in sig.parameters:
                        # Call the function with the '__model__' parameter included
                        params = {
                            **params,
                            "__model__": model,
                        }

                    if "__id__" in sig.parameters:
                        # Call the function with the '__id__' parameter included
                        params = {
                            **params,
                            "__id__": tool_id,
                        }

                    function_result = function(**params)
                except Exception as e:
                    print(e)

                # Return the function result so it can be added to the system prompt
                if function_result is not None:
                    return function_result, file_handler
    except Exception as e:
        print(f"Error: {e}")

    return None, False
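

# ChatCompletionMiddleware intercepts POST requests to the chat endpoints,
# runs any configured inlet filter functions and tools, injects RAG context
# into the system message, rewrites the request body (patching content-length
# to match), and streams any collected citations back to the client as extra
# SSE events or NDJSON lines.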
class ChatCompletionMiddleware(BaseHTTPMiddleware):
    async def dispatch(self, request: Request, call_next):
        data_items = []

        if request.method == "POST" and any(
            endpoint in request.url.path
            for endpoint in ["/ollama/api/chat", "/chat/completions"]
        ):
            log.debug(f"request.url.path: {request.url.path}")

            # Read the original request body
            body = await request.body()
            body_str = body.decode("utf-8")
            data = json.loads(body_str) if body_str else {}

            user = get_current_user(
                request,
                get_http_authorization_cred(request.headers.get("Authorization")),
            )

            # Flag to skip RAG completions if file_handler is present in tools/functions
            skip_files = False

            model_id = data["model"]
            if model_id not in app.state.MODELS:
                raise HTTPException(
                    status_code=status.HTTP_404_NOT_FOUND,
                    detail="Model not found",
                )
            model = app.state.MODELS[model_id]

            # Check if the model has any filters
            for filter_id in model["info"]["meta"].get("filterIds", []):
                filter = Functions.get_function_by_id(filter_id)
                if filter:
                    if filter_id in webui_app.state.FUNCTIONS:
                        function_module = webui_app.state.FUNCTIONS[filter_id]
                    else:
                        function_module, function_type = load_function_module_by_id(
                            filter_id
                        )
                        webui_app.state.FUNCTIONS[filter_id] = function_module

                    # Check if the function has a file_handler variable
                    # (default to False so modules without it don't raise)
                    if getattr(function_module, "file_handler", False):
                        skip_files = True

                    try:
                        if hasattr(function_module, "inlet"):
                            data = function_module.inlet(
                                data,
                                {
                                    "id": user.id,
                                    "email": user.email,
                                    "name": user.name,
                                    "role": user.role,
                                },
                            )
                    except Exception as e:
                        print(f"Error: {e}")
                        return JSONResponse(
                            status_code=status.HTTP_400_BAD_REQUEST,
                            content={"detail": str(e)},
                        )

            # Set the task model
            task_model_id = data["model"]
            # Check if the user has a custom task model and use that model
            if app.state.MODELS[task_model_id]["owned_by"] == "ollama":
                if (
                    app.state.config.TASK_MODEL
                    and app.state.config.TASK_MODEL in app.state.MODELS
                ):
                    task_model_id = app.state.config.TASK_MODEL
            else:
                if (
                    app.state.config.TASK_MODEL_EXTERNAL
                    and app.state.config.TASK_MODEL_EXTERNAL in app.state.MODELS
                ):
                    task_model_id = app.state.config.TASK_MODEL_EXTERNAL

            prompt = get_last_user_message(data["messages"])
            context = ""

            # If tool_ids field is present, call the functions
            if "tool_ids" in data:
                print(data["tool_ids"])
                for tool_id in data["tool_ids"]:
                    print(tool_id)
                    try:
                        response, file_handler = await get_function_call_response(
                            messages=data["messages"],
                            files=data.get("files", []),
                            tool_id=tool_id,
                            template=app.state.config.TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE,
                            task_model_id=task_model_id,
                            user=user,
                        )

                        print(file_handler)

                        if isinstance(response, str):
                            context += ("\n" if context != "" else "") + response

                        if file_handler:
                            skip_files = True
                    except Exception as e:
                        print(f"Error: {e}")

                del data["tool_ids"]
                print(f"tool_context: {context}")

            # If files field is present, generate RAG completions
            # If skip_files is True, skip the RAG completions
            if "files" in data:
                if not skip_files:
                    data = {**data}
                    rag_context, citations = get_rag_context(
                        files=data["files"],
                        messages=data["messages"],
                        embedding_function=rag_app.state.EMBEDDING_FUNCTION,
                        k=rag_app.state.config.TOP_K,
                        reranking_function=rag_app.state.sentence_transformer_rf,
                        r=rag_app.state.config.RELEVANCE_THRESHOLD,
                        hybrid_search=rag_app.state.config.ENABLE_RAG_HYBRID_SEARCH,
                    )

                    if rag_context:
                        context += ("\n" if context != "" else "") + rag_context

                    log.debug(f"rag_context: {rag_context}, citations: {citations}")

                    if citations and data.get("citations"):
                        data_items.append({"citations": citations})

                del data["files"]

            if data.get("citations"):
                del data["citations"]

            if context != "":
                system_prompt = rag_template(
                    rag_app.state.config.RAG_TEMPLATE, context, prompt
                )
                print(system_prompt)
                data["messages"] = add_or_update_system_message(
                    system_prompt, data["messages"]
                )

            modified_body_bytes = json.dumps(data).encode("utf-8")
            # Replace the request body with the modified one
            request._body = modified_body_bytes
            # Set custom header to ensure content-length matches new body length
            request.headers.__dict__["_list"] = [
                (b"content-length", str(len(modified_body_bytes)).encode("utf-8")),
                *[
                    (k, v)
                    for k, v in request.headers.raw
                    if k.lower() != b"content-length"
                ],
            ]

            response = await call_next(request)

            if isinstance(response, StreamingResponse):
                # If it's a streaming response, inject it as an SSE event or NDJSON line
                # (default to "" so a missing Content-Type header doesn't raise)
                content_type = response.headers.get("Content-Type", "")
                if "text/event-stream" in content_type:
                    return StreamingResponse(
                        self.openai_stream_wrapper(response.body_iterator, data_items),
                    )
                if "application/x-ndjson" in content_type:
                    return StreamingResponse(
                        self.ollama_stream_wrapper(response.body_iterator, data_items),
                    )

            return response

        # If it's not a chat completion request, just pass it through
        response = await call_next(request)
        return response

    async def _receive(self, body: bytes):
        return {"type": "http.request", "body": body, "more_body": False}

    async def openai_stream_wrapper(self, original_generator, data_items):
        for item in data_items:
            yield f"data: {json.dumps(item)}\n\n"

        async for data in original_generator:
            yield data

    async def ollama_stream_wrapper(self, original_generator, data_items):
        for item in data_items:
            yield f"{json.dumps(item)}\n"

        async for data in original_generator:
            yield data


app.add_middleware(ChatCompletionMiddleware)


##################################
#
# Pipeline Middleware
#
##################################
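

# filter_pipeline runs the request body through every "filter"-type pipeline
# that targets this model (or all models via "*"), in ascending priority
# order, with the model's own pipeline applied last. Each hop POSTs
# {"user", "body"} to the pipeline server's /{id}/filter/inlet endpoint and
# replaces the body with the response.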
def filter_pipeline(payload, user):
    user = {"id": user.id, "email": user.email, "name": user.name, "role": user.role}
    model_id = payload["model"]
    filters = [
        model
        for model in app.state.MODELS.values()
        if "pipeline" in model
        and "type" in model["pipeline"]
        and model["pipeline"]["type"] == "filter"
        and (
            model["pipeline"]["pipelines"] == ["*"]
            or any(
                model_id == target_model_id
                for target_model_id in model["pipeline"]["pipelines"]
            )
        )
    ]
    sorted_filters = sorted(filters, key=lambda x: x["pipeline"]["priority"])

    model = app.state.MODELS[model_id]

    if "pipeline" in model:
        sorted_filters.append(model)

    for filter in sorted_filters:
        r = None
        try:
            urlIdx = filter["urlIdx"]

            url = openai_app.state.config.OPENAI_API_BASE_URLS[urlIdx]
            key = openai_app.state.config.OPENAI_API_KEYS[urlIdx]

            if key != "":
                headers = {"Authorization": f"Bearer {key}"}
                r = requests.post(
                    f"{url}/{filter['id']}/filter/inlet",
                    headers=headers,
                    json={
                        "user": user,
                        "body": payload,
                    },
                )

                r.raise_for_status()
                payload = r.json()
        except Exception as e:
            # Handle connection error here
            print(f"Connection error: {e}")

            if r is not None:
                # Guard against a failed r.json() leaving `res` undefined
                res = None
                try:
                    res = r.json()
                except:
                    pass
                if res is not None and "detail" in res:
                    raise Exception(r.status_code, res["detail"])

    if "pipeline" not in app.state.MODELS[model_id]:
        if "chat_id" in payload:
            del payload["chat_id"]

        if "title" in payload:
            del payload["title"]

        if "task" in payload:
            del payload["task"]

    return payload


class PipelineMiddleware(BaseHTTPMiddleware):
    async def dispatch(self, request: Request, call_next):
        if request.method == "POST" and (
            "/ollama/api/chat" in request.url.path
            or "/chat/completions" in request.url.path
        ):
            log.debug(f"request.url.path: {request.url.path}")

            # Read the original request body
            body = await request.body()
            # Decode body to string
            body_str = body.decode("utf-8")
            # Parse string to JSON
            data = json.loads(body_str) if body_str else {}

            user = get_current_user(
                request,
                get_http_authorization_cred(request.headers.get("Authorization")),
            )

            try:
                data = filter_pipeline(data, user)
            except Exception as e:
                return JSONResponse(
                    status_code=e.args[0],
                    content={"detail": e.args[1]},
                )

            modified_body_bytes = json.dumps(data).encode("utf-8")
            # Replace the request body with the modified one
            request._body = modified_body_bytes
            # Set custom header to ensure content-length matches new body length
            request.headers.__dict__["_list"] = [
                (b"content-length", str(len(modified_body_bytes)).encode("utf-8")),
                *[
                    (k, v)
                    for k, v in request.headers.raw
                    if k.lower() != b"content-length"
                ],
            ]

        response = await call_next(request)
        return response

    async def _receive(self, body: bytes):
        return {"type": "http.request", "body": body, "more_body": False}


app.add_middleware(PipelineMiddleware)

app.add_middleware(
    CORSMiddleware,
    allow_origins=origins,
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)


@app.middleware("http")
async def check_url(request: Request, call_next):
    if len(app.state.MODELS) == 0:
        await get_all_models()

    start_time = int(time.time())
    response = await call_next(request)
    process_time = int(time.time()) - start_time
    response.headers["X-Process-Time"] = str(process_time)

    return response


@app.middleware("http")
async def update_embedding_function(request: Request, call_next):
    response = await call_next(request)
    if "/embedding/update" in request.url.path:
        webui_app.state.EMBEDDING_FUNCTION = rag_app.state.EMBEDDING_FUNCTION
    return response


app.mount("/ws", socket_app)

app.mount("/ollama", ollama_app)
app.mount("/openai", openai_app)

app.mount("/images/api/v1", images_app)
app.mount("/audio/api/v1", audio_app)
app.mount("/rag/api/v1", rag_app)

app.mount("/api/v1", webui_app)

webui_app.state.EMBEDDING_FUNCTION = rag_app.state.EMBEDDING_FUNCTION
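

# get_all_models merges pipe models, OpenAI-compatible models, and Ollama
# models into a single OpenAI-style model list, overlays any custom model
# records stored in the database, and caches the result in app.state.MODELS
# keyed by model id.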
async def get_all_models():
    pipe_models = []
    openai_models = []
    ollama_models = []

    pipe_models = await get_pipe_models()

    if app.state.config.ENABLE_OPENAI_API:
        openai_models = await get_openai_models()
        openai_models = openai_models["data"]

    if app.state.config.ENABLE_OLLAMA_API:
        ollama_models = await get_ollama_models()
        ollama_models = [
            {
                "id": model["model"],
                "name": model["name"],
                "object": "model",
                "created": int(time.time()),
                "owned_by": "ollama",
                "ollama": model,
            }
            for model in ollama_models["models"]
        ]

    models = pipe_models + openai_models + ollama_models

    custom_models = Models.get_all_models()
    for custom_model in custom_models:
        if custom_model.base_model_id is None:
            for model in models:
                if (
                    custom_model.id == model["id"]
                    or custom_model.id == model["id"].split(":")[0]
                ):
                    model["name"] = custom_model.name
                    model["info"] = custom_model.model_dump()
        else:
            owned_by = "openai"

            for model in models:
                if (
                    custom_model.base_model_id == model["id"]
                    or custom_model.base_model_id == model["id"].split(":")[0]
                ):
                    owned_by = model["owned_by"]
                    break

            models.append(
                {
                    "id": custom_model.id,
                    "name": custom_model.name,
                    "object": "model",
                    "created": custom_model.created_at,
                    "owned_by": owned_by,
                    "info": custom_model.model_dump(),
                    "preset": True,
                }
            )

    app.state.MODELS = {model["id"]: model for model in models}
    webui_app.state.MODELS = app.state.MODELS

    return models
  621. @app.get("/api/models")
  622. async def get_models(user=Depends(get_verified_user)):
  623. models = await get_all_models()
  624. # Filter out filter pipelines
  625. models = [
  626. model
  627. for model in models
  628. if "pipeline" not in model or model["pipeline"].get("type", None) != "filter"
  629. ]
  630. if app.state.config.ENABLE_MODEL_FILTER:
  631. if user.role == "user":
  632. models = list(
  633. filter(
  634. lambda model: model["id"] in app.state.config.MODEL_FILTER_LIST,
  635. models,
  636. )
  637. )
  638. return {"data": models}
  639. return {"data": models}
  640. @app.post("/api/chat/completions")
  641. async def generate_chat_completions(form_data: dict, user=Depends(get_verified_user)):
  642. model_id = form_data["model"]
  643. if model_id not in app.state.MODELS:
  644. raise HTTPException(
  645. status_code=status.HTTP_404_NOT_FOUND,
  646. detail="Model not found",
  647. )
  648. model = app.state.MODELS[model_id]
  649. print(model)
  650. if model.get('pipe') == True:
  651. print('hi')
  652. if model["owned_by"] == "ollama":
  653. return await generate_ollama_chat_completion(form_data, user=user)
  654. else:
  655. return await generate_openai_chat_completion(form_data, user=user)
  656. @app.post("/api/chat/completed")
  657. async def chat_completed(form_data: dict, user=Depends(get_verified_user)):
  658. data = form_data
  659. model_id = data["model"]
  660. if model_id not in app.state.MODELS:
  661. raise HTTPException(
  662. status_code=status.HTTP_404_NOT_FOUND,
  663. detail="Model not found",
  664. )
  665. model = app.state.MODELS[model_id]
  666. filters = [
  667. model
  668. for model in app.state.MODELS.values()
  669. if "pipeline" in model
  670. and "type" in model["pipeline"]
  671. and model["pipeline"]["type"] == "filter"
  672. and (
  673. model["pipeline"]["pipelines"] == ["*"]
  674. or any(
  675. model_id == target_model_id
  676. for target_model_id in model["pipeline"]["pipelines"]
  677. )
  678. )
  679. ]
  680. sorted_filters = sorted(filters, key=lambda x: x["pipeline"]["priority"])
  681. if "pipeline" in model:
  682. sorted_filters = [model] + sorted_filters
  683. for filter in sorted_filters:
  684. r = None
  685. try:
  686. urlIdx = filter["urlIdx"]
  687. url = openai_app.state.config.OPENAI_API_BASE_URLS[urlIdx]
  688. key = openai_app.state.config.OPENAI_API_KEYS[urlIdx]
  689. if key != "":
  690. headers = {"Authorization": f"Bearer {key}"}
  691. r = requests.post(
  692. f"{url}/{filter['id']}/filter/outlet",
  693. headers=headers,
  694. json={
  695. "user": {"id": user.id, "name": user.name, "role": user.role},
  696. "body": data,
  697. },
  698. )
  699. r.raise_for_status()
  700. data = r.json()
  701. except Exception as e:
  702. # Handle connection error here
  703. print(f"Connection error: {e}")
  704. if r is not None:
  705. try:
  706. res = r.json()
  707. if "detail" in res:
  708. return JSONResponse(
  709. status_code=r.status_code,
  710. content=res,
  711. )
  712. except:
  713. pass
  714. else:
  715. pass
  716. # Check if the model has any filters
  717. for filter_id in model["info"]["meta"].get("filterIds", []):
  718. filter = Functions.get_function_by_id(filter_id)
  719. if filter:
  720. if filter_id in webui_app.state.FUNCTIONS:
  721. function_module = webui_app.state.FUNCTIONS[filter_id]
  722. else:
  723. function_module, function_type = load_function_module_by_id(filter_id)
  724. webui_app.state.FUNCTIONS[filter_id] = function_module
  725. try:
  726. if hasattr(function_module, "outlet"):
  727. data = function_module.outlet(
  728. data,
  729. {
  730. "id": user.id,
  731. "email": user.email,
  732. "name": user.name,
  733. "role": user.role,
  734. },
  735. )
  736. except Exception as e:
  737. print(f"Error: {e}")
  738. return JSONResponse(
  739. status_code=status.HTTP_400_BAD_REQUEST,
  740. content={"detail": str(e)},
  741. )
  742. return data


##################################
#
# Task Endpoints
#
##################################


# TODO: Refactor task API endpoints below into a separate file
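

# The task endpoints below (title, search query, emoji, tools) share a
# convention: for Ollama-owned models the configured TASK_MODEL is substituted
# when available, otherwise TASK_MODEL_EXTERNAL is used for external models.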
  749. @app.get("/api/task/config")
  750. async def get_task_config(user=Depends(get_verified_user)):
  751. return {
  752. "TASK_MODEL": app.state.config.TASK_MODEL,
  753. "TASK_MODEL_EXTERNAL": app.state.config.TASK_MODEL_EXTERNAL,
  754. "TITLE_GENERATION_PROMPT_TEMPLATE": app.state.config.TITLE_GENERATION_PROMPT_TEMPLATE,
  755. "SEARCH_QUERY_GENERATION_PROMPT_TEMPLATE": app.state.config.SEARCH_QUERY_GENERATION_PROMPT_TEMPLATE,
  756. "SEARCH_QUERY_PROMPT_LENGTH_THRESHOLD": app.state.config.SEARCH_QUERY_PROMPT_LENGTH_THRESHOLD,
  757. "TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE": app.state.config.TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE,
  758. }
  759. class TaskConfigForm(BaseModel):
  760. TASK_MODEL: Optional[str]
  761. TASK_MODEL_EXTERNAL: Optional[str]
  762. TITLE_GENERATION_PROMPT_TEMPLATE: str
  763. SEARCH_QUERY_GENERATION_PROMPT_TEMPLATE: str
  764. SEARCH_QUERY_PROMPT_LENGTH_THRESHOLD: int
  765. TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE: str
  766. @app.post("/api/task/config/update")
  767. async def update_task_config(form_data: TaskConfigForm, user=Depends(get_admin_user)):
  768. app.state.config.TASK_MODEL = form_data.TASK_MODEL
  769. app.state.config.TASK_MODEL_EXTERNAL = form_data.TASK_MODEL_EXTERNAL
  770. app.state.config.TITLE_GENERATION_PROMPT_TEMPLATE = (
  771. form_data.TITLE_GENERATION_PROMPT_TEMPLATE
  772. )
  773. app.state.config.SEARCH_QUERY_GENERATION_PROMPT_TEMPLATE = (
  774. form_data.SEARCH_QUERY_GENERATION_PROMPT_TEMPLATE
  775. )
  776. app.state.config.SEARCH_QUERY_PROMPT_LENGTH_THRESHOLD = (
  777. form_data.SEARCH_QUERY_PROMPT_LENGTH_THRESHOLD
  778. )
  779. app.state.config.TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE = (
  780. form_data.TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE
  781. )
  782. return {
  783. "TASK_MODEL": app.state.config.TASK_MODEL,
  784. "TASK_MODEL_EXTERNAL": app.state.config.TASK_MODEL_EXTERNAL,
  785. "TITLE_GENERATION_PROMPT_TEMPLATE": app.state.config.TITLE_GENERATION_PROMPT_TEMPLATE,
  786. "SEARCH_QUERY_GENERATION_PROMPT_TEMPLATE": app.state.config.SEARCH_QUERY_GENERATION_PROMPT_TEMPLATE,
  787. "SEARCH_QUERY_PROMPT_LENGTH_THRESHOLD": app.state.config.SEARCH_QUERY_PROMPT_LENGTH_THRESHOLD,
  788. "TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE": app.state.config.TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE,
  789. }
  790. @app.post("/api/task/title/completions")
  791. async def generate_title(form_data: dict, user=Depends(get_verified_user)):
  792. print("generate_title")
  793. model_id = form_data["model"]
  794. if model_id not in app.state.MODELS:
  795. raise HTTPException(
  796. status_code=status.HTTP_404_NOT_FOUND,
  797. detail="Model not found",
  798. )
  799. # Check if the user has a custom task model
  800. # If the user has a custom task model, use that model
  801. if app.state.MODELS[model_id]["owned_by"] == "ollama":
  802. if app.state.config.TASK_MODEL:
  803. task_model_id = app.state.config.TASK_MODEL
  804. if task_model_id in app.state.MODELS:
  805. model_id = task_model_id
  806. else:
  807. if app.state.config.TASK_MODEL_EXTERNAL:
  808. task_model_id = app.state.config.TASK_MODEL_EXTERNAL
  809. if task_model_id in app.state.MODELS:
  810. model_id = task_model_id
  811. print(model_id)
  812. model = app.state.MODELS[model_id]
  813. template = app.state.config.TITLE_GENERATION_PROMPT_TEMPLATE
  814. content = title_generation_template(
  815. template,
  816. form_data["prompt"],
  817. {
  818. "name": user.name,
  819. "location": user.info.get("location") if user.info else None,
  820. },
  821. )
  822. payload = {
  823. "model": model_id,
  824. "messages": [{"role": "user", "content": content}],
  825. "stream": False,
  826. "max_tokens": 50,
  827. "chat_id": form_data.get("chat_id", None),
  828. "title": True,
  829. }
  830. log.debug(payload)
  831. try:
  832. payload = filter_pipeline(payload, user)
  833. except Exception as e:
  834. return JSONResponse(
  835. status_code=e.args[0],
  836. content={"detail": e.args[1]},
  837. )
  838. if model["owned_by"] == "ollama":
  839. return await generate_ollama_chat_completion(payload, user=user)
  840. else:
  841. return await generate_openai_chat_completion(payload, user=user)
  842. @app.post("/api/task/query/completions")
  843. async def generate_search_query(form_data: dict, user=Depends(get_verified_user)):
  844. print("generate_search_query")
  845. if len(form_data["prompt"]) < app.state.config.SEARCH_QUERY_PROMPT_LENGTH_THRESHOLD:
  846. raise HTTPException(
  847. status_code=status.HTTP_400_BAD_REQUEST,
  848. detail=f"Skip search query generation for short prompts (< {app.state.config.SEARCH_QUERY_PROMPT_LENGTH_THRESHOLD} characters)",
  849. )
  850. model_id = form_data["model"]
  851. if model_id not in app.state.MODELS:
  852. raise HTTPException(
  853. status_code=status.HTTP_404_NOT_FOUND,
  854. detail="Model not found",
  855. )
  856. # Check if the user has a custom task model
  857. # If the user has a custom task model, use that model
  858. if app.state.MODELS[model_id]["owned_by"] == "ollama":
  859. if app.state.config.TASK_MODEL:
  860. task_model_id = app.state.config.TASK_MODEL
  861. if task_model_id in app.state.MODELS:
  862. model_id = task_model_id
  863. else:
  864. if app.state.config.TASK_MODEL_EXTERNAL:
  865. task_model_id = app.state.config.TASK_MODEL_EXTERNAL
  866. if task_model_id in app.state.MODELS:
  867. model_id = task_model_id
  868. print(model_id)
  869. model = app.state.MODELS[model_id]
  870. template = app.state.config.SEARCH_QUERY_GENERATION_PROMPT_TEMPLATE
  871. content = search_query_generation_template(
  872. template, form_data["prompt"], {"name": user.name}
  873. )
  874. payload = {
  875. "model": model_id,
  876. "messages": [{"role": "user", "content": content}],
  877. "stream": False,
  878. "max_tokens": 30,
  879. "task": True,
  880. }
  881. print(payload)
  882. try:
  883. payload = filter_pipeline(payload, user)
  884. except Exception as e:
  885. return JSONResponse(
  886. status_code=e.args[0],
  887. content={"detail": e.args[1]},
  888. )
  889. if model["owned_by"] == "ollama":
  890. return await generate_ollama_chat_completion(payload, user=user)
  891. else:
  892. return await generate_openai_chat_completion(payload, user=user)
  893. @app.post("/api/task/emoji/completions")
  894. async def generate_emoji(form_data: dict, user=Depends(get_verified_user)):
  895. print("generate_emoji")
  896. model_id = form_data["model"]
  897. if model_id not in app.state.MODELS:
  898. raise HTTPException(
  899. status_code=status.HTTP_404_NOT_FOUND,
  900. detail="Model not found",
  901. )
  902. # Check if the user has a custom task model
  903. # If the user has a custom task model, use that model
  904. if app.state.MODELS[model_id]["owned_by"] == "ollama":
  905. if app.state.config.TASK_MODEL:
  906. task_model_id = app.state.config.TASK_MODEL
  907. if task_model_id in app.state.MODELS:
  908. model_id = task_model_id
  909. else:
  910. if app.state.config.TASK_MODEL_EXTERNAL:
  911. task_model_id = app.state.config.TASK_MODEL_EXTERNAL
  912. if task_model_id in app.state.MODELS:
  913. model_id = task_model_id
  914. print(model_id)
  915. model = app.state.MODELS[model_id]
  916. template = '''
  917. Your task is to reflect the speaker's likely facial expression through a fitting emoji. Interpret emotions from the message and reflect their facial expression using fitting, diverse emojis (e.g., 😊, 😢, 😡, 😱).
  918. Message: """{{prompt}}"""
  919. '''
  920. content = title_generation_template(
  921. template,
  922. form_data["prompt"],
  923. {
  924. "name": user.name,
  925. "location": user.info.get("location") if user.info else None,
  926. },
  927. )
  928. payload = {
  929. "model": model_id,
  930. "messages": [{"role": "user", "content": content}],
  931. "stream": False,
  932. "max_tokens": 4,
  933. "chat_id": form_data.get("chat_id", None),
  934. "task": True,
  935. }
  936. log.debug(payload)
  937. try:
  938. payload = filter_pipeline(payload, user)
  939. except Exception as e:
  940. return JSONResponse(
  941. status_code=e.args[0],
  942. content={"detail": e.args[1]},
  943. )
  944. if model["owned_by"] == "ollama":
  945. return await generate_ollama_chat_completion(payload, user=user)
  946. else:
  947. return await generate_openai_chat_completion(payload, user=user)
  948. @app.post("/api/task/tools/completions")
  949. async def get_tools_function_calling(form_data: dict, user=Depends(get_verified_user)):
  950. print("get_tools_function_calling")
  951. model_id = form_data["model"]
  952. if model_id not in app.state.MODELS:
  953. raise HTTPException(
  954. status_code=status.HTTP_404_NOT_FOUND,
  955. detail="Model not found",
  956. )
  957. # Check if the user has a custom task model
  958. # If the user has a custom task model, use that model
  959. if app.state.MODELS[model_id]["owned_by"] == "ollama":
  960. if app.state.config.TASK_MODEL:
  961. task_model_id = app.state.config.TASK_MODEL
  962. if task_model_id in app.state.MODELS:
  963. model_id = task_model_id
  964. else:
  965. if app.state.config.TASK_MODEL_EXTERNAL:
  966. task_model_id = app.state.config.TASK_MODEL_EXTERNAL
  967. if task_model_id in app.state.MODELS:
  968. model_id = task_model_id
  969. print(model_id)
  970. template = app.state.config.TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE
  971. try:
  972. context, file_handler = await get_function_call_response(
  973. form_data["messages"],
  974. form_data.get("files", []),
  975. form_data["tool_id"],
  976. template,
  977. model_id,
  978. user,
  979. )
  980. return context
  981. except Exception as e:
  982. return JSONResponse(
  983. status_code=e.args[0],
  984. content={"detail": e.args[1]},
  985. )


##################################
#
# Pipelines Endpoints
#
##################################


# TODO: Refactor pipelines API endpoints below into a separate file
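

# These endpoints proxy to external pipeline servers: each configured
# OpenAI-compatible base URL that reports a "pipelines" key is treated as a
# pipelines server, and requests are forwarded to it with its API key as a
# Bearer token.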
  992. @app.get("/api/pipelines/list")
  993. async def get_pipelines_list(user=Depends(get_admin_user)):
  994. responses = await get_openai_models(raw=True)
  995. print(responses)
  996. urlIdxs = [
  997. idx
  998. for idx, response in enumerate(responses)
  999. if response != None and "pipelines" in response
  1000. ]
  1001. return {
  1002. "data": [
  1003. {
  1004. "url": openai_app.state.config.OPENAI_API_BASE_URLS[urlIdx],
  1005. "idx": urlIdx,
  1006. }
  1007. for urlIdx in urlIdxs
  1008. ]
  1009. }
  1010. @app.post("/api/pipelines/upload")
  1011. async def upload_pipeline(
  1012. urlIdx: int = Form(...), file: UploadFile = File(...), user=Depends(get_admin_user)
  1013. ):
  1014. print("upload_pipeline", urlIdx, file.filename)
  1015. # Check if the uploaded file is a python file
  1016. if not file.filename.endswith(".py"):
  1017. raise HTTPException(
  1018. status_code=status.HTTP_400_BAD_REQUEST,
  1019. detail="Only Python (.py) files are allowed.",
  1020. )
  1021. upload_folder = f"{CACHE_DIR}/pipelines"
  1022. os.makedirs(upload_folder, exist_ok=True)
  1023. file_path = os.path.join(upload_folder, file.filename)
  1024. try:
  1025. # Save the uploaded file
  1026. with open(file_path, "wb") as buffer:
  1027. shutil.copyfileobj(file.file, buffer)
  1028. url = openai_app.state.config.OPENAI_API_BASE_URLS[urlIdx]
  1029. key = openai_app.state.config.OPENAI_API_KEYS[urlIdx]
  1030. headers = {"Authorization": f"Bearer {key}"}
  1031. with open(file_path, "rb") as f:
  1032. files = {"file": f}
  1033. r = requests.post(f"{url}/pipelines/upload", headers=headers, files=files)
  1034. r.raise_for_status()
  1035. data = r.json()
  1036. return {**data}
  1037. except Exception as e:
  1038. # Handle connection error here
  1039. print(f"Connection error: {e}")
  1040. detail = "Pipeline not found"
  1041. if r is not None:
  1042. try:
  1043. res = r.json()
  1044. if "detail" in res:
  1045. detail = res["detail"]
  1046. except:
  1047. pass
  1048. raise HTTPException(
  1049. status_code=(r.status_code if r is not None else status.HTTP_404_NOT_FOUND),
  1050. detail=detail,
  1051. )
  1052. finally:
  1053. # Ensure the file is deleted after the upload is completed or on failure
  1054. if os.path.exists(file_path):
  1055. os.remove(file_path)


class AddPipelineForm(BaseModel):
    url: str
    urlIdx: int


@app.post("/api/pipelines/add")
async def add_pipeline(form_data: AddPipelineForm, user=Depends(get_admin_user)):
    r = None
    try:
        urlIdx = form_data.urlIdx

        url = openai_app.state.config.OPENAI_API_BASE_URLS[urlIdx]
        key = openai_app.state.config.OPENAI_API_KEYS[urlIdx]

        headers = {"Authorization": f"Bearer {key}"}
        r = requests.post(
            f"{url}/pipelines/add", headers=headers, json={"url": form_data.url}
        )

        r.raise_for_status()
        data = r.json()

        return {**data}
    except Exception as e:
        # Handle connection error here
        print(f"Connection error: {e}")

        detail = "Pipeline not found"
        if r is not None:
            try:
                res = r.json()
                if "detail" in res:
                    detail = res["detail"]
            except:
                pass

        raise HTTPException(
            status_code=(r.status_code if r is not None else status.HTTP_404_NOT_FOUND),
            detail=detail,
        )


class DeletePipelineForm(BaseModel):
    id: str
    urlIdx: int


@app.delete("/api/pipelines/delete")
async def delete_pipeline(form_data: DeletePipelineForm, user=Depends(get_admin_user)):
    r = None
    try:
        urlIdx = form_data.urlIdx

        url = openai_app.state.config.OPENAI_API_BASE_URLS[urlIdx]
        key = openai_app.state.config.OPENAI_API_KEYS[urlIdx]

        headers = {"Authorization": f"Bearer {key}"}
        r = requests.delete(
            f"{url}/pipelines/delete", headers=headers, json={"id": form_data.id}
        )

        r.raise_for_status()
        data = r.json()

        return {**data}
    except Exception as e:
        # Handle connection error here
        print(f"Connection error: {e}")

        detail = "Pipeline not found"
        if r is not None:
            try:
                res = r.json()
                if "detail" in res:
                    detail = res["detail"]
            except:
                pass

        raise HTTPException(
            status_code=(r.status_code if r is not None else status.HTTP_404_NOT_FOUND),
            detail=detail,
        )
  1120. @app.get("/api/pipelines")
  1121. async def get_pipelines(urlIdx: Optional[int] = None, user=Depends(get_admin_user)):
  1122. r = None
  1123. try:
  1124. urlIdx
  1125. url = openai_app.state.config.OPENAI_API_BASE_URLS[urlIdx]
  1126. key = openai_app.state.config.OPENAI_API_KEYS[urlIdx]
  1127. headers = {"Authorization": f"Bearer {key}"}
  1128. r = requests.get(f"{url}/pipelines", headers=headers)
  1129. r.raise_for_status()
  1130. data = r.json()
  1131. return {**data}
  1132. except Exception as e:
  1133. # Handle connection error here
  1134. print(f"Connection error: {e}")
  1135. detail = "Pipeline not found"
  1136. if r is not None:
  1137. try:
  1138. res = r.json()
  1139. if "detail" in res:
  1140. detail = res["detail"]
  1141. except:
  1142. pass
  1143. raise HTTPException(
  1144. status_code=(r.status_code if r is not None else status.HTTP_404_NOT_FOUND),
  1145. detail=detail,
  1146. )


@app.get("/api/pipelines/{pipeline_id}/valves")
async def get_pipeline_valves(
    urlIdx: Optional[int], pipeline_id: str, user=Depends(get_admin_user)
):
    models = await get_all_models()
    r = None
    try:
        url = openai_app.state.config.OPENAI_API_BASE_URLS[urlIdx]
        key = openai_app.state.config.OPENAI_API_KEYS[urlIdx]

        headers = {"Authorization": f"Bearer {key}"}
        r = requests.get(f"{url}/{pipeline_id}/valves", headers=headers)

        r.raise_for_status()
        data = r.json()

        return {**data}
    except Exception as e:
        # Handle connection error here
        print(f"Connection error: {e}")

        detail = "Pipeline not found"
        if r is not None:
            try:
                res = r.json()
                if "detail" in res:
                    detail = res["detail"]
            except:
                pass

        raise HTTPException(
            status_code=(r.status_code if r is not None else status.HTTP_404_NOT_FOUND),
            detail=detail,
        )


@app.get("/api/pipelines/{pipeline_id}/valves/spec")
async def get_pipeline_valves_spec(
    urlIdx: Optional[int], pipeline_id: str, user=Depends(get_admin_user)
):
    models = await get_all_models()
    r = None
    try:
        url = openai_app.state.config.OPENAI_API_BASE_URLS[urlIdx]
        key = openai_app.state.config.OPENAI_API_KEYS[urlIdx]

        headers = {"Authorization": f"Bearer {key}"}
        r = requests.get(f"{url}/{pipeline_id}/valves/spec", headers=headers)

        r.raise_for_status()
        data = r.json()

        return {**data}
    except Exception as e:
        # Handle connection error here
        print(f"Connection error: {e}")

        detail = "Pipeline not found"
        if r is not None:
            try:
                res = r.json()
                if "detail" in res:
                    detail = res["detail"]
            except:
                pass

        raise HTTPException(
            status_code=(r.status_code if r is not None else status.HTTP_404_NOT_FOUND),
            detail=detail,
        )


@app.post("/api/pipelines/{pipeline_id}/valves/update")
async def update_pipeline_valves(
    urlIdx: Optional[int],
    pipeline_id: str,
    form_data: dict,
    user=Depends(get_admin_user),
):
    models = await get_all_models()
    r = None
    try:
        url = openai_app.state.config.OPENAI_API_BASE_URLS[urlIdx]
        key = openai_app.state.config.OPENAI_API_KEYS[urlIdx]

        headers = {"Authorization": f"Bearer {key}"}
        r = requests.post(
            f"{url}/{pipeline_id}/valves/update",
            headers=headers,
            json={**form_data},
        )

        r.raise_for_status()
        data = r.json()

        return {**data}
    except Exception as e:
        # Handle connection error here
        print(f"Connection error: {e}")

        detail = "Pipeline not found"
        if r is not None:
            try:
                res = r.json()
                if "detail" in res:
                    detail = res["detail"]
            except:
                pass

        raise HTTPException(
            status_code=(r.status_code if r is not None else status.HTTP_404_NOT_FOUND),
            detail=detail,
        )


##################################
#
# Config Endpoints
#
##################################
  1246. @app.get("/api/config")
  1247. async def get_app_config():
  1248. # Checking and Handling the Absence of 'ui' in CONFIG_DATA
  1249. default_locale = "en-US"
  1250. if "ui" in CONFIG_DATA:
  1251. default_locale = CONFIG_DATA["ui"].get("default_locale", "en-US")
  1252. # The Rest of the Function Now Uses the Variables Defined Above
  1253. return {
  1254. "status": True,
  1255. "name": WEBUI_NAME,
  1256. "version": VERSION,
  1257. "default_locale": default_locale,
  1258. "default_models": webui_app.state.config.DEFAULT_MODELS,
  1259. "default_prompt_suggestions": webui_app.state.config.DEFAULT_PROMPT_SUGGESTIONS,
  1260. "features": {
  1261. "auth": WEBUI_AUTH,
  1262. "auth_trusted_header": bool(webui_app.state.AUTH_TRUSTED_EMAIL_HEADER),
  1263. "enable_signup": webui_app.state.config.ENABLE_SIGNUP,
  1264. "enable_web_search": rag_app.state.config.ENABLE_RAG_WEB_SEARCH,
  1265. "enable_image_generation": images_app.state.config.ENABLED,
  1266. "enable_community_sharing": webui_app.state.config.ENABLE_COMMUNITY_SHARING,
  1267. "enable_admin_export": ENABLE_ADMIN_EXPORT,
  1268. },
  1269. "audio": {
  1270. "tts": {
  1271. "engine": audio_app.state.config.TTS_ENGINE,
  1272. "voice": audio_app.state.config.TTS_VOICE,
  1273. },
  1274. "stt": {
  1275. "engine": audio_app.state.config.STT_ENGINE,
  1276. },
  1277. },
  1278. }
  1279. @app.get("/api/config/model/filter")
  1280. async def get_model_filter_config(user=Depends(get_admin_user)):
  1281. return {
  1282. "enabled": app.state.config.ENABLE_MODEL_FILTER,
  1283. "models": app.state.config.MODEL_FILTER_LIST,
  1284. }
  1285. class ModelFilterConfigForm(BaseModel):
  1286. enabled: bool
  1287. models: List[str]
  1288. @app.post("/api/config/model/filter")
  1289. async def update_model_filter_config(
  1290. form_data: ModelFilterConfigForm, user=Depends(get_admin_user)
  1291. ):
  1292. app.state.config.ENABLE_MODEL_FILTER = form_data.enabled
  1293. app.state.config.MODEL_FILTER_LIST = form_data.models
  1294. return {
  1295. "enabled": app.state.config.ENABLE_MODEL_FILTER,
  1296. "models": app.state.config.MODEL_FILTER_LIST,
  1297. }
  1298. # TODO: webhook endpoint should be under config endpoints
  1299. @app.get("/api/webhook")
  1300. async def get_webhook_url(user=Depends(get_admin_user)):
  1301. return {
  1302. "url": app.state.config.WEBHOOK_URL,
  1303. }
  1304. class UrlForm(BaseModel):
  1305. url: str
  1306. @app.post("/api/webhook")
  1307. async def update_webhook_url(form_data: UrlForm, user=Depends(get_admin_user)):
  1308. app.state.config.WEBHOOK_URL = form_data.url
  1309. webui_app.state.WEBHOOK_URL = app.state.config.WEBHOOK_URL
  1310. return {"url": app.state.config.WEBHOOK_URL}
  1311. @app.get("/api/version")
  1312. async def get_app_config():
  1313. return {
  1314. "version": VERSION,
  1315. }
  1316. @app.get("/api/changelog")
  1317. async def get_app_changelog():
  1318. return {key: CHANGELOG[key] for idx, key in enumerate(CHANGELOG) if idx < 5}
  1319. @app.get("/api/version/updates")
  1320. async def get_app_latest_release_version():
  1321. try:
  1322. async with aiohttp.ClientSession(trust_env=True) as session:
  1323. async with session.get(
  1324. "https://api.github.com/repos/open-webui/open-webui/releases/latest"
  1325. ) as response:
  1326. response.raise_for_status()
  1327. data = await response.json()
  1328. latest_version = data["tag_name"]
  1329. return {"current": VERSION, "latest": latest_version[1:]}
  1330. except aiohttp.ClientError as e:
  1331. raise HTTPException(
  1332. status_code=status.HTTP_503_SERVICE_UNAVAILABLE,
  1333. detail=ERROR_MESSAGES.RATE_LIMIT_EXCEEDED,
  1334. )
  1335. @app.get("/manifest.json")
  1336. async def get_manifest_json():
  1337. return {
  1338. "name": WEBUI_NAME,
  1339. "short_name": WEBUI_NAME,
  1340. "start_url": "/",
  1341. "display": "standalone",
  1342. "background_color": "#343541",
  1343. "theme_color": "#343541",
  1344. "orientation": "portrait-primary",
  1345. "icons": [{"src": "/static/logo.png", "type": "image/png", "sizes": "500x500"}],
  1346. }
  1347. @app.get("/opensearch.xml")
  1348. async def get_opensearch_xml():
  1349. xml_content = rf"""
  1350. <OpenSearchDescription xmlns="http://a9.com/-/spec/opensearch/1.1/" xmlns:moz="http://www.mozilla.org/2006/browser/search/">
  1351. <ShortName>{WEBUI_NAME}</ShortName>
  1352. <Description>Search {WEBUI_NAME}</Description>
  1353. <InputEncoding>UTF-8</InputEncoding>
  1354. <Image width="16" height="16" type="image/x-icon">{WEBUI_URL}/favicon.png</Image>
  1355. <Url type="text/html" method="get" template="{WEBUI_URL}/?q={"{searchTerms}"}"/>
  1356. <moz:SearchForm>{WEBUI_URL}</moz:SearchForm>
  1357. </OpenSearchDescription>
  1358. """
  1359. return Response(content=xml_content, media_type="application/xml")
  1360. @app.get("/health")
  1361. async def healthcheck():
  1362. return {"status": True}
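

# Static assets, the cache directory, and the SPA frontend are mounted last
# so that the catch-all "/" mount does not shadow the API routes and sub-apps
# registered above; unknown paths fall back to index.html via SPAStaticFiles.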
  1363. app.mount("/static", StaticFiles(directory=STATIC_DIR), name="static")
  1364. app.mount("/cache", StaticFiles(directory=CACHE_DIR), name="cache")
  1365. if os.path.exists(FRONTEND_BUILD_DIR):
  1366. mimetypes.add_type("text/javascript", ".js")
  1367. app.mount(
  1368. "/",
  1369. SPAStaticFiles(directory=FRONTEND_BUILD_DIR, html=True),
  1370. name="spa-static-files",
  1371. )
  1372. else:
  1373. log.warning(
  1374. f"Frontend build directory not found at '{FRONTEND_BUILD_DIR}'. Serving API only."
  1375. )