main.py

from contextlib import asynccontextmanager
from bs4 import BeautifulSoup
import json
import markdown
import time
import os
import sys
import logging
import aiohttp
import requests
import mimetypes
import shutil
import uuid
import inspect
import asyncio

from fastapi import FastAPI, Request, Depends, status, UploadFile, File, Form
from fastapi.staticfiles import StaticFiles
from fastapi.responses import JSONResponse
from fastapi import HTTPException
from fastapi.middleware.wsgi import WSGIMiddleware
from fastapi.middleware.cors import CORSMiddleware
from starlette.exceptions import HTTPException as StarletteHTTPException
from starlette.middleware.base import BaseHTTPMiddleware
from starlette.responses import StreamingResponse, Response

from apps.socket.main import app as socket_app
from apps.ollama.main import (
    app as ollama_app,
    OpenAIChatCompletionForm,
    get_all_models as get_ollama_models,
    generate_openai_chat_completion as generate_ollama_chat_completion,
)
from apps.openai.main import (
    app as openai_app,
    get_all_models as get_openai_models,
    generate_chat_completion as generate_openai_chat_completion,
)

from apps.audio.main import app as audio_app
from apps.images.main import app as images_app
from apps.rag.main import app as rag_app
from apps.webui.main import app as webui_app

from pydantic import BaseModel
from typing import List, Optional

from apps.webui.models.models import Models, ModelModel
from apps.webui.models.tools import Tools
from apps.webui.utils import load_toolkit_module_by_id

from utils.utils import (
    get_admin_user,
    get_verified_user,
    get_current_user,
    get_http_authorization_cred,
)
from utils.task import (
    title_generation_template,
    search_query_generation_template,
    tools_function_calling_generation_template,
)
from utils.misc import get_last_user_message, add_or_update_system_message

from apps.rag.utils import get_rag_context, rag_template

from config import (
    CONFIG_DATA,
    WEBUI_NAME,
    WEBUI_URL,
    WEBUI_AUTH,
    ENV,
    VERSION,
    CHANGELOG,
    FRONTEND_BUILD_DIR,
    UPLOAD_DIR,
    CACHE_DIR,
    STATIC_DIR,
    ENABLE_OPENAI_API,
    ENABLE_OLLAMA_API,
    ENABLE_MODEL_FILTER,
    MODEL_FILTER_LIST,
    GLOBAL_LOG_LEVEL,
    SRC_LOG_LEVELS,
    WEBHOOK_URL,
    ENABLE_ADMIN_EXPORT,
    WEBUI_BUILD_HASH,
    TASK_MODEL,
    TASK_MODEL_EXTERNAL,
    TITLE_GENERATION_PROMPT_TEMPLATE,
    SEARCH_QUERY_GENERATION_PROMPT_TEMPLATE,
    SEARCH_QUERY_PROMPT_LENGTH_THRESHOLD,
    TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE,
    AppConfig,
)
from constants import ERROR_MESSAGES

logging.basicConfig(stream=sys.stdout, level=GLOBAL_LOG_LEVEL)
log = logging.getLogger(__name__)
log.setLevel(SRC_LOG_LEVELS["MAIN"])


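# Static file mount that falls back to index.html on 404 so client-side routing
# in the single-page frontend keeps working for deep links.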
class SPAStaticFiles(StaticFiles):
    async def get_response(self, path: str, scope):
        try:
            return await super().get_response(path, scope)
        except (HTTPException, StarletteHTTPException) as ex:
            if ex.status_code == 404:
                return await super().get_response("index.html", scope)
            else:
                raise ex


print(
    rf"""
  ___                    __        __   _     _   _ ___
 / _ \ _ __   ___ _ __   \ \      / /__| |__ | | | |_ _|
| | | | '_ \ / _ \ '_ \   \ \ /\ / / _ \ '_ \| | | || |
| |_| | |_) |  __/ | | |   \ V  V /  __/ |_) | |_| || |
 \___/| .__/ \___|_| |_|    \_/\_/ \___|_.__/ \___/|___|
      |_|


v{VERSION} - building the best open-source AI user interface.
{f"Commit: {WEBUI_BUILD_HASH}" if WEBUI_BUILD_HASH != "dev-build" else ""}
https://github.com/open-webui/open-webui
"""
)


@asynccontextmanager
async def lifespan(app: FastAPI):
    yield


app = FastAPI(
    docs_url="/docs" if ENV == "dev" else None, redoc_url=None, lifespan=lifespan
)

app.state.config = AppConfig()

app.state.config.ENABLE_OPENAI_API = ENABLE_OPENAI_API
app.state.config.ENABLE_OLLAMA_API = ENABLE_OLLAMA_API

app.state.config.ENABLE_MODEL_FILTER = ENABLE_MODEL_FILTER
app.state.config.MODEL_FILTER_LIST = MODEL_FILTER_LIST

app.state.config.WEBHOOK_URL = WEBHOOK_URL

app.state.config.TASK_MODEL = TASK_MODEL
app.state.config.TASK_MODEL_EXTERNAL = TASK_MODEL_EXTERNAL
app.state.config.TITLE_GENERATION_PROMPT_TEMPLATE = TITLE_GENERATION_PROMPT_TEMPLATE
app.state.config.SEARCH_QUERY_GENERATION_PROMPT_TEMPLATE = (
    SEARCH_QUERY_GENERATION_PROMPT_TEMPLATE
)
app.state.config.SEARCH_QUERY_PROMPT_LENGTH_THRESHOLD = (
    SEARCH_QUERY_PROMPT_LENGTH_THRESHOLD
)
app.state.config.TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE = (
    TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE
)

app.state.MODELS = {}

origins = ["*"]


##################################
#
# ChatCompletion Middleware
#
##################################


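# Ask the task model which toolkit function to call (it returns JSON with "name"
# and "parameters"), then invoke that function, passing any reserved parameters
# (__user__, __messages__, __files__, __model__, __id__) its signature declares.
# Returns (function_result, file_handler) on success, otherwise (None, False).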
async def get_function_call_response(
    messages, files, tool_id, template, task_model_id, user
):
    tool = Tools.get_tool_by_id(tool_id)
    tools_specs = json.dumps(tool.specs, indent=2)
    content = tools_function_calling_generation_template(template, tools_specs)

    user_message = get_last_user_message(messages)
    prompt = (
        "History:\n"
        + "\n".join(
            [
                f"{message['role'].upper()}: \"\"\"{message['content']}\"\"\""
                for message in messages[::-1][:4]
            ]
        )
        + f"\nQuery: {user_message}"
    )

    print(prompt)

    payload = {
        "model": task_model_id,
        "messages": [
            {"role": "system", "content": content},
            {"role": "user", "content": f"Query: {prompt}"},
        ],
        "stream": False,
    }

    try:
        payload = filter_pipeline(payload, user)
    except Exception as e:
        raise e

    model = app.state.MODELS[task_model_id]

    response = None
    try:
        if model["owned_by"] == "ollama":
            response = await generate_ollama_chat_completion(payload, user=user)
        else:
            response = await generate_openai_chat_completion(payload, user=user)

        content = None

        if hasattr(response, "body_iterator"):
            async for chunk in response.body_iterator:
                data = json.loads(chunk.decode("utf-8"))
                content = data["choices"][0]["message"]["content"]

            # Cleanup any remaining background tasks if necessary
            if response.background is not None:
                await response.background()
        else:
            content = response["choices"][0]["message"]["content"]

        # Parse the function response
        if content is not None:
            print(f"content: {content}")
            result = json.loads(content)
            print(result)

            # Call the function
            if "name" in result:
                if tool_id in webui_app.state.TOOLS:
                    toolkit_module = webui_app.state.TOOLS[tool_id]
                else:
                    toolkit_module = load_toolkit_module_by_id(tool_id)
                    webui_app.state.TOOLS[tool_id] = toolkit_module

                file_handler = False
                # check if toolkit_module has file_handler self variable
                if hasattr(toolkit_module, "file_handler"):
                    file_handler = True
                    print("file_handler: ", file_handler)

                function = getattr(toolkit_module, result["name"])
                function_result = None
                try:
                    # Get the signature of the function
                    sig = inspect.signature(function)
                    params = result["parameters"]

                    if "__user__" in sig.parameters:
                        # Call the function with the '__user__' parameter included
                        params = {
                            **params,
                            "__user__": {
                                "id": user.id,
                                "email": user.email,
                                "name": user.name,
                                "role": user.role,
                            },
                        }

                    if "__messages__" in sig.parameters:
                        # Call the function with the '__messages__' parameter included
                        params = {
                            **params,
                            "__messages__": messages,
                        }

                    if "__files__" in sig.parameters:
                        # Call the function with the '__files__' parameter included
                        params = {
                            **params,
                            "__files__": files,
                        }

                    if "__model__" in sig.parameters:
                        # Call the function with the '__model__' parameter included
                        params = {
                            **params,
                            "__model__": model,
                        }

                    if "__id__" in sig.parameters:
                        # Call the function with the '__id__' parameter included
                        params = {
                            **params,
                            "__id__": tool_id,
                        }

                    function_result = function(**params)
                except Exception as e:
                    print(e)

                # Add the function result to the system prompt
                if function_result is not None:
                    return function_result, file_handler
    except Exception as e:
        print(f"Error: {e}")

    return None, False


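# Middleware for chat completion requests (/ollama/api/chat and /chat/completions):
# resolves tool function calls and RAG retrieval for the request, injects the
# gathered context into the system message, rewrites the request body, and
# prepends any citations to the streamed response.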
class ChatCompletionMiddleware(BaseHTTPMiddleware):
    async def dispatch(self, request: Request, call_next):
        data_items = []

        if request.method == "POST" and (
            "/ollama/api/chat" in request.url.path
            or "/chat/completions" in request.url.path
        ):
            log.debug(f"request.url.path: {request.url.path}")

            # Read the original request body
            body = await request.body()
            body_str = body.decode("utf-8")
            data = json.loads(body_str) if body_str else {}

            model_id = data["model"]

            user = get_current_user(
                request,
                get_http_authorization_cred(request.headers.get("Authorization")),
            )

            # Set the task model
            task_model_id = model_id
            if task_model_id not in app.state.MODELS:
                raise HTTPException(
                    status_code=status.HTTP_404_NOT_FOUND,
                    detail="Model not found",
                )

            # Check if the user has a custom task model
            # If the user has a custom task model, use that model
            if app.state.MODELS[task_model_id]["owned_by"] == "ollama":
                if (
                    app.state.config.TASK_MODEL
                    and app.state.config.TASK_MODEL in app.state.MODELS
                ):
                    task_model_id = app.state.config.TASK_MODEL
            else:
                if (
                    app.state.config.TASK_MODEL_EXTERNAL
                    and app.state.config.TASK_MODEL_EXTERNAL in app.state.MODELS
                ):
                    task_model_id = app.state.config.TASK_MODEL_EXTERNAL

            skip_files = False

            prompt = get_last_user_message(data["messages"])
            context = ""

            # If tool_ids field is present, call the functions
            if "tool_ids" in data:
                print(data["tool_ids"])
                for tool_id in data["tool_ids"]:
                    print(tool_id)
                    try:
                        response, file_handler = await get_function_call_response(
                            messages=data["messages"],
                            files=data.get("files", []),
                            tool_id=tool_id,
                            template=app.state.config.TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE,
                            task_model_id=task_model_id,
                            user=user,
                        )

                        print(file_handler)
                        if isinstance(response, str):
                            context += ("\n" if context != "" else "") + response

                        if file_handler:
                            skip_files = True
                    except Exception as e:
                        print(f"Error: {e}")

                del data["tool_ids"]

                print(f"tool_context: {context}")

            # If files field is present, generate RAG completions
            # If skip_files is True, skip the RAG completions
            if "files" in data:
                if not skip_files:
                    data = {**data}
                    rag_context, citations = get_rag_context(
                        files=data["files"],
                        messages=data["messages"],
                        embedding_function=rag_app.state.EMBEDDING_FUNCTION,
                        k=rag_app.state.config.TOP_K,
                        reranking_function=rag_app.state.sentence_transformer_rf,
                        r=rag_app.state.config.RELEVANCE_THRESHOLD,
                        hybrid_search=rag_app.state.config.ENABLE_RAG_HYBRID_SEARCH,
                    )
                    if rag_context:
                        context += ("\n" if context != "" else "") + rag_context

                    log.debug(f"rag_context: {rag_context}, citations: {citations}")

                    if citations:
                        data_items.append({"citations": citations})

                del data["files"]

            if context != "":
                system_prompt = rag_template(
                    rag_app.state.config.RAG_TEMPLATE, context, prompt
                )
                print(system_prompt)
                data["messages"] = add_or_update_system_message(
                    system_prompt, data["messages"]
                )

            modified_body_bytes = json.dumps(data).encode("utf-8")
            # Replace the request body with the modified one
            request._body = modified_body_bytes
            # Set custom header to ensure content-length matches new body length
            request.headers.__dict__["_list"] = [
                (b"content-length", str(len(modified_body_bytes)).encode("utf-8")),
                *[
                    (k, v)
                    for k, v in request.headers.raw
                    if k.lower() != b"content-length"
                ],
            ]

        response = await call_next(request)

        # If there are data_items to inject into the response
        if len(data_items) > 0:
            if isinstance(response, StreamingResponse):
                # If it's a streaming response, inject it as SSE event or NDJSON line
                content_type = response.headers.get("Content-Type")
                if "text/event-stream" in content_type:
                    return StreamingResponse(
                        self.openai_stream_wrapper(response.body_iterator, data_items),
                    )
                if "application/x-ndjson" in content_type:
                    return StreamingResponse(
                        self.ollama_stream_wrapper(response.body_iterator, data_items),
                    )

        return response

    async def _receive(self, body: bytes):
        return {"type": "http.request", "body": body, "more_body": False}

    async def openai_stream_wrapper(self, original_generator, data_items):
        for item in data_items:
            yield f"data: {json.dumps(item)}\n\n"

        async for data in original_generator:
            yield data

    async def ollama_stream_wrapper(self, original_generator, data_items):
        for item in data_items:
            yield f"{json.dumps(item)}\n"

        async for data in original_generator:
            yield data


app.add_middleware(ChatCompletionMiddleware)


##################################
#
# Pipeline Middleware
#
##################################


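# Send the payload through every matching "filter" pipeline's /filter/inlet
# endpoint in priority order (the model's own pipeline last) so pipelines can
# rewrite the request body before it reaches the model.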
def filter_pipeline(payload, user):
    user = {"id": user.id, "email": user.email, "name": user.name, "role": user.role}
    model_id = payload["model"]
    filters = [
        model
        for model in app.state.MODELS.values()
        if "pipeline" in model
        and "type" in model["pipeline"]
        and model["pipeline"]["type"] == "filter"
        and (
            model["pipeline"]["pipelines"] == ["*"]
            or any(
                model_id == target_model_id
                for target_model_id in model["pipeline"]["pipelines"]
            )
        )
    ]
    sorted_filters = sorted(filters, key=lambda x: x["pipeline"]["priority"])

    model = app.state.MODELS[model_id]

    if "pipeline" in model:
        sorted_filters.append(model)

    for filter in sorted_filters:
        r = None
        try:
            urlIdx = filter["urlIdx"]

            url = openai_app.state.config.OPENAI_API_BASE_URLS[urlIdx]
            key = openai_app.state.config.OPENAI_API_KEYS[urlIdx]

            if key != "":
                headers = {"Authorization": f"Bearer {key}"}
                r = requests.post(
                    f"{url}/{filter['id']}/filter/inlet",
                    headers=headers,
                    json={
                        "user": user,
                        "body": payload,
                    },
                )

                r.raise_for_status()
                payload = r.json()
        except Exception as e:
            # Handle connection error here
            print(f"Connection error: {e}")

            if r is not None:
                res = None
                try:
                    res = r.json()
                except Exception:
                    pass

                if res and "detail" in res:
                    raise Exception(r.status_code, res["detail"])

    if "pipeline" not in app.state.MODELS[model_id]:
        if "chat_id" in payload:
            del payload["chat_id"]

        if "title" in payload:
            del payload["title"]

        if "task" in payload:
            del payload["task"]

    return payload


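# Applies filter_pipeline() to incoming chat completion requests and replaces the
# request body (and its content-length header) with the filtered payload.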
class PipelineMiddleware(BaseHTTPMiddleware):
    async def dispatch(self, request: Request, call_next):
        if request.method == "POST" and (
            "/ollama/api/chat" in request.url.path
            or "/chat/completions" in request.url.path
        ):
            log.debug(f"request.url.path: {request.url.path}")

            # Read the original request body
            body = await request.body()
            # Decode body to string
            body_str = body.decode("utf-8")
            # Parse string to JSON
            data = json.loads(body_str) if body_str else {}

            user = get_current_user(
                request,
                get_http_authorization_cred(request.headers.get("Authorization")),
            )

            try:
                data = filter_pipeline(data, user)
            except Exception as e:
                return JSONResponse(
                    status_code=e.args[0],
                    content={"detail": e.args[1]},
                )

            modified_body_bytes = json.dumps(data).encode("utf-8")
            # Replace the request body with the modified one
            request._body = modified_body_bytes
            # Set custom header to ensure content-length matches new body length
            request.headers.__dict__["_list"] = [
                (b"content-length", str(len(modified_body_bytes)).encode("utf-8")),
                *[
                    (k, v)
                    for k, v in request.headers.raw
                    if k.lower() != b"content-length"
                ],
            ]

        response = await call_next(request)
        return response

    async def _receive(self, body: bytes):
        return {"type": "http.request", "body": body, "more_body": False}


app.add_middleware(PipelineMiddleware)

app.add_middleware(
    CORSMiddleware,
    allow_origins=origins,
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)


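# Lazily populate app.state.MODELS on the first request and report the request
# duration (whole seconds) via the X-Process-Time response header.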
@app.middleware("http")
async def check_url(request: Request, call_next):
    if len(app.state.MODELS) == 0:
        await get_all_models()
    else:
        pass

    start_time = int(time.time())
    response = await call_next(request)
    process_time = int(time.time()) - start_time
    response.headers["X-Process-Time"] = str(process_time)

    return response


@app.middleware("http")
async def update_embedding_function(request: Request, call_next):
    response = await call_next(request)
    if "/embedding/update" in request.url.path:
        webui_app.state.EMBEDDING_FUNCTION = rag_app.state.EMBEDDING_FUNCTION
    return response


app.mount("/ws", socket_app)

app.mount("/ollama", ollama_app)
app.mount("/openai", openai_app)

app.mount("/images/api/v1", images_app)
app.mount("/audio/api/v1", audio_app)
app.mount("/rag/api/v1", rag_app)

app.mount("/api/v1", webui_app)

webui_app.state.EMBEDDING_FUNCTION = rag_app.state.EMBEDDING_FUNCTION


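# Aggregate models from the OpenAI and Ollama backends plus custom presets from
# the database into app.state.MODELS (also shared with the webui app).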
async def get_all_models():
    openai_models = []
    ollama_models = []

    if app.state.config.ENABLE_OPENAI_API:
        openai_models = await get_openai_models()
        openai_models = openai_models["data"]

    if app.state.config.ENABLE_OLLAMA_API:
        ollama_models = await get_ollama_models()
        ollama_models = [
            {
                "id": model["model"],
                "name": model["name"],
                "object": "model",
                "created": int(time.time()),
                "owned_by": "ollama",
                "ollama": model,
            }
            for model in ollama_models["models"]
        ]

    models = openai_models + ollama_models
    custom_models = Models.get_all_models()

    for custom_model in custom_models:
        if custom_model.base_model_id is None:
            for model in models:
                if (
                    custom_model.id == model["id"]
                    or custom_model.id == model["id"].split(":")[0]
                ):
                    model["name"] = custom_model.name
                    model["info"] = custom_model.model_dump()
        else:
            owned_by = "openai"

            for model in models:
                if (
                    custom_model.base_model_id == model["id"]
                    or custom_model.base_model_id == model["id"].split(":")[0]
                ):
                    owned_by = model["owned_by"]
                    break

            models.append(
                {
                    "id": custom_model.id,
                    "name": custom_model.name,
                    "object": "model",
                    "created": custom_model.created_at,
                    "owned_by": owned_by,
                    "info": custom_model.model_dump(),
                    "preset": True,
                }
            )

    app.state.MODELS = {model["id"]: model for model in models}
    webui_app.state.MODELS = app.state.MODELS

    return models


@app.get("/api/models")
async def get_models(user=Depends(get_verified_user)):
    models = await get_all_models()

    # Filter out filter pipelines
    models = [
        model
        for model in models
        if "pipeline" not in model or model["pipeline"].get("type", None) != "filter"
    ]

    if app.state.config.ENABLE_MODEL_FILTER:
        if user.role == "user":
            models = list(
                filter(
                    lambda model: model["id"] in app.state.config.MODEL_FILTER_LIST,
                    models,
                )
            )
            return {"data": models}

    return {"data": models}


@app.post("/api/chat/completions")
async def generate_chat_completions(form_data: dict, user=Depends(get_verified_user)):
    model_id = form_data["model"]
    if model_id not in app.state.MODELS:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail="Model not found",
        )

    model = app.state.MODELS[model_id]
    print(model)

    if model["owned_by"] == "ollama":
        return await generate_ollama_chat_completion(form_data, user=user)
    else:
        return await generate_openai_chat_completion(form_data, user=user)


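# Called by the frontend when a chat response finishes: runs the completed
# message data through every matching filter pipeline's /filter/outlet endpoint.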
@app.post("/api/chat/completed")
async def chat_completed(form_data: dict, user=Depends(get_verified_user)):
    data = form_data
    model_id = data["model"]

    filters = [
        model
        for model in app.state.MODELS.values()
        if "pipeline" in model
        and "type" in model["pipeline"]
        and model["pipeline"]["type"] == "filter"
        and (
            model["pipeline"]["pipelines"] == ["*"]
            or any(
                model_id == target_model_id
                for target_model_id in model["pipeline"]["pipelines"]
            )
        )
    ]
    sorted_filters = sorted(filters, key=lambda x: x["pipeline"]["priority"])

    print(model_id)
    if model_id in app.state.MODELS:
        model = app.state.MODELS[model_id]
        if "pipeline" in model:
            sorted_filters = [model] + sorted_filters

    for filter in sorted_filters:
        r = None
        try:
            urlIdx = filter["urlIdx"]

            url = openai_app.state.config.OPENAI_API_BASE_URLS[urlIdx]
            key = openai_app.state.config.OPENAI_API_KEYS[urlIdx]

            if key != "":
                headers = {"Authorization": f"Bearer {key}"}
                r = requests.post(
                    f"{url}/{filter['id']}/filter/outlet",
                    headers=headers,
                    json={
                        "user": {"id": user.id, "name": user.name, "role": user.role},
                        "body": data,
                    },
                )

                r.raise_for_status()
                data = r.json()
        except Exception as e:
            # Handle connection error here
            print(f"Connection error: {e}")

            if r is not None:
                try:
                    res = r.json()
                    if "detail" in res:
                        return JSONResponse(
                            status_code=r.status_code,
                            content=res,
                        )
                except:
                    pass
            else:
                pass

    return data


##################################
#
# Task Endpoints
#
##################################


# TODO: Refactor task API endpoints below into a separate file


@app.get("/api/task/config")
async def get_task_config(user=Depends(get_verified_user)):
    return {
        "TASK_MODEL": app.state.config.TASK_MODEL,
        "TASK_MODEL_EXTERNAL": app.state.config.TASK_MODEL_EXTERNAL,
        "TITLE_GENERATION_PROMPT_TEMPLATE": app.state.config.TITLE_GENERATION_PROMPT_TEMPLATE,
        "SEARCH_QUERY_GENERATION_PROMPT_TEMPLATE": app.state.config.SEARCH_QUERY_GENERATION_PROMPT_TEMPLATE,
        "SEARCH_QUERY_PROMPT_LENGTH_THRESHOLD": app.state.config.SEARCH_QUERY_PROMPT_LENGTH_THRESHOLD,
        "TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE": app.state.config.TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE,
    }


class TaskConfigForm(BaseModel):
    TASK_MODEL: Optional[str]
    TASK_MODEL_EXTERNAL: Optional[str]
    TITLE_GENERATION_PROMPT_TEMPLATE: str
    SEARCH_QUERY_GENERATION_PROMPT_TEMPLATE: str
    SEARCH_QUERY_PROMPT_LENGTH_THRESHOLD: int
    TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE: str


@app.post("/api/task/config/update")
async def update_task_config(form_data: TaskConfigForm, user=Depends(get_admin_user)):
    app.state.config.TASK_MODEL = form_data.TASK_MODEL
    app.state.config.TASK_MODEL_EXTERNAL = form_data.TASK_MODEL_EXTERNAL
    app.state.config.TITLE_GENERATION_PROMPT_TEMPLATE = (
        form_data.TITLE_GENERATION_PROMPT_TEMPLATE
    )
    app.state.config.SEARCH_QUERY_GENERATION_PROMPT_TEMPLATE = (
        form_data.SEARCH_QUERY_GENERATION_PROMPT_TEMPLATE
    )
    app.state.config.SEARCH_QUERY_PROMPT_LENGTH_THRESHOLD = (
        form_data.SEARCH_QUERY_PROMPT_LENGTH_THRESHOLD
    )
    app.state.config.TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE = (
        form_data.TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE
    )

    return {
        "TASK_MODEL": app.state.config.TASK_MODEL,
        "TASK_MODEL_EXTERNAL": app.state.config.TASK_MODEL_EXTERNAL,
        "TITLE_GENERATION_PROMPT_TEMPLATE": app.state.config.TITLE_GENERATION_PROMPT_TEMPLATE,
        "SEARCH_QUERY_GENERATION_PROMPT_TEMPLATE": app.state.config.SEARCH_QUERY_GENERATION_PROMPT_TEMPLATE,
        "SEARCH_QUERY_PROMPT_LENGTH_THRESHOLD": app.state.config.SEARCH_QUERY_PROMPT_LENGTH_THRESHOLD,
        "TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE": app.state.config.TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE,
    }


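# The task endpoints below share a pattern: when a dedicated task model is
# configured and available (TASK_MODEL for Ollama-owned models, TASK_MODEL_EXTERNAL
# otherwise), it is used in place of the requested chat model.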
@app.post("/api/task/title/completions")
async def generate_title(form_data: dict, user=Depends(get_verified_user)):
    print("generate_title")

    model_id = form_data["model"]
    if model_id not in app.state.MODELS:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail="Model not found",
        )

    # Check if the user has a custom task model
    # If the user has a custom task model, use that model
    if app.state.MODELS[model_id]["owned_by"] == "ollama":
        if app.state.config.TASK_MODEL:
            task_model_id = app.state.config.TASK_MODEL
            if task_model_id in app.state.MODELS:
                model_id = task_model_id
    else:
        if app.state.config.TASK_MODEL_EXTERNAL:
            task_model_id = app.state.config.TASK_MODEL_EXTERNAL
            if task_model_id in app.state.MODELS:
                model_id = task_model_id

    print(model_id)
    model = app.state.MODELS[model_id]

    template = app.state.config.TITLE_GENERATION_PROMPT_TEMPLATE

    content = title_generation_template(
        template,
        form_data["prompt"],
        {
            "name": user.name,
            "location": user.info.get("location") if user.info else None,
        },
    )

    payload = {
        "model": model_id,
        "messages": [{"role": "user", "content": content}],
        "stream": False,
        "max_tokens": 50,
        "chat_id": form_data.get("chat_id", None),
        "title": True,
    }

    log.debug(payload)

    try:
        payload = filter_pipeline(payload, user)
    except Exception as e:
        return JSONResponse(
            status_code=e.args[0],
            content={"detail": e.args[1]},
        )

    if model["owned_by"] == "ollama":
        return await generate_ollama_chat_completion(payload, user=user)
    else:
        return await generate_openai_chat_completion(payload, user=user)


@app.post("/api/task/query/completions")
async def generate_search_query(form_data: dict, user=Depends(get_verified_user)):
    print("generate_search_query")

    if len(form_data["prompt"]) < app.state.config.SEARCH_QUERY_PROMPT_LENGTH_THRESHOLD:
        raise HTTPException(
            status_code=status.HTTP_400_BAD_REQUEST,
            detail=f"Skip search query generation for short prompts (< {app.state.config.SEARCH_QUERY_PROMPT_LENGTH_THRESHOLD} characters)",
        )

    model_id = form_data["model"]
    if model_id not in app.state.MODELS:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail="Model not found",
        )

    # Check if the user has a custom task model
    # If the user has a custom task model, use that model
    if app.state.MODELS[model_id]["owned_by"] == "ollama":
        if app.state.config.TASK_MODEL:
            task_model_id = app.state.config.TASK_MODEL
            if task_model_id in app.state.MODELS:
                model_id = task_model_id
    else:
        if app.state.config.TASK_MODEL_EXTERNAL:
            task_model_id = app.state.config.TASK_MODEL_EXTERNAL
            if task_model_id in app.state.MODELS:
                model_id = task_model_id

    print(model_id)
    model = app.state.MODELS[model_id]

    template = app.state.config.SEARCH_QUERY_GENERATION_PROMPT_TEMPLATE

    content = search_query_generation_template(
        template, form_data["prompt"], {"name": user.name}
    )

    payload = {
        "model": model_id,
        "messages": [{"role": "user", "content": content}],
        "stream": False,
        "max_tokens": 30,
        "task": True,
    }

    print(payload)

    try:
        payload = filter_pipeline(payload, user)
    except Exception as e:
        return JSONResponse(
            status_code=e.args[0],
            content={"detail": e.args[1]},
        )

    if model["owned_by"] == "ollama":
        return await generate_ollama_chat_completion(payload, user=user)
    else:
        return await generate_openai_chat_completion(payload, user=user)


@app.post("/api/task/emoji/completions")
async def generate_emoji(form_data: dict, user=Depends(get_verified_user)):
    print("generate_emoji")

    model_id = form_data["model"]
    if model_id not in app.state.MODELS:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail="Model not found",
        )

    # Check if the user has a custom task model
    # If the user has a custom task model, use that model
    if app.state.MODELS[model_id]["owned_by"] == "ollama":
        if app.state.config.TASK_MODEL:
            task_model_id = app.state.config.TASK_MODEL
            if task_model_id in app.state.MODELS:
                model_id = task_model_id
    else:
        if app.state.config.TASK_MODEL_EXTERNAL:
            task_model_id = app.state.config.TASK_MODEL_EXTERNAL
            if task_model_id in app.state.MODELS:
                model_id = task_model_id

    print(model_id)
    model = app.state.MODELS[model_id]

    template = '''
Your task is to reflect the speaker's likely facial expression through a fitting emoji. Interpret emotions from the message and reflect their facial expression using fitting, diverse emojis (e.g., 😊, 😢, 😡, 😱).

Message: """{{prompt}}"""
'''

    content = title_generation_template(
        template,
        form_data["prompt"],
        {
            "name": user.name,
            "location": user.info.get("location") if user.info else None,
        },
    )

    payload = {
        "model": model_id,
        "messages": [{"role": "user", "content": content}],
        "stream": False,
        "max_tokens": 4,
        "chat_id": form_data.get("chat_id", None),
        "task": True,
    }

    log.debug(payload)

    try:
        payload = filter_pipeline(payload, user)
    except Exception as e:
        return JSONResponse(
            status_code=e.args[0],
            content={"detail": e.args[1]},
        )

    if model["owned_by"] == "ollama":
        return await generate_ollama_chat_completion(payload, user=user)
    else:
        return await generate_openai_chat_completion(payload, user=user)


@app.post("/api/task/tools/completions")
async def get_tools_function_calling(form_data: dict, user=Depends(get_verified_user)):
    print("get_tools_function_calling")

    model_id = form_data["model"]
    if model_id not in app.state.MODELS:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail="Model not found",
        )

    # Check if the user has a custom task model
    # If the user has a custom task model, use that model
    if app.state.MODELS[model_id]["owned_by"] == "ollama":
        if app.state.config.TASK_MODEL:
            task_model_id = app.state.config.TASK_MODEL
            if task_model_id in app.state.MODELS:
                model_id = task_model_id
    else:
        if app.state.config.TASK_MODEL_EXTERNAL:
            task_model_id = app.state.config.TASK_MODEL_EXTERNAL
            if task_model_id in app.state.MODELS:
                model_id = task_model_id

    print(model_id)
    template = app.state.config.TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE

    try:
        context, file_handler = await get_function_call_response(
            form_data["messages"],
            form_data.get("files", []),
            form_data["tool_id"],
            template,
            model_id,
            user,
        )
        return context
    except Exception as e:
        return JSONResponse(
            status_code=e.args[0],
            content={"detail": e.args[1]},
        )


##################################
#
# Pipelines Endpoints
#
##################################


# TODO: Refactor pipelines API endpoints below into a separate file


@app.get("/api/pipelines/list")
async def get_pipelines_list(user=Depends(get_admin_user)):
    responses = await get_openai_models(raw=True)
    print(responses)

    urlIdxs = [
        idx
        for idx, response in enumerate(responses)
        if response is not None and "pipelines" in response
    ]

    return {
        "data": [
            {
                "url": openai_app.state.config.OPENAI_API_BASE_URLS[urlIdx],
                "idx": urlIdx,
            }
            for urlIdx in urlIdxs
        ]
    }


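# Accepts a .py pipeline file, saves it to the cache directory, forwards it to
# the selected pipelines server (by urlIdx), and removes the temporary copy
# whether the upload succeeds or fails.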
@app.post("/api/pipelines/upload")
async def upload_pipeline(
    urlIdx: int = Form(...), file: UploadFile = File(...), user=Depends(get_admin_user)
):
    print("upload_pipeline", urlIdx, file.filename)

    # Check if the uploaded file is a python file
    if not file.filename.endswith(".py"):
        raise HTTPException(
            status_code=status.HTTP_400_BAD_REQUEST,
            detail="Only Python (.py) files are allowed.",
        )

    upload_folder = f"{CACHE_DIR}/pipelines"
    os.makedirs(upload_folder, exist_ok=True)
    file_path = os.path.join(upload_folder, file.filename)

    r = None
    try:
        # Save the uploaded file
        with open(file_path, "wb") as buffer:
            shutil.copyfileobj(file.file, buffer)

        url = openai_app.state.config.OPENAI_API_BASE_URLS[urlIdx]
        key = openai_app.state.config.OPENAI_API_KEYS[urlIdx]

        headers = {"Authorization": f"Bearer {key}"}

        with open(file_path, "rb") as f:
            files = {"file": f}
            r = requests.post(f"{url}/pipelines/upload", headers=headers, files=files)

        r.raise_for_status()
        data = r.json()

        return {**data}
    except Exception as e:
        # Handle connection error here
        print(f"Connection error: {e}")

        detail = "Pipeline not found"
        if r is not None:
            try:
                res = r.json()
                if "detail" in res:
                    detail = res["detail"]
            except Exception:
                pass

        raise HTTPException(
            status_code=(r.status_code if r is not None else status.HTTP_404_NOT_FOUND),
            detail=detail,
        )
    finally:
        # Ensure the file is deleted after the upload is completed or on failure
        if os.path.exists(file_path):
            os.remove(file_path)


class AddPipelineForm(BaseModel):
    url: str
    urlIdx: int


@app.post("/api/pipelines/add")
async def add_pipeline(form_data: AddPipelineForm, user=Depends(get_admin_user)):
    r = None
    try:
        urlIdx = form_data.urlIdx

        url = openai_app.state.config.OPENAI_API_BASE_URLS[urlIdx]
        key = openai_app.state.config.OPENAI_API_KEYS[urlIdx]

        headers = {"Authorization": f"Bearer {key}"}
        r = requests.post(
            f"{url}/pipelines/add", headers=headers, json={"url": form_data.url}
        )

        r.raise_for_status()
        data = r.json()

        return {**data}
    except Exception as e:
        # Handle connection error here
        print(f"Connection error: {e}")

        detail = "Pipeline not found"
        if r is not None:
            try:
                res = r.json()
                if "detail" in res:
                    detail = res["detail"]
            except:
                pass

        raise HTTPException(
            status_code=(r.status_code if r is not None else status.HTTP_404_NOT_FOUND),
            detail=detail,
        )


class DeletePipelineForm(BaseModel):
    id: str
    urlIdx: int


@app.delete("/api/pipelines/delete")
async def delete_pipeline(form_data: DeletePipelineForm, user=Depends(get_admin_user)):
    r = None
    try:
        urlIdx = form_data.urlIdx

        url = openai_app.state.config.OPENAI_API_BASE_URLS[urlIdx]
        key = openai_app.state.config.OPENAI_API_KEYS[urlIdx]

        headers = {"Authorization": f"Bearer {key}"}
        r = requests.delete(
            f"{url}/pipelines/delete", headers=headers, json={"id": form_data.id}
        )

        r.raise_for_status()
        data = r.json()

        return {**data}
    except Exception as e:
        # Handle connection error here
        print(f"Connection error: {e}")

        detail = "Pipeline not found"
        if r is not None:
            try:
                res = r.json()
                if "detail" in res:
                    detail = res["detail"]
            except:
                pass

        raise HTTPException(
            status_code=(r.status_code if r is not None else status.HTTP_404_NOT_FOUND),
            detail=detail,
        )


@app.get("/api/pipelines")
async def get_pipelines(urlIdx: Optional[int] = None, user=Depends(get_admin_user)):
    r = None
    try:
        url = openai_app.state.config.OPENAI_API_BASE_URLS[urlIdx]
        key = openai_app.state.config.OPENAI_API_KEYS[urlIdx]

        headers = {"Authorization": f"Bearer {key}"}
        r = requests.get(f"{url}/pipelines", headers=headers)

        r.raise_for_status()
        data = r.json()

        return {**data}
    except Exception as e:
        # Handle connection error here
        print(f"Connection error: {e}")

        detail = "Pipeline not found"
        if r is not None:
            try:
                res = r.json()
                if "detail" in res:
                    detail = res["detail"]
            except:
                pass

        raise HTTPException(
            status_code=(r.status_code if r is not None else status.HTTP_404_NOT_FOUND),
            detail=detail,
        )


@app.get("/api/pipelines/{pipeline_id}/valves")
async def get_pipeline_valves(
    urlIdx: Optional[int], pipeline_id: str, user=Depends(get_admin_user)
):
    models = await get_all_models()
    r = None
    try:
        url = openai_app.state.config.OPENAI_API_BASE_URLS[urlIdx]
        key = openai_app.state.config.OPENAI_API_KEYS[urlIdx]

        headers = {"Authorization": f"Bearer {key}"}
        r = requests.get(f"{url}/{pipeline_id}/valves", headers=headers)

        r.raise_for_status()
        data = r.json()

        return {**data}
    except Exception as e:
        # Handle connection error here
        print(f"Connection error: {e}")

        detail = "Pipeline not found"
        if r is not None:
            try:
                res = r.json()
                if "detail" in res:
                    detail = res["detail"]
            except:
                pass

        raise HTTPException(
            status_code=(r.status_code if r is not None else status.HTTP_404_NOT_FOUND),
            detail=detail,
        )


@app.get("/api/pipelines/{pipeline_id}/valves/spec")
async def get_pipeline_valves_spec(
    urlIdx: Optional[int], pipeline_id: str, user=Depends(get_admin_user)
):
    models = await get_all_models()
    r = None
    try:
        url = openai_app.state.config.OPENAI_API_BASE_URLS[urlIdx]
        key = openai_app.state.config.OPENAI_API_KEYS[urlIdx]

        headers = {"Authorization": f"Bearer {key}"}
        r = requests.get(f"{url}/{pipeline_id}/valves/spec", headers=headers)

        r.raise_for_status()
        data = r.json()

        return {**data}
    except Exception as e:
        # Handle connection error here
        print(f"Connection error: {e}")

        detail = "Pipeline not found"
        if r is not None:
            try:
                res = r.json()
                if "detail" in res:
                    detail = res["detail"]
            except:
                pass

        raise HTTPException(
            status_code=(r.status_code if r is not None else status.HTTP_404_NOT_FOUND),
            detail=detail,
        )


@app.post("/api/pipelines/{pipeline_id}/valves/update")
async def update_pipeline_valves(
    urlIdx: Optional[int],
    pipeline_id: str,
    form_data: dict,
    user=Depends(get_admin_user),
):
    models = await get_all_models()
    r = None
    try:
        url = openai_app.state.config.OPENAI_API_BASE_URLS[urlIdx]
        key = openai_app.state.config.OPENAI_API_KEYS[urlIdx]

        headers = {"Authorization": f"Bearer {key}"}
        r = requests.post(
            f"{url}/{pipeline_id}/valves/update",
            headers=headers,
            json={**form_data},
        )

        r.raise_for_status()
        data = r.json()

        return {**data}
    except Exception as e:
        # Handle connection error here
        print(f"Connection error: {e}")

        detail = "Pipeline not found"
        if r is not None:
            try:
                res = r.json()
                if "detail" in res:
                    detail = res["detail"]
            except:
                pass

        raise HTTPException(
            status_code=(r.status_code if r is not None else status.HTTP_404_NOT_FOUND),
            detail=detail,
        )


##################################
#
# Config Endpoints
#
##################################


@app.get("/api/config")
async def get_app_config():
    # Checking and Handling the Absence of 'ui' in CONFIG_DATA
    default_locale = "en-US"
    if "ui" in CONFIG_DATA:
        default_locale = CONFIG_DATA["ui"].get("default_locale", "en-US")

    # The Rest of the Function Now Uses the Variables Defined Above
    return {
        "status": True,
        "name": WEBUI_NAME,
        "version": VERSION,
        "default_locale": default_locale,
        "default_models": webui_app.state.config.DEFAULT_MODELS,
        "default_prompt_suggestions": webui_app.state.config.DEFAULT_PROMPT_SUGGESTIONS,
        "features": {
            "auth": WEBUI_AUTH,
            "auth_trusted_header": bool(webui_app.state.AUTH_TRUSTED_EMAIL_HEADER),
            "enable_signup": webui_app.state.config.ENABLE_SIGNUP,
            "enable_web_search": rag_app.state.config.ENABLE_RAG_WEB_SEARCH,
            "enable_image_generation": images_app.state.config.ENABLED,
            "enable_community_sharing": webui_app.state.config.ENABLE_COMMUNITY_SHARING,
            "enable_admin_export": ENABLE_ADMIN_EXPORT,
        },
        "audio": {
            "tts": {
                "engine": audio_app.state.config.TTS_ENGINE,
                "voice": audio_app.state.config.TTS_VOICE,
            },
            "stt": {
                "engine": audio_app.state.config.STT_ENGINE,
            },
        },
    }


@app.get("/api/config/model/filter")
async def get_model_filter_config(user=Depends(get_admin_user)):
    return {
        "enabled": app.state.config.ENABLE_MODEL_FILTER,
        "models": app.state.config.MODEL_FILTER_LIST,
    }


class ModelFilterConfigForm(BaseModel):
    enabled: bool
    models: List[str]


@app.post("/api/config/model/filter")
async def update_model_filter_config(
    form_data: ModelFilterConfigForm, user=Depends(get_admin_user)
):
    app.state.config.ENABLE_MODEL_FILTER = form_data.enabled
    app.state.config.MODEL_FILTER_LIST = form_data.models

    return {
        "enabled": app.state.config.ENABLE_MODEL_FILTER,
        "models": app.state.config.MODEL_FILTER_LIST,
    }


# TODO: webhook endpoint should be under config endpoints
@app.get("/api/webhook")
async def get_webhook_url(user=Depends(get_admin_user)):
    return {
        "url": app.state.config.WEBHOOK_URL,
    }


class UrlForm(BaseModel):
    url: str


@app.post("/api/webhook")
async def update_webhook_url(form_data: UrlForm, user=Depends(get_admin_user)):
    app.state.config.WEBHOOK_URL = form_data.url
    webui_app.state.WEBHOOK_URL = app.state.config.WEBHOOK_URL

    return {"url": app.state.config.WEBHOOK_URL}


@app.get("/api/version")
async def get_app_version():
    return {
        "version": VERSION,
    }


@app.get("/api/changelog")
async def get_app_changelog():
    return {key: CHANGELOG[key] for idx, key in enumerate(CHANGELOG) if idx < 5}


@app.get("/api/version/updates")
async def get_app_latest_release_version():
    try:
        async with aiohttp.ClientSession(trust_env=True) as session:
            async with session.get(
                "https://api.github.com/repos/open-webui/open-webui/releases/latest"
            ) as response:
                response.raise_for_status()
                data = await response.json()
                latest_version = data["tag_name"]

                return {"current": VERSION, "latest": latest_version[1:]}
    except aiohttp.ClientError as e:
        raise HTTPException(
            status_code=status.HTTP_503_SERVICE_UNAVAILABLE,
            detail=ERROR_MESSAGES.RATE_LIMIT_EXCEEDED,
        )


@app.get("/manifest.json")
async def get_manifest_json():
    return {
        "name": WEBUI_NAME,
        "short_name": WEBUI_NAME,
        "start_url": "/",
        "display": "standalone",
        "background_color": "#343541",
        "theme_color": "#343541",
        "orientation": "portrait-primary",
        "icons": [{"src": "/static/logo.png", "type": "image/png", "sizes": "500x500"}],
    }


@app.get("/opensearch.xml")
async def get_opensearch_xml():
    xml_content = rf"""
<OpenSearchDescription xmlns="http://a9.com/-/spec/opensearch/1.1/" xmlns:moz="http://www.mozilla.org/2006/browser/search/">
<ShortName>{WEBUI_NAME}</ShortName>
<Description>Search {WEBUI_NAME}</Description>
<InputEncoding>UTF-8</InputEncoding>
<Image width="16" height="16" type="image/x-icon">{WEBUI_URL}/favicon.png</Image>
<Url type="text/html" method="get" template="{WEBUI_URL}/?q={"{searchTerms}"}"/>
<moz:SearchForm>{WEBUI_URL}</moz:SearchForm>
</OpenSearchDescription>
"""
    return Response(content=xml_content, media_type="application/xml")


@app.get("/health")
async def healthcheck():
    return {"status": True}


app.mount("/static", StaticFiles(directory=STATIC_DIR), name="static")
app.mount("/cache", StaticFiles(directory=CACHE_DIR), name="cache")

if os.path.exists(FRONTEND_BUILD_DIR):
    mimetypes.add_type("text/javascript", ".js")
    app.mount(
        "/",
        SPAStaticFiles(directory=FRONTEND_BUILD_DIR, html=True),
        name="spa-static-files",
    )
else:
    log.warning(
        f"Frontend build directory not found at '{FRONTEND_BUILD_DIR}'. Serving API only."
    )