# main.py

from contextlib import asynccontextmanager
from bs4 import BeautifulSoup
import json
import markdown
import time
import os
import sys
import logging
import aiohttp
import requests
import mimetypes
import shutil
import uuid
import inspect
import asyncio

from fastapi import FastAPI, Request, Depends, status, UploadFile, File, Form
from fastapi.staticfiles import StaticFiles
from fastapi.responses import JSONResponse
from fastapi import HTTPException
from fastapi.middleware.wsgi import WSGIMiddleware
from fastapi.middleware.cors import CORSMiddleware
from starlette.exceptions import HTTPException as StarletteHTTPException
from starlette.middleware.base import BaseHTTPMiddleware
from starlette.responses import StreamingResponse, Response

from apps.socket.main import app as socket_app
from apps.ollama.main import (
    app as ollama_app,
    OpenAIChatCompletionForm,
    get_all_models as get_ollama_models,
    generate_openai_chat_completion as generate_ollama_chat_completion,
)
from apps.openai.main import (
    app as openai_app,
    get_all_models as get_openai_models,
    generate_chat_completion as generate_openai_chat_completion,
)

from apps.audio.main import app as audio_app
from apps.images.main import app as images_app
from apps.rag.main import app as rag_app
from apps.webui.main import app as webui_app

from pydantic import BaseModel
from typing import List, Optional

from apps.webui.models.models import Models, ModelModel
from apps.webui.models.tools import Tools
from apps.webui.utils import load_toolkit_module_by_id

from utils.utils import (
    get_admin_user,
    get_verified_user,
    get_current_user,
    get_http_authorization_cred,
)
from utils.task import (
    title_generation_template,
    search_query_generation_template,
    tools_function_calling_generation_template,
)
from utils.misc import get_last_user_message, add_or_update_system_message

from apps.rag.utils import get_rag_context, rag_template

from config import (
    CONFIG_DATA,
    WEBUI_NAME,
    WEBUI_URL,
    WEBUI_AUTH,
    ENV,
    VERSION,
    CHANGELOG,
    FRONTEND_BUILD_DIR,
    UPLOAD_DIR,
    CACHE_DIR,
    STATIC_DIR,
    ENABLE_OPENAI_API,
    ENABLE_OLLAMA_API,
    ENABLE_MODEL_FILTER,
    MODEL_FILTER_LIST,
    GLOBAL_LOG_LEVEL,
    SRC_LOG_LEVELS,
    WEBHOOK_URL,
    ENABLE_ADMIN_EXPORT,
    WEBUI_BUILD_HASH,
    TASK_MODEL,
    TASK_MODEL_EXTERNAL,
    TITLE_GENERATION_PROMPT_TEMPLATE,
    SEARCH_QUERY_GENERATION_PROMPT_TEMPLATE,
    SEARCH_QUERY_PROMPT_LENGTH_THRESHOLD,
    TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE,
    AppConfig,
)
from constants import ERROR_MESSAGES

logging.basicConfig(stream=sys.stdout, level=GLOBAL_LOG_LEVEL)
log = logging.getLogger(__name__)
log.setLevel(SRC_LOG_LEVELS["MAIN"])


class SPAStaticFiles(StaticFiles):
    # Serve the single-page app: fall back to index.html for unknown paths
    async def get_response(self, path: str, scope):
        try:
            return await super().get_response(path, scope)
        except (HTTPException, StarletteHTTPException) as ex:
            if ex.status_code == 404:
                return await super().get_response("index.html", scope)
            else:
                raise ex


print(
    rf"""
  ___                    __        __   _     _   _ ___
 / _ \ _ __   ___ _ __   \ \      / /__| |__ | | | |_ _|
| | | | '_ \ / _ \ '_ \   \ \ /\ / / _ \ '_ \| | | || |
| |_| | |_) |  __/ | | |   \ V  V /  __/ |_) | |_| || |
 \___/| .__/ \___|_| |_|    \_/\_/ \___|_.__/ \___/|___|
      |_|

v{VERSION} - building the best open-source AI user interface.
{f"Commit: {WEBUI_BUILD_HASH}" if WEBUI_BUILD_HASH != "dev-build" else ""}
https://github.com/open-webui/open-webui
"""
)

@asynccontextmanager
async def lifespan(app: FastAPI):
    yield


app = FastAPI(
    docs_url="/docs" if ENV == "dev" else None, redoc_url=None, lifespan=lifespan
)

app.state.config = AppConfig()

app.state.config.ENABLE_OPENAI_API = ENABLE_OPENAI_API
app.state.config.ENABLE_OLLAMA_API = ENABLE_OLLAMA_API

app.state.config.ENABLE_MODEL_FILTER = ENABLE_MODEL_FILTER
app.state.config.MODEL_FILTER_LIST = MODEL_FILTER_LIST

app.state.config.WEBHOOK_URL = WEBHOOK_URL

app.state.config.TASK_MODEL = TASK_MODEL
app.state.config.TASK_MODEL_EXTERNAL = TASK_MODEL_EXTERNAL
app.state.config.TITLE_GENERATION_PROMPT_TEMPLATE = TITLE_GENERATION_PROMPT_TEMPLATE
app.state.config.SEARCH_QUERY_GENERATION_PROMPT_TEMPLATE = (
    SEARCH_QUERY_GENERATION_PROMPT_TEMPLATE
)
app.state.config.SEARCH_QUERY_PROMPT_LENGTH_THRESHOLD = (
    SEARCH_QUERY_PROMPT_LENGTH_THRESHOLD
)
app.state.config.TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE = (
    TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE
)

app.state.MODELS = {}

origins = ["*"]


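# Ask the task model which tool function to call (based on the tool's spec),
# then invoke that function with the parameters the model returned.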
async def get_function_call_response(
    messages, files, tool_id, template, task_model_id, user
):
    tool = Tools.get_tool_by_id(tool_id)
    tools_specs = json.dumps(tool.specs, indent=2)
    content = tools_function_calling_generation_template(template, tools_specs)

    user_message = get_last_user_message(messages)
    prompt = (
        "History:\n"
        + "\n".join(
            [
                f"{message['role'].upper()}: \"\"\"{message['content']}\"\"\""
                for message in messages[::-1][:4]
            ]
        )
        + f"\nQuery: {user_message}"
    )

    print(prompt)

    payload = {
        "model": task_model_id,
        "messages": [
            {"role": "system", "content": content},
            {"role": "user", "content": f"Query: {prompt}"},
        ],
        "stream": False,
    }

    try:
        payload = filter_pipeline(payload, user)
    except Exception as e:
        raise e

    model = app.state.MODELS[task_model_id]

    response = None
    try:
        if model["owned_by"] == "ollama":
            response = await generate_ollama_chat_completion(payload, user=user)
        else:
            response = await generate_openai_chat_completion(payload, user=user)

        content = None

        if hasattr(response, "body_iterator"):
            async for chunk in response.body_iterator:
                data = json.loads(chunk.decode("utf-8"))
                content = data["choices"][0]["message"]["content"]

            # Cleanup any remaining background tasks if necessary
            if response.background is not None:
                await response.background()
        else:
            content = response["choices"][0]["message"]["content"]

        # Parse the function response
        if content is not None:
            print(f"content: {content}")
            result = json.loads(content)
            print(result)

            # Call the function
            if "name" in result:
                if tool_id in webui_app.state.TOOLS:
                    toolkit_module = webui_app.state.TOOLS[tool_id]
                else:
                    toolkit_module = load_toolkit_module_by_id(tool_id)
                    webui_app.state.TOOLS[tool_id] = toolkit_module

                file_handler = False
                # check if toolkit_module has file_handler self variable
                if hasattr(toolkit_module, "file_handler"):
                    file_handler = True
                    print("file_handler: ", file_handler)

                function = getattr(toolkit_module, result["name"])
                function_result = None
                try:
                    # Get the signature of the function
                    sig = inspect.signature(function)
                    params = result["parameters"]

                    if "__user__" in sig.parameters:
                        # Call the function with the '__user__' parameter included
                        params = {
                            **params,
                            "__user__": {
                                "id": user.id,
                                "email": user.email,
                                "name": user.name,
                                "role": user.role,
                            },
                        }

                    if "__messages__" in sig.parameters:
                        # Call the function with the '__messages__' parameter included
                        params = {
                            **params,
                            "__messages__": messages,
                        }

                    if "__files__" in sig.parameters:
                        # Call the function with the '__files__' parameter included
                        params = {
                            **params,
                            "__files__": files,
                        }

                    if "__model__" in sig.parameters:
                        # Call the function with the '__model__' parameter included
                        params = {
                            **params,
                            "__model__": model,
                        }

                    if "__id__" in sig.parameters:
                        # Call the function with the '__id__' parameter included
                        params = {
                            **params,
                            "__id__": tool_id,
                        }

                    function_result = function(**params)
                except Exception as e:
                    print(e)

                # Add the function result to the system prompt
                if function_result is not None:
                    return function_result, file_handler
    except Exception as e:
        print(f"Error: {e}")

    return None, False


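# Intercepts chat completion POSTs to inject tool-call results and RAG context
# into the message list, and re-attaches citations to streaming responses.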
class ChatCompletionMiddleware(BaseHTTPMiddleware):
    async def dispatch(self, request: Request, call_next):
        return_citations = False
        citations = []

        if request.method == "POST" and (
            "/ollama/api/chat" in request.url.path
            or "/chat/completions" in request.url.path
        ):
            log.debug(f"request.url.path: {request.url.path}")

            # Read the original request body
            body = await request.body()
            # Decode body to string
            body_str = body.decode("utf-8")
            # Parse string to JSON
            data = json.loads(body_str) if body_str else {}

            user = get_current_user(
                get_http_authorization_cred(request.headers.get("Authorization"))
            )

            # Remove the citations from the body
            return_citations = data.get("citations", False)
            if "citations" in data:
                del data["citations"]

            # Set the task model
            task_model_id = data["model"]
            if task_model_id not in app.state.MODELS:
                raise HTTPException(
                    status_code=status.HTTP_404_NOT_FOUND,
                    detail="Model not found",
                )

            # Check if the user has a custom task model
            # If the user has a custom task model, use that model
            if app.state.MODELS[task_model_id]["owned_by"] == "ollama":
                if (
                    app.state.config.TASK_MODEL
                    and app.state.config.TASK_MODEL in app.state.MODELS
                ):
                    task_model_id = app.state.config.TASK_MODEL
            else:
                if (
                    app.state.config.TASK_MODEL_EXTERNAL
                    and app.state.config.TASK_MODEL_EXTERNAL in app.state.MODELS
                ):
                    task_model_id = app.state.config.TASK_MODEL_EXTERNAL

            prompt = get_last_user_message(data["messages"])
            context = ""

            # If tool_ids field is present, call the functions
            skip_files = False
            if "tool_ids" in data:
                print(data["tool_ids"])
                for tool_id in data["tool_ids"]:
                    print(tool_id)
                    try:
                        response, file_handler = await get_function_call_response(
                            messages=data["messages"],
                            files=data.get("files", []),
                            tool_id=tool_id,
                            template=app.state.config.TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE,
                            task_model_id=task_model_id,
                            user=user,
                        )

                        print(file_handler)
                        if isinstance(response, str):
                            context += ("\n" if context != "" else "") + response

                        if file_handler:
                            skip_files = True
                    except Exception as e:
                        print(f"Error: {e}")

                del data["tool_ids"]

                print(f"tool_context: {context}")

            # If files field is present, generate RAG completions
            # If skip_files is True, skip the RAG completions
            if "files" in data:
                if not skip_files:
                    data = {**data}
                    rag_context, citations = get_rag_context(
                        files=data["files"],
                        messages=data["messages"],
                        embedding_function=rag_app.state.EMBEDDING_FUNCTION,
                        k=rag_app.state.config.TOP_K,
                        reranking_function=rag_app.state.sentence_transformer_rf,
                        r=rag_app.state.config.RELEVANCE_THRESHOLD,
                        hybrid_search=rag_app.state.config.ENABLE_RAG_HYBRID_SEARCH,
                    )
                    if rag_context:
                        context += ("\n" if context != "" else "") + rag_context

                    log.debug(f"rag_context: {rag_context}, citations: {citations}")
                else:
                    return_citations = False

                del data["files"]

            if context != "":
                system_prompt = rag_template(
                    rag_app.state.config.RAG_TEMPLATE, context, prompt
                )
                print(system_prompt)
                data["messages"] = add_or_update_system_message(
                    f"\n{system_prompt}", data["messages"]
                )

            modified_body_bytes = json.dumps(data).encode("utf-8")
            # Replace the request body with the modified one
            request._body = modified_body_bytes
            # Set custom header to ensure content-length matches new body length
            request.headers.__dict__["_list"] = [
                (b"content-length", str(len(modified_body_bytes)).encode("utf-8")),
                *[
                    (k, v)
                    for k, v in request.headers.raw
                    if k.lower() != b"content-length"
                ],
            ]

        response = await call_next(request)

        if return_citations:
            # Inject the citations into the response
            if isinstance(response, StreamingResponse):
                # If it's a streaming response, inject it as SSE event or NDJSON line
                content_type = response.headers.get("Content-Type") or ""
                if "text/event-stream" in content_type:
                    return StreamingResponse(
                        self.openai_stream_wrapper(response.body_iterator, citations),
                    )
                if "application/x-ndjson" in content_type:
                    return StreamingResponse(
                        self.ollama_stream_wrapper(response.body_iterator, citations),
                    )

        return response

    async def _receive(self, body: bytes):
        return {"type": "http.request", "body": body, "more_body": False}

    async def openai_stream_wrapper(self, original_generator, citations):
        yield f"data: {json.dumps({'citations': citations})}\n\n"
        async for data in original_generator:
            yield data

    async def ollama_stream_wrapper(self, original_generator, citations):
        yield f"{json.dumps({'citations': citations})}\n"
        async for data in original_generator:
            yield data


app.add_middleware(ChatCompletionMiddleware)


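# Run the request payload through every "filter"-type pipeline's inlet endpoint,
# in priority order, before it reaches the model.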
def filter_pipeline(payload, user):
    user = {"id": user.id, "name": user.name, "role": user.role}
    model_id = payload["model"]
    filters = [
        model
        for model in app.state.MODELS.values()
        if "pipeline" in model
        and "type" in model["pipeline"]
        and model["pipeline"]["type"] == "filter"
        and (
            model["pipeline"]["pipelines"] == ["*"]
            or any(
                model_id == target_model_id
                for target_model_id in model["pipeline"]["pipelines"]
            )
        )
    ]
    sorted_filters = sorted(filters, key=lambda x: x["pipeline"]["priority"])

    model = app.state.MODELS[model_id]

    if "pipeline" in model:
        sorted_filters.append(model)

    for filter in sorted_filters:
        r = None
        try:
            urlIdx = filter["urlIdx"]

            url = openai_app.state.config.OPENAI_API_BASE_URLS[urlIdx]
            key = openai_app.state.config.OPENAI_API_KEYS[urlIdx]

            if key != "":
                headers = {"Authorization": f"Bearer {key}"}
                r = requests.post(
                    f"{url}/{filter['id']}/filter/inlet",
                    headers=headers,
                    json={
                        "user": user,
                        "body": payload,
                    },
                )

                r.raise_for_status()
                payload = r.json()
        except Exception as e:
            # Handle connection error here
            print(f"Connection error: {e}")

            if r is not None:
                res = {}
                try:
                    res = r.json()
                except Exception:
                    pass

                if "detail" in res:
                    raise Exception(r.status_code, res["detail"])

    if "pipeline" not in app.state.MODELS[model_id]:
        if "chat_id" in payload:
            del payload["chat_id"]

        if "title" in payload:
            del payload["title"]

        if "task" in payload:
            del payload["task"]

    return payload


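# Applies filter_pipeline() to every incoming chat completion request body.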
class PipelineMiddleware(BaseHTTPMiddleware):
    async def dispatch(self, request: Request, call_next):
        if request.method == "POST" and (
            "/ollama/api/chat" in request.url.path
            or "/chat/completions" in request.url.path
        ):
            log.debug(f"request.url.path: {request.url.path}")

            # Read the original request body
            body = await request.body()
            # Decode body to string
            body_str = body.decode("utf-8")
            # Parse string to JSON
            data = json.loads(body_str) if body_str else {}

            user = get_current_user(
                get_http_authorization_cred(request.headers.get("Authorization"))
            )

            try:
                data = filter_pipeline(data, user)
            except Exception as e:
                return JSONResponse(
                    status_code=e.args[0],
                    content={"detail": e.args[1]},
                )

            modified_body_bytes = json.dumps(data).encode("utf-8")
            # Replace the request body with the modified one
            request._body = modified_body_bytes
            # Set custom header to ensure content-length matches new body length
            request.headers.__dict__["_list"] = [
                (b"content-length", str(len(modified_body_bytes)).encode("utf-8")),
                *[
                    (k, v)
                    for k, v in request.headers.raw
                    if k.lower() != b"content-length"
                ],
            ]

        response = await call_next(request)
        return response

    async def _receive(self, body: bytes):
        return {"type": "http.request", "body": body, "more_body": False}


app.add_middleware(PipelineMiddleware)

app.add_middleware(
    CORSMiddleware,
    allow_origins=origins,
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)


@app.middleware("http")
async def check_url(request: Request, call_next):
    if len(app.state.MODELS) == 0:
        await get_all_models()

    start_time = int(time.time())
    response = await call_next(request)
    process_time = int(time.time()) - start_time
    response.headers["X-Process-Time"] = str(process_time)

    return response


@app.middleware("http")
async def update_embedding_function(request: Request, call_next):
    response = await call_next(request)
    if "/embedding/update" in request.url.path:
        webui_app.state.EMBEDDING_FUNCTION = rag_app.state.EMBEDDING_FUNCTION
    return response


app.mount("/ws", socket_app)

app.mount("/ollama", ollama_app)
app.mount("/openai", openai_app)

app.mount("/images/api/v1", images_app)
app.mount("/audio/api/v1", audio_app)
app.mount("/rag/api/v1", rag_app)

app.mount("/api/v1", webui_app)

webui_app.state.EMBEDDING_FUNCTION = rag_app.state.EMBEDDING_FUNCTION


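# Aggregate OpenAI, Ollama, and custom (preset) models into app.state.MODELS.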
async def get_all_models():
    openai_models = []
    ollama_models = []

    if app.state.config.ENABLE_OPENAI_API:
        openai_models = await get_openai_models()
        openai_models = openai_models["data"]

    if app.state.config.ENABLE_OLLAMA_API:
        ollama_models = await get_ollama_models()
        ollama_models = [
            {
                "id": model["model"],
                "name": model["name"],
                "object": "model",
                "created": int(time.time()),
                "owned_by": "ollama",
                "ollama": model,
            }
            for model in ollama_models["models"]
        ]

    models = openai_models + ollama_models
    custom_models = Models.get_all_models()

    for custom_model in custom_models:
        if custom_model.base_model_id is None:
            for model in models:
                if (
                    custom_model.id == model["id"]
                    or custom_model.id == model["id"].split(":")[0]
                ):
                    model["name"] = custom_model.name
                    model["info"] = custom_model.model_dump()
        else:
            owned_by = "openai"

            for model in models:
                if (
                    custom_model.base_model_id == model["id"]
                    or custom_model.base_model_id == model["id"].split(":")[0]
                ):
                    owned_by = model["owned_by"]
                    break

            models.append(
                {
                    "id": custom_model.id,
                    "name": custom_model.name,
                    "object": "model",
                    "created": custom_model.created_at,
                    "owned_by": owned_by,
                    "info": custom_model.model_dump(),
                    "preset": True,
                }
            )

    app.state.MODELS = {model["id"]: model for model in models}
    webui_app.state.MODELS = app.state.MODELS

    return models


@app.get("/api/models")
async def get_models(user=Depends(get_verified_user)):
    models = await get_all_models()

    # Filter out filter pipelines
    models = [
        model
        for model in models
        if "pipeline" not in model or model["pipeline"].get("type", None) != "filter"
    ]

    if app.state.config.ENABLE_MODEL_FILTER:
        if user.role == "user":
            models = list(
                filter(
                    lambda model: model["id"] in app.state.config.MODEL_FILTER_LIST,
                    models,
                )
            )
            return {"data": models}

    return {"data": models}


@app.get("/api/task/config")
async def get_task_config(user=Depends(get_verified_user)):
    return {
        "TASK_MODEL": app.state.config.TASK_MODEL,
        "TASK_MODEL_EXTERNAL": app.state.config.TASK_MODEL_EXTERNAL,
        "TITLE_GENERATION_PROMPT_TEMPLATE": app.state.config.TITLE_GENERATION_PROMPT_TEMPLATE,
        "SEARCH_QUERY_GENERATION_PROMPT_TEMPLATE": app.state.config.SEARCH_QUERY_GENERATION_PROMPT_TEMPLATE,
        "SEARCH_QUERY_PROMPT_LENGTH_THRESHOLD": app.state.config.SEARCH_QUERY_PROMPT_LENGTH_THRESHOLD,
        "TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE": app.state.config.TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE,
    }


class TaskConfigForm(BaseModel):
    TASK_MODEL: Optional[str]
    TASK_MODEL_EXTERNAL: Optional[str]
    TITLE_GENERATION_PROMPT_TEMPLATE: str
    SEARCH_QUERY_GENERATION_PROMPT_TEMPLATE: str
    SEARCH_QUERY_PROMPT_LENGTH_THRESHOLD: int
    TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE: str


@app.post("/api/task/config/update")
async def update_task_config(form_data: TaskConfigForm, user=Depends(get_admin_user)):
    app.state.config.TASK_MODEL = form_data.TASK_MODEL
    app.state.config.TASK_MODEL_EXTERNAL = form_data.TASK_MODEL_EXTERNAL
    app.state.config.TITLE_GENERATION_PROMPT_TEMPLATE = (
        form_data.TITLE_GENERATION_PROMPT_TEMPLATE
    )
    app.state.config.SEARCH_QUERY_GENERATION_PROMPT_TEMPLATE = (
        form_data.SEARCH_QUERY_GENERATION_PROMPT_TEMPLATE
    )
    app.state.config.SEARCH_QUERY_PROMPT_LENGTH_THRESHOLD = (
        form_data.SEARCH_QUERY_PROMPT_LENGTH_THRESHOLD
    )
    app.state.config.TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE = (
        form_data.TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE
    )

    return {
        "TASK_MODEL": app.state.config.TASK_MODEL,
        "TASK_MODEL_EXTERNAL": app.state.config.TASK_MODEL_EXTERNAL,
        "TITLE_GENERATION_PROMPT_TEMPLATE": app.state.config.TITLE_GENERATION_PROMPT_TEMPLATE,
        "SEARCH_QUERY_GENERATION_PROMPT_TEMPLATE": app.state.config.SEARCH_QUERY_GENERATION_PROMPT_TEMPLATE,
        "SEARCH_QUERY_PROMPT_LENGTH_THRESHOLD": app.state.config.SEARCH_QUERY_PROMPT_LENGTH_THRESHOLD,
        "TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE": app.state.config.TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE,
    }


@app.post("/api/task/title/completions")
async def generate_title(form_data: dict, user=Depends(get_verified_user)):
    print("generate_title")

    model_id = form_data["model"]
    if model_id not in app.state.MODELS:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail="Model not found",
        )

    # Check if the user has a custom task model
    # If the user has a custom task model, use that model
    if app.state.MODELS[model_id]["owned_by"] == "ollama":
        if app.state.config.TASK_MODEL:
            task_model_id = app.state.config.TASK_MODEL
            if task_model_id in app.state.MODELS:
                model_id = task_model_id
    else:
        if app.state.config.TASK_MODEL_EXTERNAL:
            task_model_id = app.state.config.TASK_MODEL_EXTERNAL
            if task_model_id in app.state.MODELS:
                model_id = task_model_id

    print(model_id)
    model = app.state.MODELS[model_id]

    template = app.state.config.TITLE_GENERATION_PROMPT_TEMPLATE

    content = title_generation_template(
        template,
        form_data["prompt"],
        {
            "name": user.name,
            "location": user.info.get("location") if user.info else None,
        },
    )

    payload = {
        "model": model_id,
        "messages": [{"role": "user", "content": content}],
        "stream": False,
        "max_tokens": 50,
        "chat_id": form_data.get("chat_id", None),
        "title": True,
    }

    log.debug(payload)

    try:
        payload = filter_pipeline(payload, user)
    except Exception as e:
        return JSONResponse(
            status_code=e.args[0],
            content={"detail": e.args[1]},
        )

    if model["owned_by"] == "ollama":
        return await generate_ollama_chat_completion(payload, user=user)
    else:
        return await generate_openai_chat_completion(payload, user=user)


@app.post("/api/task/query/completions")
async def generate_search_query(form_data: dict, user=Depends(get_verified_user)):
    print("generate_search_query")

    if len(form_data["prompt"]) < app.state.config.SEARCH_QUERY_PROMPT_LENGTH_THRESHOLD:
        raise HTTPException(
            status_code=status.HTTP_400_BAD_REQUEST,
            detail=f"Skip search query generation for short prompts (< {app.state.config.SEARCH_QUERY_PROMPT_LENGTH_THRESHOLD} characters)",
        )

    model_id = form_data["model"]
    if model_id not in app.state.MODELS:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail="Model not found",
        )

    # Check if the user has a custom task model
    # If the user has a custom task model, use that model
    if app.state.MODELS[model_id]["owned_by"] == "ollama":
        if app.state.config.TASK_MODEL:
            task_model_id = app.state.config.TASK_MODEL
            if task_model_id in app.state.MODELS:
                model_id = task_model_id
    else:
        if app.state.config.TASK_MODEL_EXTERNAL:
            task_model_id = app.state.config.TASK_MODEL_EXTERNAL
            if task_model_id in app.state.MODELS:
                model_id = task_model_id

    print(model_id)
    model = app.state.MODELS[model_id]

    template = app.state.config.SEARCH_QUERY_GENERATION_PROMPT_TEMPLATE

    content = search_query_generation_template(
        template, form_data["prompt"], {"name": user.name}
    )

    payload = {
        "model": model_id,
        "messages": [{"role": "user", "content": content}],
        "stream": False,
        "max_tokens": 30,
        "task": True,
    }

    print(payload)

    try:
        payload = filter_pipeline(payload, user)
    except Exception as e:
        return JSONResponse(
            status_code=e.args[0],
            content={"detail": e.args[1]},
        )

    if model["owned_by"] == "ollama":
        return await generate_ollama_chat_completion(payload, user=user)
    else:
        return await generate_openai_chat_completion(payload, user=user)


@app.post("/api/task/emoji/completions")
async def generate_emoji(form_data: dict, user=Depends(get_verified_user)):
    print("generate_emoji")

    model_id = form_data["model"]
    if model_id not in app.state.MODELS:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail="Model not found",
        )

    # Check if the user has a custom task model
    # If the user has a custom task model, use that model
    if app.state.MODELS[model_id]["owned_by"] == "ollama":
        if app.state.config.TASK_MODEL:
            task_model_id = app.state.config.TASK_MODEL
            if task_model_id in app.state.MODELS:
                model_id = task_model_id
    else:
        if app.state.config.TASK_MODEL_EXTERNAL:
            task_model_id = app.state.config.TASK_MODEL_EXTERNAL
            if task_model_id in app.state.MODELS:
                model_id = task_model_id

    print(model_id)
    model = app.state.MODELS[model_id]

    template = '''
Your task is to reflect the speaker's likely facial expression through a fitting emoji. Interpret emotions from the message and reflect their facial expression using fitting, diverse emojis (e.g., 😊, 😢, 😡, 😱).
Message: """{{prompt}}"""
'''

    content = title_generation_template(
        template,
        form_data["prompt"],
        {
            "name": user.name,
            "location": user.info.get("location") if user.info else None,
        },
    )

    payload = {
        "model": model_id,
        "messages": [{"role": "user", "content": content}],
        "stream": False,
        "max_tokens": 4,
        "chat_id": form_data.get("chat_id", None),
        "task": True,
    }

    log.debug(payload)

    try:
        payload = filter_pipeline(payload, user)
    except Exception as e:
        return JSONResponse(
            status_code=e.args[0],
            content={"detail": e.args[1]},
        )

    if model["owned_by"] == "ollama":
        return await generate_ollama_chat_completion(payload, user=user)
    else:
        return await generate_openai_chat_completion(payload, user=user)


@app.post("/api/task/tools/completions")
async def get_tools_function_calling(form_data: dict, user=Depends(get_verified_user)):
    print("get_tools_function_calling")

    model_id = form_data["model"]
    if model_id not in app.state.MODELS:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail="Model not found",
        )

    # Check if the user has a custom task model
    # If the user has a custom task model, use that model
    if app.state.MODELS[model_id]["owned_by"] == "ollama":
        if app.state.config.TASK_MODEL:
            task_model_id = app.state.config.TASK_MODEL
            if task_model_id in app.state.MODELS:
                model_id = task_model_id
    else:
        if app.state.config.TASK_MODEL_EXTERNAL:
            task_model_id = app.state.config.TASK_MODEL_EXTERNAL
            if task_model_id in app.state.MODELS:
                model_id = task_model_id

    print(model_id)
    template = app.state.config.TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE

    try:
        context, file_handler = await get_function_call_response(
            form_data["messages"],
            form_data.get("files", []),
            form_data["tool_id"],
            template,
            model_id,
            user,
        )
        return context
    except Exception as e:
        return JSONResponse(
            status_code=e.args[0],
            content={"detail": e.args[1]},
        )


@app.post("/api/chat/completions")
async def generate_chat_completions(form_data: dict, user=Depends(get_verified_user)):
    model_id = form_data["model"]
    if model_id not in app.state.MODELS:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail="Model not found",
        )

    model = app.state.MODELS[model_id]
    print(model)

    if model["owned_by"] == "ollama":
        return await generate_ollama_chat_completion(form_data, user=user)
    else:
        return await generate_openai_chat_completion(form_data, user=user)


@app.post("/api/chat/completed")
async def chat_completed(form_data: dict, user=Depends(get_verified_user)):
    data = form_data
    model_id = data["model"]

    filters = [
        model
        for model in app.state.MODELS.values()
        if "pipeline" in model
        and "type" in model["pipeline"]
        and model["pipeline"]["type"] == "filter"
        and (
            model["pipeline"]["pipelines"] == ["*"]
            or any(
                model_id == target_model_id
                for target_model_id in model["pipeline"]["pipelines"]
            )
        )
    ]
    sorted_filters = sorted(filters, key=lambda x: x["pipeline"]["priority"])

    print(model_id)

    if model_id in app.state.MODELS:
        model = app.state.MODELS[model_id]
        if "pipeline" in model:
            sorted_filters = [model] + sorted_filters

    for filter in sorted_filters:
        r = None
        try:
            urlIdx = filter["urlIdx"]

            url = openai_app.state.config.OPENAI_API_BASE_URLS[urlIdx]
            key = openai_app.state.config.OPENAI_API_KEYS[urlIdx]

            if key != "":
                headers = {"Authorization": f"Bearer {key}"}
                r = requests.post(
                    f"{url}/{filter['id']}/filter/outlet",
                    headers=headers,
                    json={
                        "user": {"id": user.id, "name": user.name, "role": user.role},
                        "body": data,
                    },
                )

                r.raise_for_status()
                data = r.json()
        except Exception as e:
            # Handle connection error here
            print(f"Connection error: {e}")

            if r is not None:
                try:
                    res = r.json()
                    if "detail" in res:
                        return JSONResponse(
                            status_code=r.status_code,
                            content=res,
                        )
                except Exception:
                    pass

    return data


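# Admin endpoints for managing external pipelines servers.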
@app.get("/api/pipelines/list")
async def get_pipelines_list(user=Depends(get_admin_user)):
    responses = await get_openai_models(raw=True)
    print(responses)

    urlIdxs = [
        idx
        for idx, response in enumerate(responses)
        if response is not None and "pipelines" in response
    ]

    return {
        "data": [
            {
                "url": openai_app.state.config.OPENAI_API_BASE_URLS[urlIdx],
                "idx": urlIdx,
            }
            for urlIdx in urlIdxs
        ]
    }


@app.post("/api/pipelines/upload")
async def upload_pipeline(
    urlIdx: int = Form(...), file: UploadFile = File(...), user=Depends(get_admin_user)
):
    print("upload_pipeline", urlIdx, file.filename)
    # Check if the uploaded file is a python file
    if not file.filename.endswith(".py"):
        raise HTTPException(
            status_code=status.HTTP_400_BAD_REQUEST,
            detail="Only Python (.py) files are allowed.",
        )

    upload_folder = f"{CACHE_DIR}/pipelines"
    os.makedirs(upload_folder, exist_ok=True)
    file_path = os.path.join(upload_folder, file.filename)

    r = None
    try:
        # Save the uploaded file
        with open(file_path, "wb") as buffer:
            shutil.copyfileobj(file.file, buffer)

        url = openai_app.state.config.OPENAI_API_BASE_URLS[urlIdx]
        key = openai_app.state.config.OPENAI_API_KEYS[urlIdx]

        headers = {"Authorization": f"Bearer {key}"}

        with open(file_path, "rb") as f:
            files = {"file": f}
            r = requests.post(f"{url}/pipelines/upload", headers=headers, files=files)

        r.raise_for_status()
        data = r.json()

        return {**data}
    except Exception as e:
        # Handle connection error here
        print(f"Connection error: {e}")

        detail = "Pipeline not found"
        if r is not None:
            try:
                res = r.json()
                if "detail" in res:
                    detail = res["detail"]
            except Exception:
                pass

        raise HTTPException(
            status_code=(r.status_code if r is not None else status.HTTP_404_NOT_FOUND),
            detail=detail,
        )
    finally:
        # Ensure the file is deleted after the upload is completed or on failure
        if os.path.exists(file_path):
            os.remove(file_path)


class AddPipelineForm(BaseModel):
    url: str
    urlIdx: int


@app.post("/api/pipelines/add")
async def add_pipeline(form_data: AddPipelineForm, user=Depends(get_admin_user)):
    r = None
    try:
        urlIdx = form_data.urlIdx

        url = openai_app.state.config.OPENAI_API_BASE_URLS[urlIdx]
        key = openai_app.state.config.OPENAI_API_KEYS[urlIdx]

        headers = {"Authorization": f"Bearer {key}"}
        r = requests.post(
            f"{url}/pipelines/add", headers=headers, json={"url": form_data.url}
        )

        r.raise_for_status()
        data = r.json()

        return {**data}
    except Exception as e:
        # Handle connection error here
        print(f"Connection error: {e}")

        detail = "Pipeline not found"
        if r is not None:
            try:
                res = r.json()
                if "detail" in res:
                    detail = res["detail"]
            except Exception:
                pass

        raise HTTPException(
            status_code=(r.status_code if r is not None else status.HTTP_404_NOT_FOUND),
            detail=detail,
        )


class DeletePipelineForm(BaseModel):
    id: str
    urlIdx: int


@app.delete("/api/pipelines/delete")
async def delete_pipeline(form_data: DeletePipelineForm, user=Depends(get_admin_user)):
    r = None
    try:
        urlIdx = form_data.urlIdx

        url = openai_app.state.config.OPENAI_API_BASE_URLS[urlIdx]
        key = openai_app.state.config.OPENAI_API_KEYS[urlIdx]

        headers = {"Authorization": f"Bearer {key}"}
        r = requests.delete(
            f"{url}/pipelines/delete", headers=headers, json={"id": form_data.id}
        )

        r.raise_for_status()
        data = r.json()

        return {**data}
    except Exception as e:
        # Handle connection error here
        print(f"Connection error: {e}")

        detail = "Pipeline not found"
        if r is not None:
            try:
                res = r.json()
                if "detail" in res:
                    detail = res["detail"]
            except Exception:
                pass

        raise HTTPException(
            status_code=(r.status_code if r is not None else status.HTTP_404_NOT_FOUND),
            detail=detail,
        )


@app.get("/api/pipelines")
async def get_pipelines(urlIdx: Optional[int] = None, user=Depends(get_admin_user)):
    r = None
    try:
        url = openai_app.state.config.OPENAI_API_BASE_URLS[urlIdx]
        key = openai_app.state.config.OPENAI_API_KEYS[urlIdx]

        headers = {"Authorization": f"Bearer {key}"}
        r = requests.get(f"{url}/pipelines", headers=headers)

        r.raise_for_status()
        data = r.json()

        return {**data}
    except Exception as e:
        # Handle connection error here
        print(f"Connection error: {e}")

        detail = "Pipeline not found"
        if r is not None:
            try:
                res = r.json()
                if "detail" in res:
                    detail = res["detail"]
            except Exception:
                pass

        raise HTTPException(
            status_code=(r.status_code if r is not None else status.HTTP_404_NOT_FOUND),
            detail=detail,
        )


@app.get("/api/pipelines/{pipeline_id}/valves")
async def get_pipeline_valves(
    urlIdx: Optional[int], pipeline_id: str, user=Depends(get_admin_user)
):
    models = await get_all_models()

    r = None
    try:
        url = openai_app.state.config.OPENAI_API_BASE_URLS[urlIdx]
        key = openai_app.state.config.OPENAI_API_KEYS[urlIdx]

        headers = {"Authorization": f"Bearer {key}"}
        r = requests.get(f"{url}/{pipeline_id}/valves", headers=headers)

        r.raise_for_status()
        data = r.json()

        return {**data}
    except Exception as e:
        # Handle connection error here
        print(f"Connection error: {e}")

        detail = "Pipeline not found"
        if r is not None:
            try:
                res = r.json()
                if "detail" in res:
                    detail = res["detail"]
            except Exception:
                pass

        raise HTTPException(
            status_code=(r.status_code if r is not None else status.HTTP_404_NOT_FOUND),
            detail=detail,
        )


@app.get("/api/pipelines/{pipeline_id}/valves/spec")
async def get_pipeline_valves_spec(
    urlIdx: Optional[int], pipeline_id: str, user=Depends(get_admin_user)
):
    models = await get_all_models()

    r = None
    try:
        url = openai_app.state.config.OPENAI_API_BASE_URLS[urlIdx]
        key = openai_app.state.config.OPENAI_API_KEYS[urlIdx]

        headers = {"Authorization": f"Bearer {key}"}
        r = requests.get(f"{url}/{pipeline_id}/valves/spec", headers=headers)

        r.raise_for_status()
        data = r.json()

        return {**data}
    except Exception as e:
        # Handle connection error here
        print(f"Connection error: {e}")

        detail = "Pipeline not found"
        if r is not None:
            try:
                res = r.json()
                if "detail" in res:
                    detail = res["detail"]
            except Exception:
                pass

        raise HTTPException(
            status_code=(r.status_code if r is not None else status.HTTP_404_NOT_FOUND),
            detail=detail,
        )


@app.post("/api/pipelines/{pipeline_id}/valves/update")
async def update_pipeline_valves(
    urlIdx: Optional[int],
    pipeline_id: str,
    form_data: dict,
    user=Depends(get_admin_user),
):
    models = await get_all_models()

    r = None
    try:
        url = openai_app.state.config.OPENAI_API_BASE_URLS[urlIdx]
        key = openai_app.state.config.OPENAI_API_KEYS[urlIdx]

        headers = {"Authorization": f"Bearer {key}"}
        r = requests.post(
            f"{url}/{pipeline_id}/valves/update",
            headers=headers,
            json={**form_data},
        )

        r.raise_for_status()
        data = r.json()

        return {**data}
    except Exception as e:
        # Handle connection error here
        print(f"Connection error: {e}")

        detail = "Pipeline not found"
        if r is not None:
            try:
                res = r.json()
                if "detail" in res:
                    detail = res["detail"]
            except Exception:
                pass

        raise HTTPException(
            status_code=(r.status_code if r is not None else status.HTTP_404_NOT_FOUND),
            detail=detail,
        )


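# Application config and metadata endpoints.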
@app.get("/api/config")
async def get_app_config():
    # Fall back to "en-US" when the 'ui' section is missing from CONFIG_DATA
    default_locale = "en-US"
    if "ui" in CONFIG_DATA:
        default_locale = CONFIG_DATA["ui"].get("default_locale", "en-US")

    return {
        "status": True,
        "name": WEBUI_NAME,
        "version": VERSION,
        "default_locale": default_locale,
        "default_models": webui_app.state.config.DEFAULT_MODELS,
        "default_prompt_suggestions": webui_app.state.config.DEFAULT_PROMPT_SUGGESTIONS,
        "features": {
            "auth": WEBUI_AUTH,
            "auth_trusted_header": bool(webui_app.state.AUTH_TRUSTED_EMAIL_HEADER),
            "enable_signup": webui_app.state.config.ENABLE_SIGNUP,
            "enable_web_search": rag_app.state.config.ENABLE_RAG_WEB_SEARCH,
            "enable_image_generation": images_app.state.config.ENABLED,
            "enable_community_sharing": webui_app.state.config.ENABLE_COMMUNITY_SHARING,
            "enable_admin_export": ENABLE_ADMIN_EXPORT,
        },
        "audio": {
            "tts": {
                "engine": audio_app.state.config.TTS_ENGINE,
                "voice": audio_app.state.config.TTS_VOICE,
            },
            "stt": {
                "engine": audio_app.state.config.STT_ENGINE,
            },
        },
    }


@app.get("/api/config/model/filter")
async def get_model_filter_config(user=Depends(get_admin_user)):
    return {
        "enabled": app.state.config.ENABLE_MODEL_FILTER,
        "models": app.state.config.MODEL_FILTER_LIST,
    }


class ModelFilterConfigForm(BaseModel):
    enabled: bool
    models: List[str]


@app.post("/api/config/model/filter")
async def update_model_filter_config(
    form_data: ModelFilterConfigForm, user=Depends(get_admin_user)
):
    app.state.config.ENABLE_MODEL_FILTER = form_data.enabled
    app.state.config.MODEL_FILTER_LIST = form_data.models

    return {
        "enabled": app.state.config.ENABLE_MODEL_FILTER,
        "models": app.state.config.MODEL_FILTER_LIST,
    }


@app.get("/api/webhook")
async def get_webhook_url(user=Depends(get_admin_user)):
    return {
        "url": app.state.config.WEBHOOK_URL,
    }


class UrlForm(BaseModel):
    url: str


@app.post("/api/webhook")
async def update_webhook_url(form_data: UrlForm, user=Depends(get_admin_user)):
    app.state.config.WEBHOOK_URL = form_data.url
    webui_app.state.WEBHOOK_URL = app.state.config.WEBHOOK_URL

    return {"url": app.state.config.WEBHOOK_URL}


@app.get("/api/version")
async def get_app_version():
    return {
        "version": VERSION,
    }


@app.get("/api/changelog")
async def get_app_changelog():
    return {key: CHANGELOG[key] for idx, key in enumerate(CHANGELOG) if idx < 5}


@app.get("/api/version/updates")
async def get_app_latest_release_version():
    try:
        async with aiohttp.ClientSession(trust_env=True) as session:
            async with session.get(
                "https://api.github.com/repos/open-webui/open-webui/releases/latest"
            ) as response:
                response.raise_for_status()
                data = await response.json()
                latest_version = data["tag_name"]

                return {"current": VERSION, "latest": latest_version[1:]}
    except aiohttp.ClientError as e:
        raise HTTPException(
            status_code=status.HTTP_503_SERVICE_UNAVAILABLE,
            detail=ERROR_MESSAGES.RATE_LIMIT_EXCEEDED,
        )


@app.get("/manifest.json")
async def get_manifest_json():
    return {
        "name": WEBUI_NAME,
        "short_name": WEBUI_NAME,
        "start_url": "/",
        "display": "standalone",
        "background_color": "#343541",
        "theme_color": "#343541",
        "orientation": "portrait-primary",
        "icons": [{"src": "/static/logo.png", "type": "image/png", "sizes": "500x500"}],
    }


@app.get("/opensearch.xml")
async def get_opensearch_xml():
    xml_content = rf"""
    <OpenSearchDescription xmlns="http://a9.com/-/spec/opensearch/1.1/" xmlns:moz="http://www.mozilla.org/2006/browser/search/">
    <ShortName>{WEBUI_NAME}</ShortName>
    <Description>Search {WEBUI_NAME}</Description>
    <InputEncoding>UTF-8</InputEncoding>
    <Image width="16" height="16" type="image/x-icon">{WEBUI_URL}/favicon.png</Image>
    <Url type="text/html" method="get" template="{WEBUI_URL}/?q={"{searchTerms}"}"/>
    <moz:SearchForm>{WEBUI_URL}</moz:SearchForm>
    </OpenSearchDescription>
    """
    return Response(content=xml_content, media_type="application/xml")


@app.get("/health")
async def healthcheck():
    return {"status": True}


app.mount("/static", StaticFiles(directory=STATIC_DIR), name="static")
app.mount("/cache", StaticFiles(directory=CACHE_DIR), name="cache")

if os.path.exists(FRONTEND_BUILD_DIR):
    mimetypes.add_type("text/javascript", ".js")
    app.mount(
        "/",
        SPAStaticFiles(directory=FRONTEND_BUILD_DIR, html=True),
        name="spa-static-files",
    )
else:
    log.warning(
        f"Frontend build directory not found at '{FRONTEND_BUILD_DIR}'. Serving API only."
    )