# tasks.py
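"""Task endpoints: helper completions (chat titles, tags, image prompts,
search/retrieval queries, autocompletions, emojis, and mixture-of-agents
responses) generated with the configured task model."""
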
from fastapi import APIRouter, Depends, HTTPException, Response, status, Request
from fastapi.responses import JSONResponse, RedirectResponse
from pydantic import BaseModel
from typing import Optional
import logging
import re

from open_webui.utils.chat import generate_chat_completion
from open_webui.utils.task import (
    title_generation_template,
    query_generation_template,
    image_prompt_generation_template,
    autocomplete_generation_template,
    tags_generation_template,
    emoji_generation_template,
    moa_response_generation_template,
)
from open_webui.utils.auth import get_admin_user, get_verified_user
from open_webui.constants import TASKS

from open_webui.routers.pipelines import process_pipeline_inlet_filter
from open_webui.utils.task import get_task_model_id

from open_webui.config import (
    DEFAULT_TITLE_GENERATION_PROMPT_TEMPLATE,
    DEFAULT_TAGS_GENERATION_PROMPT_TEMPLATE,
    DEFAULT_IMAGE_PROMPT_GENERATION_PROMPT_TEMPLATE,
    DEFAULT_QUERY_GENERATION_PROMPT_TEMPLATE,
    DEFAULT_AUTOCOMPLETE_GENERATION_PROMPT_TEMPLATE,
    DEFAULT_EMOJI_GENERATION_PROMPT_TEMPLATE,
    DEFAULT_MOA_GENERATION_PROMPT_TEMPLATE,
)
from open_webui.env import SRC_LOG_LEVELS

log = logging.getLogger(__name__)
log.setLevel(SRC_LOG_LEVELS["MODELS"])

router = APIRouter()


##################################
#
# Task Endpoints
#
##################################

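# GET /config: return the current task configuration (task models, prompt
# templates, and feature toggles) to any verified user.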
@router.get("/config")
async def get_task_config(request: Request, user=Depends(get_verified_user)):
    return {
        "TASK_MODEL": request.app.state.config.TASK_MODEL,
        "TASK_MODEL_EXTERNAL": request.app.state.config.TASK_MODEL_EXTERNAL,
        "TITLE_GENERATION_PROMPT_TEMPLATE": request.app.state.config.TITLE_GENERATION_PROMPT_TEMPLATE,
        "IMAGE_PROMPT_GENERATION_PROMPT_TEMPLATE": request.app.state.config.IMAGE_PROMPT_GENERATION_PROMPT_TEMPLATE,
        "ENABLE_AUTOCOMPLETE_GENERATION": request.app.state.config.ENABLE_AUTOCOMPLETE_GENERATION,
        "AUTOCOMPLETE_GENERATION_INPUT_MAX_LENGTH": request.app.state.config.AUTOCOMPLETE_GENERATION_INPUT_MAX_LENGTH,
        "TAGS_GENERATION_PROMPT_TEMPLATE": request.app.state.config.TAGS_GENERATION_PROMPT_TEMPLATE,
        "ENABLE_TAGS_GENERATION": request.app.state.config.ENABLE_TAGS_GENERATION,
        "ENABLE_TITLE_GENERATION": request.app.state.config.ENABLE_TITLE_GENERATION,
        "ENABLE_SEARCH_QUERY_GENERATION": request.app.state.config.ENABLE_SEARCH_QUERY_GENERATION,
        "ENABLE_RETRIEVAL_QUERY_GENERATION": request.app.state.config.ENABLE_RETRIEVAL_QUERY_GENERATION,
        "QUERY_GENERATION_PROMPT_TEMPLATE": request.app.state.config.QUERY_GENERATION_PROMPT_TEMPLATE,
        "TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE": request.app.state.config.TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE,
    }


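# POST /config/update: admin-only update of the task configuration.
# TaskConfigForm mirrors the keys returned by GET /config.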
class TaskConfigForm(BaseModel):
    TASK_MODEL: Optional[str]
    TASK_MODEL_EXTERNAL: Optional[str]
    ENABLE_TITLE_GENERATION: bool
    TITLE_GENERATION_PROMPT_TEMPLATE: str
    IMAGE_PROMPT_GENERATION_PROMPT_TEMPLATE: str
    ENABLE_AUTOCOMPLETE_GENERATION: bool
    AUTOCOMPLETE_GENERATION_INPUT_MAX_LENGTH: int
    TAGS_GENERATION_PROMPT_TEMPLATE: str
    ENABLE_TAGS_GENERATION: bool
    ENABLE_SEARCH_QUERY_GENERATION: bool
    ENABLE_RETRIEVAL_QUERY_GENERATION: bool
    QUERY_GENERATION_PROMPT_TEMPLATE: str
    TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE: str


@router.post("/config/update")
async def update_task_config(
    request: Request, form_data: TaskConfigForm, user=Depends(get_admin_user)
):
    request.app.state.config.TASK_MODEL = form_data.TASK_MODEL
    request.app.state.config.TASK_MODEL_EXTERNAL = form_data.TASK_MODEL_EXTERNAL
    request.app.state.config.ENABLE_TITLE_GENERATION = form_data.ENABLE_TITLE_GENERATION
    request.app.state.config.TITLE_GENERATION_PROMPT_TEMPLATE = (
        form_data.TITLE_GENERATION_PROMPT_TEMPLATE
    )
    request.app.state.config.IMAGE_PROMPT_GENERATION_PROMPT_TEMPLATE = (
        form_data.IMAGE_PROMPT_GENERATION_PROMPT_TEMPLATE
    )
    request.app.state.config.ENABLE_AUTOCOMPLETE_GENERATION = (
        form_data.ENABLE_AUTOCOMPLETE_GENERATION
    )
    request.app.state.config.AUTOCOMPLETE_GENERATION_INPUT_MAX_LENGTH = (
        form_data.AUTOCOMPLETE_GENERATION_INPUT_MAX_LENGTH
    )
    request.app.state.config.TAGS_GENERATION_PROMPT_TEMPLATE = (
        form_data.TAGS_GENERATION_PROMPT_TEMPLATE
    )
    request.app.state.config.ENABLE_TAGS_GENERATION = form_data.ENABLE_TAGS_GENERATION
    request.app.state.config.ENABLE_SEARCH_QUERY_GENERATION = (
        form_data.ENABLE_SEARCH_QUERY_GENERATION
    )
    request.app.state.config.ENABLE_RETRIEVAL_QUERY_GENERATION = (
        form_data.ENABLE_RETRIEVAL_QUERY_GENERATION
    )
    request.app.state.config.QUERY_GENERATION_PROMPT_TEMPLATE = (
        form_data.QUERY_GENERATION_PROMPT_TEMPLATE
    )
    request.app.state.config.TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE = (
        form_data.TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE
    )

    return {
        "TASK_MODEL": request.app.state.config.TASK_MODEL,
        "TASK_MODEL_EXTERNAL": request.app.state.config.TASK_MODEL_EXTERNAL,
        "ENABLE_TITLE_GENERATION": request.app.state.config.ENABLE_TITLE_GENERATION,
        "TITLE_GENERATION_PROMPT_TEMPLATE": request.app.state.config.TITLE_GENERATION_PROMPT_TEMPLATE,
        "IMAGE_PROMPT_GENERATION_PROMPT_TEMPLATE": request.app.state.config.IMAGE_PROMPT_GENERATION_PROMPT_TEMPLATE,
        "ENABLE_AUTOCOMPLETE_GENERATION": request.app.state.config.ENABLE_AUTOCOMPLETE_GENERATION,
        "AUTOCOMPLETE_GENERATION_INPUT_MAX_LENGTH": request.app.state.config.AUTOCOMPLETE_GENERATION_INPUT_MAX_LENGTH,
        "TAGS_GENERATION_PROMPT_TEMPLATE": request.app.state.config.TAGS_GENERATION_PROMPT_TEMPLATE,
        "ENABLE_TAGS_GENERATION": request.app.state.config.ENABLE_TAGS_GENERATION,
        "ENABLE_SEARCH_QUERY_GENERATION": request.app.state.config.ENABLE_SEARCH_QUERY_GENERATION,
        "ENABLE_RETRIEVAL_QUERY_GENERATION": request.app.state.config.ENABLE_RETRIEVAL_QUERY_GENERATION,
        "QUERY_GENERATION_PROMPT_TEMPLATE": request.app.state.config.QUERY_GENERATION_PROMPT_TEMPLATE,
        "TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE": request.app.state.config.TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE,
    }


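# POST /title/completions: generate a short chat title from the conversation
# messages (stripping any <details type="reasoning"> blocks first) using the
# configured task model.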
@router.post("/title/completions")
async def generate_title(
    request: Request, form_data: dict, user=Depends(get_verified_user)
):
    if not request.app.state.config.ENABLE_TITLE_GENERATION:
        return JSONResponse(
            status_code=status.HTTP_200_OK,
            content={"detail": "Title generation is disabled"},
        )

    if getattr(request.state, "direct", False) and hasattr(request.state, "model"):
        models = {
            request.state.model["id"]: request.state.model,
        }
    else:
        models = request.app.state.MODELS

    model_id = form_data["model"]
    if model_id not in models:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail="Model not found",
        )

    # Check if the user has a custom task model
    # If the user has a custom task model, use that model
    task_model_id = get_task_model_id(
        model_id,
        request.app.state.config.TASK_MODEL,
        request.app.state.config.TASK_MODEL_EXTERNAL,
        models,
    )

    log.debug(
        f"generating chat title using model {task_model_id} for user {user.email} "
    )

    if request.app.state.config.TITLE_GENERATION_PROMPT_TEMPLATE != "":
        template = request.app.state.config.TITLE_GENERATION_PROMPT_TEMPLATE
    else:
        template = DEFAULT_TITLE_GENERATION_PROMPT_TEMPLATE

    messages = form_data["messages"]

    # Remove reasoning details from the messages
    for message in messages:
        message["content"] = re.sub(
            r"<details\s+type=\"reasoning\"[^>]*>.*?<\/details>",
            "",
            message["content"],
            flags=re.S,
        ).strip()

    content = title_generation_template(
        template,
        messages,
        {
            "name": user.name,
            "location": user.info.get("location") if user.info else None,
        },
    )

    payload = {
        "model": task_model_id,
        "messages": [{"role": "user", "content": content}],
        "stream": False,
        **(
            {"max_tokens": 1000}
            if models[task_model_id]["owned_by"] == "ollama"
            else {
                "max_completion_tokens": 1000,
            }
        ),
        "metadata": {
            **(request.state.metadata if hasattr(request.state, "metadata") else {}),
            "task": str(TASKS.TITLE_GENERATION),
            "task_body": form_data,
            "chat_id": form_data.get("chat_id", None),
        },
    }

    try:
        return await generate_chat_completion(request, form_data=payload, user=user)
    except Exception as e:
        log.error("Exception occurred", exc_info=True)
        return JSONResponse(
            status_code=status.HTTP_400_BAD_REQUEST,
            content={"detail": "An internal error has occurred."},
        )


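# POST /tags/completions: generate chat tags from the conversation messages;
# returns early when ENABLE_TAGS_GENERATION is off.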
@router.post("/tags/completions")
async def generate_chat_tags(
    request: Request, form_data: dict, user=Depends(get_verified_user)
):
    if not request.app.state.config.ENABLE_TAGS_GENERATION:
        return JSONResponse(
            status_code=status.HTTP_200_OK,
            content={"detail": "Tags generation is disabled"},
        )

    if getattr(request.state, "direct", False) and hasattr(request.state, "model"):
        models = {
            request.state.model["id"]: request.state.model,
        }
    else:
        models = request.app.state.MODELS

    model_id = form_data["model"]
    if model_id not in models:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail="Model not found",
        )

    # Check if the user has a custom task model
    # If the user has a custom task model, use that model
    task_model_id = get_task_model_id(
        model_id,
        request.app.state.config.TASK_MODEL,
        request.app.state.config.TASK_MODEL_EXTERNAL,
        models,
    )

    log.debug(
        f"generating chat tags using model {task_model_id} for user {user.email} "
    )

    if request.app.state.config.TAGS_GENERATION_PROMPT_TEMPLATE != "":
        template = request.app.state.config.TAGS_GENERATION_PROMPT_TEMPLATE
    else:
        template = DEFAULT_TAGS_GENERATION_PROMPT_TEMPLATE

    content = tags_generation_template(
        template, form_data["messages"], {"name": user.name}
    )

    payload = {
        "model": task_model_id,
        "messages": [{"role": "user", "content": content}],
        "stream": False,
        "metadata": {
            **(request.state.metadata if hasattr(request.state, "metadata") else {}),
            "task": str(TASKS.TAGS_GENERATION),
            "task_body": form_data,
            "chat_id": form_data.get("chat_id", None),
        },
    }

    try:
        return await generate_chat_completion(request, form_data=payload, user=user)
    except Exception as e:
        log.error(f"Error generating chat completion: {e}")
        return JSONResponse(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            content={"detail": "An internal error has occurred."},
        )


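# POST /image_prompt/completions: turn the recent conversation into an image
# generation prompt.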
@router.post("/image_prompt/completions")
async def generate_image_prompt(
    request: Request, form_data: dict, user=Depends(get_verified_user)
):
    if getattr(request.state, "direct", False) and hasattr(request.state, "model"):
        models = {
            request.state.model["id"]: request.state.model,
        }
    else:
        models = request.app.state.MODELS

    model_id = form_data["model"]
    if model_id not in models:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail="Model not found",
        )

    # Check if the user has a custom task model
    # If the user has a custom task model, use that model
    task_model_id = get_task_model_id(
        model_id,
        request.app.state.config.TASK_MODEL,
        request.app.state.config.TASK_MODEL_EXTERNAL,
        models,
    )

    log.debug(
        f"generating image prompt using model {task_model_id} for user {user.email} "
    )

    if request.app.state.config.IMAGE_PROMPT_GENERATION_PROMPT_TEMPLATE != "":
        template = request.app.state.config.IMAGE_PROMPT_GENERATION_PROMPT_TEMPLATE
    else:
        template = DEFAULT_IMAGE_PROMPT_GENERATION_PROMPT_TEMPLATE

    content = image_prompt_generation_template(
        template,
        form_data["messages"],
        user={
            "name": user.name,
        },
    )

    payload = {
        "model": task_model_id,
        "messages": [{"role": "user", "content": content}],
        "stream": False,
        "metadata": {
            **(request.state.metadata if hasattr(request.state, "metadata") else {}),
            "task": str(TASKS.IMAGE_PROMPT_GENERATION),
            "task_body": form_data,
            "chat_id": form_data.get("chat_id", None),
        },
    }

    try:
        return await generate_chat_completion(request, form_data=payload, user=user)
    except Exception as e:
        log.error("Exception occurred", exc_info=True)
        return JSONResponse(
            status_code=status.HTTP_400_BAD_REQUEST,
            content={"detail": "An internal error has occurred."},
        )


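# POST /queries/completions: generate web-search or retrieval queries from the
# conversation, depending on form_data["type"].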
@router.post("/queries/completions")
async def generate_queries(
    request: Request, form_data: dict, user=Depends(get_verified_user)
):
    type = form_data.get("type")
    if type == "web_search":
        if not request.app.state.config.ENABLE_SEARCH_QUERY_GENERATION:
            raise HTTPException(
                status_code=status.HTTP_400_BAD_REQUEST,
                detail=f"Search query generation is disabled",
            )
    elif type == "retrieval":
        if not request.app.state.config.ENABLE_RETRIEVAL_QUERY_GENERATION:
            raise HTTPException(
                status_code=status.HTTP_400_BAD_REQUEST,
                detail=f"Query generation is disabled",
            )

    if getattr(request.state, "direct", False) and hasattr(request.state, "model"):
        models = {
            request.state.model["id"]: request.state.model,
        }
    else:
        models = request.app.state.MODELS

    model_id = form_data["model"]
    if model_id not in models:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail="Model not found",
        )

    # Check if the user has a custom task model
    # If the user has a custom task model, use that model
    task_model_id = get_task_model_id(
        model_id,
        request.app.state.config.TASK_MODEL,
        request.app.state.config.TASK_MODEL_EXTERNAL,
        models,
    )

    log.debug(
        f"generating {type} queries using model {task_model_id} for user {user.email}"
    )

    if (request.app.state.config.QUERY_GENERATION_PROMPT_TEMPLATE).strip() != "":
        template = request.app.state.config.QUERY_GENERATION_PROMPT_TEMPLATE
    else:
        template = DEFAULT_QUERY_GENERATION_PROMPT_TEMPLATE

    content = query_generation_template(
        template, form_data["messages"], {"name": user.name}
    )

    payload = {
        "model": task_model_id,
        "messages": [{"role": "user", "content": content}],
        "stream": False,
        "metadata": {
            **(request.state.metadata if hasattr(request.state, "metadata") else {}),
            "task": str(TASKS.QUERY_GENERATION),
            "task_body": form_data,
            "chat_id": form_data.get("chat_id", None),
        },
    }

    try:
        return await generate_chat_completion(request, form_data=payload, user=user)
    except Exception as e:
        return JSONResponse(
            status_code=status.HTTP_400_BAD_REQUEST,
            content={"detail": str(e)},
        )


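# POST /auto/completions: generate an inline autocompletion for the current
# prompt, rejecting inputs longer than AUTOCOMPLETE_GENERATION_INPUT_MAX_LENGTH
# when that limit is set (> 0).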
@router.post("/auto/completions")
async def generate_autocompletion(
    request: Request, form_data: dict, user=Depends(get_verified_user)
):
    if not request.app.state.config.ENABLE_AUTOCOMPLETE_GENERATION:
        raise HTTPException(
            status_code=status.HTTP_400_BAD_REQUEST,
            detail=f"Autocompletion generation is disabled",
        )

    type = form_data.get("type")
    prompt = form_data.get("prompt")
    messages = form_data.get("messages")

    if request.app.state.config.AUTOCOMPLETE_GENERATION_INPUT_MAX_LENGTH > 0:
        if (
            len(prompt)
            > request.app.state.config.AUTOCOMPLETE_GENERATION_INPUT_MAX_LENGTH
        ):
            raise HTTPException(
                status_code=status.HTTP_400_BAD_REQUEST,
                detail=f"Input prompt exceeds maximum length of {request.app.state.config.AUTOCOMPLETE_GENERATION_INPUT_MAX_LENGTH}",
            )

    if getattr(request.state, "direct", False) and hasattr(request.state, "model"):
        models = {
            request.state.model["id"]: request.state.model,
        }
    else:
        models = request.app.state.MODELS

    model_id = form_data["model"]
    if model_id not in models:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail="Model not found",
        )

    # Check if the user has a custom task model
    # If the user has a custom task model, use that model
    task_model_id = get_task_model_id(
        model_id,
        request.app.state.config.TASK_MODEL,
        request.app.state.config.TASK_MODEL_EXTERNAL,
        models,
    )

    log.debug(
        f"generating autocompletion using model {task_model_id} for user {user.email}"
    )

    if (request.app.state.config.AUTOCOMPLETE_GENERATION_PROMPT_TEMPLATE).strip() != "":
        template = request.app.state.config.AUTOCOMPLETE_GENERATION_PROMPT_TEMPLATE
    else:
        template = DEFAULT_AUTOCOMPLETE_GENERATION_PROMPT_TEMPLATE

    content = autocomplete_generation_template(
        template, prompt, messages, type, {"name": user.name}
    )

    payload = {
        "model": task_model_id,
        "messages": [{"role": "user", "content": content}],
        "stream": False,
        "metadata": {
            **(request.state.metadata if hasattr(request.state, "metadata") else {}),
            "task": str(TASKS.AUTOCOMPLETE_GENERATION),
            "task_body": form_data,
            "chat_id": form_data.get("chat_id", None),
        },
    }

    try:
        return await generate_chat_completion(request, form_data=payload, user=user)
    except Exception as e:
        log.error(f"Error generating chat completion: {e}")
        return JSONResponse(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            content={"detail": "An internal error has occurred."},
        )


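# POST /emoji/completions: pick an emoji for the given prompt; the completion
# is capped at a few tokens.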
@router.post("/emoji/completions")
async def generate_emoji(
    request: Request, form_data: dict, user=Depends(get_verified_user)
):
    if getattr(request.state, "direct", False) and hasattr(request.state, "model"):
        models = {
            request.state.model["id"]: request.state.model,
        }
    else:
        models = request.app.state.MODELS

    model_id = form_data["model"]
    if model_id not in models:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail="Model not found",
        )

    # Check if the user has a custom task model
    # If the user has a custom task model, use that model
    task_model_id = get_task_model_id(
        model_id,
        request.app.state.config.TASK_MODEL,
        request.app.state.config.TASK_MODEL_EXTERNAL,
        models,
    )

    log.debug(f"generating emoji using model {task_model_id} for user {user.email} ")

    template = DEFAULT_EMOJI_GENERATION_PROMPT_TEMPLATE

    content = emoji_generation_template(
        template,
        form_data["prompt"],
        {
            "name": user.name,
            "location": user.info.get("location") if user.info else None,
        },
    )

    payload = {
        "model": task_model_id,
        "messages": [{"role": "user", "content": content}],
        "stream": False,
        **(
            {"max_tokens": 4}
            if models[task_model_id]["owned_by"] == "ollama"
            else {
                "max_completion_tokens": 4,
            }
        ),
        "chat_id": form_data.get("chat_id", None),
        "metadata": {
            **(request.state.metadata if hasattr(request.state, "metadata") else {}),
            "task": str(TASKS.EMOJI_GENERATION),
            "task_body": form_data,
        },
    }

    try:
        return await generate_chat_completion(request, form_data=payload, user=user)
    except Exception as e:
        return JSONResponse(
            status_code=status.HTTP_400_BAD_REQUEST,
            content={"detail": str(e)},
        )


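# POST /moa/completions: merge multiple model responses into a single
# mixture-of-agents (MOA) response; this endpoint may stream when requested.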
@router.post("/moa/completions")
async def generate_moa_response(
    request: Request, form_data: dict, user=Depends(get_verified_user)
):
    if getattr(request.state, "direct", False) and hasattr(request.state, "model"):
        models = {
            request.state.model["id"]: request.state.model,
        }
    else:
        models = request.app.state.MODELS

    model_id = form_data["model"]
    if model_id not in models:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail="Model not found",
        )

    # Check if the user has a custom task model
    # If the user has a custom task model, use that model
    task_model_id = get_task_model_id(
        model_id,
        request.app.state.config.TASK_MODEL,
        request.app.state.config.TASK_MODEL_EXTERNAL,
        models,
    )

    log.debug(f"generating MOA response using model {task_model_id} for user {user.email}")

    template = DEFAULT_MOA_GENERATION_PROMPT_TEMPLATE

    content = moa_response_generation_template(
        template,
        form_data["prompt"],
        form_data["responses"],
    )

    payload = {
        "model": task_model_id,
        "messages": [{"role": "user", "content": content}],
        "stream": form_data.get("stream", False),
        "metadata": {
            **(request.state.metadata if hasattr(request.state, "metadata") else {}),
            "chat_id": form_data.get("chat_id", None),
            "task": str(TASKS.MOA_RESPONSE_GENERATION),
            "task_body": form_data,
        },
    }

    try:
        return await generate_chat_completion(request, form_data=payload, user=user)
    except Exception as e:
        return JSONResponse(
            status_code=status.HTTP_400_BAD_REQUEST,
            content={"detail": str(e)},
        )