Browse Source

Merge pull request #4434 from open-webui/dev

0.3.13
Timothy Jaeryang Baek 8 months ago
parent
commit
13b0e7d64a
100 changed files with 1289 additions and 1428 deletions
  1. 7 0
      .github/workflows/integration-test.yml
  2. 27 0
      CHANGELOG.md
  3. 7 7
      backend/apps/audio/main.py
  4. 10 8
      backend/apps/images/main.py
  5. 7 5
      backend/apps/images/utils/comfyui.py
  6. 90 315
      backend/apps/ollama/main.py
  7. 8 5
      backend/apps/openai/main.py
  8. 9 9
      backend/apps/rag/main.py
  9. 2 2
      backend/apps/rag/search/brave.py
  10. 3 3
      backend/apps/rag/search/duckduckgo.py
  11. 2 2
      backend/apps/rag/search/google_pse.py
  12. 1 1
      backend/apps/rag/search/jina_search.py
  13. 5 5
      backend/apps/rag/search/searxng.py
  14. 2 2
      backend/apps/rag/search/serper.py
  15. 2 2
      backend/apps/rag/search/serply.py
  16. 2 2
      backend/apps/rag/search/serpstack.py
  17. 1 1
      backend/apps/rag/search/tavily.py
  18. 6 6
      backend/apps/rag/utils.py
  19. 4 2
      backend/apps/webui/main.py
  20. 6 6
      backend/apps/webui/models/auths.py
  21. 20 20
      backend/apps/webui/models/chats.py
  22. 5 5
      backend/apps/webui/models/documents.py
  23. 5 5
      backend/apps/webui/models/files.py
  24. 10 10
      backend/apps/webui/models/functions.py
  25. 10 10
      backend/apps/webui/models/memories.py
  26. 3 3
      backend/apps/webui/models/models.py
  27. 5 5
      backend/apps/webui/models/prompts.py
  28. 8 8
      backend/apps/webui/models/tags.py
  29. 8 8
      backend/apps/webui/models/tools.py
  30. 12 12
      backend/apps/webui/models/users.py
  31. 11 11
      backend/apps/webui/routers/chats.py
  32. 7 7
      backend/apps/webui/routers/configs.py
  33. 4 4
      backend/apps/webui/routers/documents.py
  34. 2 2
      backend/apps/webui/routers/files.py
  35. 5 5
      backend/apps/webui/routers/functions.py
  36. 2 2
      backend/apps/webui/routers/memories.py
  37. 2 2
      backend/apps/webui/routers/models.py
  38. 3 3
      backend/apps/webui/routers/prompts.py
  39. 3 3
      backend/apps/webui/routers/tools.py
  40. 2 2
      backend/apps/webui/routers/users.py
  41. 2 2
      backend/apps/webui/routers/utils.py
  42. 14 0
      backend/apps/webui/utils.py
  43. 14 8
      backend/config.py
  44. 5 4
      backend/main.py
  45. 6 6
      backend/requirements.txt
  46. 0 1
      backend/start.sh
  47. 55 14
      backend/utils/misc.py
  48. 2 2
      backend/utils/tools.py
  49. 18 13
      cypress/e2e/chat.cy.ts
  50. 0 1
      docs/CONTRIBUTING.md
  51. 247 270
      package-lock.json
  52. 22 16
      package.json
  53. 6 6
      pyproject.toml
  54. 8 6
      requirements-dev.lock
  55. 8 6
      requirements.lock
  56. 1 1
      src/app.html
  57. 1 0
      src/lib/apis/index.ts
  58. 0 50
      src/lib/apis/ollama/index.ts
  59. 1 126
      src/lib/apis/openai/index.ts
  60. 6 6
      src/lib/components/ChangelogModal.svelte
  61. 1 1
      src/lib/components/admin/Settings/Documents.svelte
  62. 24 16
      src/lib/components/chat/Chat.svelte
  63. 3 1
      src/lib/components/chat/Controls/Controls.svelte
  64. 16 16
      src/lib/components/chat/MessageInput/CallOverlay.svelte
  65. 1 1
      src/lib/components/chat/MessageInput/Documents.svelte
  66. 4 4
      src/lib/components/chat/Messages.svelte
  67. 2 2
      src/lib/components/chat/Messages/CitationsModal.svelte
  68. 101 61
      src/lib/components/chat/Messages/CodeBlock.svelte
  69. 9 0
      src/lib/components/chat/Messages/KatexRenderer.svelte
  70. 10 3
      src/lib/components/chat/Messages/MarkdownInlineTokens.svelte
  71. 114 119
      src/lib/components/chat/Messages/MarkdownTokens.svelte
  72. 76 132
      src/lib/components/chat/Messages/ResponseMessage.svelte
  73. 7 7
      src/lib/components/chat/Messages/UserMessage.svelte
  74. 1 1
      src/lib/components/chat/ModelSelector/Selector.svelte
  75. 2 2
      src/lib/components/chat/Settings/About.svelte
  76. 47 0
      src/lib/components/chat/Settings/Advanced/AdvancedParams.svelte
  77. 28 0
      src/lib/components/chat/Settings/Interface.svelte
  78. 1 1
      src/lib/components/common/Valves.svelte
  79. 6 6
      src/lib/components/layout/Sidebar/ChatItem.svelte
  80. 1 1
      src/lib/components/playground/Playground.svelte
  81. 1 1
      src/lib/components/workspace/Documents.svelte
  82. 2 2
      src/lib/components/workspace/Models.svelte
  83. 1 1
      src/lib/components/workspace/Models/Knowledge/Selector.svelte
  84. 5 0
      src/lib/i18n/locales/ar-BH/translation.json
  85. 5 0
      src/lib/i18n/locales/bg-BG/translation.json
  86. 5 0
      src/lib/i18n/locales/bn-BD/translation.json
  87. 7 2
      src/lib/i18n/locales/ca-ES/translation.json
  88. 5 0
      src/lib/i18n/locales/ceb-PH/translation.json
  89. 5 0
      src/lib/i18n/locales/de-DE/translation.json
  90. 5 0
      src/lib/i18n/locales/dg-DG/translation.json
  91. 5 0
      src/lib/i18n/locales/en-GB/translation.json
  92. 5 0
      src/lib/i18n/locales/en-US/translation.json
  93. 5 0
      src/lib/i18n/locales/es-ES/translation.json
  94. 5 0
      src/lib/i18n/locales/fa-IR/translation.json
  95. 5 0
      src/lib/i18n/locales/fi-FI/translation.json
  96. 5 0
      src/lib/i18n/locales/fr-CA/translation.json
  97. 5 0
      src/lib/i18n/locales/fr-FR/translation.json
  98. 5 0
      src/lib/i18n/locales/he-IL/translation.json
  99. 5 0
      src/lib/i18n/locales/hi-IN/translation.json
  100. 5 0
      src/lib/i18n/locales/hr-HR/translation.json

+ 7 - 0
.github/workflows/integration-test.yml

@@ -15,6 +15,13 @@ jobs:
     name: Run Cypress Integration Tests
     runs-on: ubuntu-latest
     steps:
+      - name: Maximize build space
+        uses: AdityaGarg8/remove-unwanted-software@v4.1
+        with:
+          remove-android: 'true'
+          remove-haskell: 'true'
+          remove-codeql: 'true'
+
       - name: Checkout Repository
         uses: actions/checkout@v4
 

+ 27 - 0
CHANGELOG.md

@@ -5,6 +5,33 @@ All notable changes to this project will be documented in this file.
 The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/),
 and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
 
+## [0.3.13] - 2024-08-14
+
+### Added
+
+- **🎨 Enhanced Markdown Rendering**: Significant improvements in rendering markdown, ensuring smooth and reliable display of LaTeX and Mermaid charts, enhancing user experience with more robust visual content.
+- **🔄 Auto-Install Tools & Functions Python Dependencies**: For 'Tools' and 'Functions', Open WebUI now automatically installs extra Python requirements specified in the frontmatter, streamlining setup processes and customization.
+- **🌀 OAuth Email Claim Customization**: Introduced an 'OAUTH_EMAIL_CLAIM' variable to allow customization of the default "email" claim within OAuth configurations, providing greater flexibility in authentication processes.
+- **📶 Websocket Reconnection**: Enhanced reliability with the capability to automatically reconnect when a websocket is closed, ensuring consistent and stable communication.
+- **🤳 Haptic Feedback on Supported Devices**: Android devices now support haptic feedback for an immersive tactile experience during certain interactions.
+
+### Fixed
+
+- **🛠️ ComfyUI Performance Improvement**: Addressed an issue causing FastAPI to stall when ComfyUI image generation was active; now runs in a separate thread to prevent UI unresponsiveness.
+- **🔀 Session Handling**: Fixed an issue mandating session_id on the client side to ensure smoother session management and transitions.
+- **🖋️ Minor Bug Fixes and Format Corrections**: Various minor fixes including typo corrections, backend formatting improvements, and test amendments enhancing overall system stability and performance.
+
+### Changed
+
+- **🚀 Migration to SvelteKit 2**: Upgraded the underlying framework to SvelteKit version 2, offering enhanced speed, better code structure, and improved deployment capabilities.
+- **🧹 General Cleanup and Refactoring**: Performed broad cleanup and refactoring across the platform, improving code efficiency and maintaining high standards of code health.
+- **🚧 Integration Testing Improvements**: Modified how Cypress integration tests detect chat messages and updated sharing tests for better reliability and accuracy.
+- **📁 Standardized '.safetensors' File Extension**: Renamed the '.sft' file extension to '.safetensors' for ComfyUI workflows, standardizing file formats across the platform.
+
+### Removed
+
+- **🗑️ Deprecated Frontend Functions**: Removed frontend functions that were migrated to backend to declutter the codebase and reduce redundancy.
+
 ## [0.3.12] - 2024-08-07
 
 ### Added

+ 7 - 7
backend/apps/audio/main.py

@@ -15,7 +15,7 @@ from fastapi.responses import StreamingResponse, JSONResponse, FileResponse
 from fastapi.middleware.cors import CORSMiddleware
 from pydantic import BaseModel
 
-from typing import List
+
 import uuid
 import requests
 import hashlib
@@ -244,7 +244,7 @@ async def speech(request: Request, user=Depends(get_verified_user)):
                     res = r.json()
                     if "error" in res:
                         error_detail = f"External: {res['error']['message']}"
-                except:
+                except Exception:
                     error_detail = f"External: {e}"
 
             raise HTTPException(
@@ -299,7 +299,7 @@ async def speech(request: Request, user=Depends(get_verified_user)):
                     res = r.json()
                     if "error" in res:
                         error_detail = f"External: {res['error']['message']}"
-                except:
+                except Exception:
                     error_detail = f"External: {e}"
 
             raise HTTPException(
@@ -353,7 +353,7 @@ def transcribe(
 
             try:
                 model = WhisperModel(**whisper_kwargs)
-            except:
+            except Exception:
                 log.warning(
                     "WhisperModel initialization failed, attempting download with local_files_only=False"
                 )
@@ -421,7 +421,7 @@ def transcribe(
                         res = r.json()
                         if "error" in res:
                             error_detail = f"External: {res['error']['message']}"
-                    except:
+                    except Exception:
                         error_detail = f"External: {e}"
 
                 raise HTTPException(
@@ -438,7 +438,7 @@ def transcribe(
         )
 
 
-def get_available_models() -> List[dict]:
+def get_available_models() -> list[dict]:
     if app.state.config.TTS_ENGINE == "openai":
         return [{"id": "tts-1"}, {"id": "tts-1-hd"}]
     elif app.state.config.TTS_ENGINE == "elevenlabs":
@@ -466,7 +466,7 @@ async def get_models(user=Depends(get_verified_user)):
     return {"models": get_available_models()}
 
 
-def get_available_voices() -> List[dict]:
+def get_available_voices() -> list[dict]:
     if app.state.config.TTS_ENGINE == "openai":
         return [
             {"name": "alloy", "id": "alloy"},

+ 10 - 8
backend/apps/images/main.py

@@ -94,7 +94,7 @@ app.state.config.COMFYUI_FLUX_FP8_CLIP = COMFYUI_FLUX_FP8_CLIP
 
 
 def get_automatic1111_api_auth():
-    if app.state.config.AUTOMATIC1111_API_AUTH == None:
+    if app.state.config.AUTOMATIC1111_API_AUTH is None:
         return ""
     else:
         auth1111_byte_string = app.state.config.AUTOMATIC1111_API_AUTH.encode("utf-8")
@@ -145,28 +145,30 @@ async def get_engine_url(user=Depends(get_admin_user)):
 async def update_engine_url(
     form_data: EngineUrlUpdateForm, user=Depends(get_admin_user)
 ):
-    if form_data.AUTOMATIC1111_BASE_URL == None:
+    if form_data.AUTOMATIC1111_BASE_URL is None:
         app.state.config.AUTOMATIC1111_BASE_URL = AUTOMATIC1111_BASE_URL
     else:
         url = form_data.AUTOMATIC1111_BASE_URL.strip("/")
         try:
-            r = requests.head(url) 
+            r = requests.head(url)
+            r.raise_for_status()
             app.state.config.AUTOMATIC1111_BASE_URL = url
         except Exception as e:
-            raise HTTPException(status_code=400, detail="Invalid URL provided.")
+            raise HTTPException(status_code=400, detail=ERROR_MESSAGES.INVALID_URL)
 
-    if form_data.COMFYUI_BASE_URL == None:
+    if form_data.COMFYUI_BASE_URL is None:
         app.state.config.COMFYUI_BASE_URL = COMFYUI_BASE_URL
     else:
         url = form_data.COMFYUI_BASE_URL.strip("/")
 
         try:
             r = requests.head(url)
+            r.raise_for_status()
             app.state.config.COMFYUI_BASE_URL = url
         except Exception as e:
-            raise HTTPException(status_code=400, detail=ERROR_MESSAGES.DEFAULT(e))
+            raise HTTPException(status_code=400, detail=ERROR_MESSAGES.INVALID_URL)
 
-    if form_data.AUTOMATIC1111_API_AUTH == None:
+    if form_data.AUTOMATIC1111_API_AUTH is None:
         app.state.config.AUTOMATIC1111_API_AUTH = AUTOMATIC1111_API_AUTH
     else:
         app.state.config.AUTOMATIC1111_API_AUTH = form_data.AUTOMATIC1111_API_AUTH
@@ -514,7 +516,7 @@ async def image_generations(
 
             data = ImageGenerationPayload(**data)
 
-            res = comfyui_generate_image(
+            res = await comfyui_generate_image(
                 app.state.config.MODEL,
                 data,
                 user.id,

+ 7 - 5
backend/apps/images/utils/comfyui.py

@@ -1,5 +1,5 @@
+import asyncio
 import websocket  # NOTE: websocket-client (https://github.com/websocket-client/websocket-client)
-import uuid
 import json
 import urllib.request
 import urllib.parse
@@ -170,7 +170,7 @@ FLUX_DEFAULT_PROMPT = """
     },
     "10": {
         "inputs": {
-            "vae_name": "ae.sft"
+            "vae_name": "ae.safetensors"
         },
         "class_type": "VAELoader"
     },
@@ -184,7 +184,7 @@ FLUX_DEFAULT_PROMPT = """
     },
     "12": {
         "inputs": {
-            "unet_name": "flux1-dev.sft",
+            "unet_name": "flux1-dev.safetensors",
             "weight_dtype": "default"
         },
         "class_type": "UNETLoader"
@@ -328,7 +328,7 @@ class ImageGenerationPayload(BaseModel):
     flux_fp8_clip: Optional[bool] = None
 
 
-def comfyui_generate_image(
+async def comfyui_generate_image(
     model: str, payload: ImageGenerationPayload, client_id, base_url
 ):
     ws_url = base_url.replace("http://", "ws://").replace("https://", "wss://")
@@ -397,7 +397,9 @@ def comfyui_generate_image(
         return None
 
     try:
-        images = get_images(ws, comfyui_prompt, client_id, base_url)
+        images = await asyncio.to_thread(
+            get_images, ws, comfyui_prompt, client_id, base_url
+        )
     except Exception as e:
         log.exception(f"Error while receiving images: {e}")
         images = None

+ 90 - 315
backend/apps/ollama/main.py

@@ -1,47 +1,36 @@
 from fastapi import (
     FastAPI,
     Request,
-    Response,
     HTTPException,
     Depends,
-    status,
     UploadFile,
     File,
-    BackgroundTasks,
 )
 from fastapi.middleware.cors import CORSMiddleware
 from fastapi.responses import StreamingResponse
-from fastapi.concurrency import run_in_threadpool
 
 from pydantic import BaseModel, ConfigDict
 
 import os
 import re
-import copy
 import random
 import requests
 import json
-import uuid
 import aiohttp
 import asyncio
 import logging
 import time
 from urllib.parse import urlparse
-from typing import Optional, List, Union
+from typing import Optional, Union
 
 from starlette.background import BackgroundTask
 
 from apps.webui.models.models import Models
-from apps.webui.models.users import Users
 from constants import ERROR_MESSAGES
 from utils.utils import (
-    decode_token,
-    get_current_user,
     get_verified_user,
     get_admin_user,
 )
-from utils.task import prompt_template
-
 
 from config import (
     SRC_LOG_LEVELS,
@@ -53,7 +42,12 @@ from config import (
     UPLOAD_DIR,
     AppConfig,
 )
-from utils.misc import calculate_sha256, add_or_update_system_message
+from utils.misc import (
+    calculate_sha256,
+    apply_model_params_to_body_ollama,
+    apply_model_params_to_body_openai,
+    apply_model_system_prompt_to_body,
+)
 
 log = logging.getLogger(__name__)
 log.setLevel(SRC_LOG_LEVELS["OLLAMA"])
@@ -120,7 +114,7 @@ async def get_ollama_api_urls(user=Depends(get_admin_user)):
 
 
 class UrlUpdateForm(BaseModel):
-    urls: List[str]
+    urls: list[str]
 
 
 @app.post("/urls/update")
@@ -183,7 +177,7 @@ async def post_streaming_url(url: str, payload: str, stream: bool = True):
                 res = await r.json()
                 if "error" in res:
                     error_detail = f"Ollama: {res['error']}"
-            except:
+            except Exception:
                 error_detail = f"Ollama: {e}"
 
         raise HTTPException(
@@ -238,7 +232,7 @@ async def get_all_models():
 async def get_ollama_tags(
     url_idx: Optional[int] = None, user=Depends(get_verified_user)
 ):
-    if url_idx == None:
+    if url_idx is None:
         models = await get_all_models()
 
         if app.state.config.ENABLE_MODEL_FILTER:
@@ -269,7 +263,7 @@ async def get_ollama_tags(
                     res = r.json()
                     if "error" in res:
                         error_detail = f"Ollama: {res['error']}"
-                except:
+                except Exception:
                     error_detail = f"Ollama: {e}"
 
             raise HTTPException(
@@ -282,8 +276,7 @@ async def get_ollama_tags(
 @app.get("/api/version/{url_idx}")
 async def get_ollama_versions(url_idx: Optional[int] = None):
     if app.state.config.ENABLE_OLLAMA_API:
-        if url_idx == None:
-
+        if url_idx is None:
             # returns lowest version
             tasks = [
                 fetch_url(f"{url}/api/version")
@@ -323,7 +316,7 @@ async def get_ollama_versions(url_idx: Optional[int] = None):
                         res = r.json()
                         if "error" in res:
                             error_detail = f"Ollama: {res['error']}"
-                    except:
+                    except Exception:
                         error_detail = f"Ollama: {e}"
 
                 raise HTTPException(
@@ -346,8 +339,6 @@ async def pull_model(
     url = app.state.config.OLLAMA_BASE_URLS[url_idx]
     log.info(f"url: {url}")
 
-    r = None
-
     # Admin should be able to pull models from any source
     payload = {**form_data.model_dump(exclude_none=True), "insecure": True}
 
@@ -367,7 +358,7 @@ async def push_model(
     url_idx: Optional[int] = None,
     user=Depends(get_admin_user),
 ):
-    if url_idx == None:
+    if url_idx is None:
         if form_data.name in app.state.MODELS:
             url_idx = app.state.MODELS[form_data.name]["urls"][0]
         else:
@@ -417,7 +408,7 @@ async def copy_model(
     url_idx: Optional[int] = None,
     user=Depends(get_admin_user),
 ):
-    if url_idx == None:
+    if url_idx is None:
         if form_data.source in app.state.MODELS:
             url_idx = app.state.MODELS[form_data.source]["urls"][0]
         else:
@@ -428,13 +419,13 @@ async def copy_model(
 
     url = app.state.config.OLLAMA_BASE_URLS[url_idx]
     log.info(f"url: {url}")
+    r = requests.request(
+        method="POST",
+        url=f"{url}/api/copy",
+        data=form_data.model_dump_json(exclude_none=True).encode(),
+    )
 
     try:
-        r = requests.request(
-            method="POST",
-            url=f"{url}/api/copy",
-            data=form_data.model_dump_json(exclude_none=True).encode(),
-        )
         r.raise_for_status()
 
         log.debug(f"r.text: {r.text}")
@@ -448,7 +439,7 @@ async def copy_model(
                 res = r.json()
                 if "error" in res:
                     error_detail = f"Ollama: {res['error']}"
-            except:
+            except Exception:
                 error_detail = f"Ollama: {e}"
 
         raise HTTPException(
@@ -464,7 +455,7 @@ async def delete_model(
     url_idx: Optional[int] = None,
     user=Depends(get_admin_user),
 ):
-    if url_idx == None:
+    if url_idx is None:
         if form_data.name in app.state.MODELS:
             url_idx = app.state.MODELS[form_data.name]["urls"][0]
         else:
@@ -476,12 +467,12 @@ async def delete_model(
     url = app.state.config.OLLAMA_BASE_URLS[url_idx]
     log.info(f"url: {url}")
 
+    r = requests.request(
+        method="DELETE",
+        url=f"{url}/api/delete",
+        data=form_data.model_dump_json(exclude_none=True).encode(),
+    )
     try:
-        r = requests.request(
-            method="DELETE",
-            url=f"{url}/api/delete",
-            data=form_data.model_dump_json(exclude_none=True).encode(),
-        )
         r.raise_for_status()
 
         log.debug(f"r.text: {r.text}")
@@ -495,7 +486,7 @@ async def delete_model(
                 res = r.json()
                 if "error" in res:
                     error_detail = f"Ollama: {res['error']}"
-            except:
+            except Exception:
                 error_detail = f"Ollama: {e}"
 
         raise HTTPException(
@@ -516,12 +507,12 @@ async def show_model_info(form_data: ModelNameForm, user=Depends(get_verified_us
     url = app.state.config.OLLAMA_BASE_URLS[url_idx]
     log.info(f"url: {url}")
 
+    r = requests.request(
+        method="POST",
+        url=f"{url}/api/show",
+        data=form_data.model_dump_json(exclude_none=True).encode(),
+    )
     try:
-        r = requests.request(
-            method="POST",
-            url=f"{url}/api/show",
-            data=form_data.model_dump_json(exclude_none=True).encode(),
-        )
         r.raise_for_status()
 
         return r.json()
@@ -533,7 +524,7 @@ async def show_model_info(form_data: ModelNameForm, user=Depends(get_verified_us
                 res = r.json()
                 if "error" in res:
                     error_detail = f"Ollama: {res['error']}"
-            except:
+            except Exception:
                 error_detail = f"Ollama: {e}"
 
         raise HTTPException(
@@ -556,7 +547,7 @@ async def generate_embeddings(
     url_idx: Optional[int] = None,
     user=Depends(get_verified_user),
 ):
-    if url_idx == None:
+    if url_idx is None:
         model = form_data.model
 
         if ":" not in model:
@@ -573,12 +564,12 @@ async def generate_embeddings(
     url = app.state.config.OLLAMA_BASE_URLS[url_idx]
     log.info(f"url: {url}")
 
+    r = requests.request(
+        method="POST",
+        url=f"{url}/api/embeddings",
+        data=form_data.model_dump_json(exclude_none=True).encode(),
+    )
     try:
-        r = requests.request(
-            method="POST",
-            url=f"{url}/api/embeddings",
-            data=form_data.model_dump_json(exclude_none=True).encode(),
-        )
         r.raise_for_status()
 
         return r.json()
@@ -590,7 +581,7 @@ async def generate_embeddings(
                 res = r.json()
                 if "error" in res:
                     error_detail = f"Ollama: {res['error']}"
-            except:
+            except Exception:
                 error_detail = f"Ollama: {e}"
 
         raise HTTPException(
@@ -603,10 +594,9 @@ def generate_ollama_embeddings(
     form_data: GenerateEmbeddingsForm,
     url_idx: Optional[int] = None,
 ):
-
     log.info(f"generate_ollama_embeddings {form_data}")
 
-    if url_idx == None:
+    if url_idx is None:
         model = form_data.model
 
         if ":" not in model:
@@ -623,12 +613,12 @@ def generate_ollama_embeddings(
     url = app.state.config.OLLAMA_BASE_URLS[url_idx]
     log.info(f"url: {url}")
 
+    r = requests.request(
+        method="POST",
+        url=f"{url}/api/embeddings",
+        data=form_data.model_dump_json(exclude_none=True).encode(),
+    )
     try:
-        r = requests.request(
-            method="POST",
-            url=f"{url}/api/embeddings",
-            data=form_data.model_dump_json(exclude_none=True).encode(),
-        )
         r.raise_for_status()
 
         data = r.json()
@@ -638,7 +628,7 @@ def generate_ollama_embeddings(
         if "embedding" in data:
             return data["embedding"]
         else:
-            raise "Something went wrong :/"
+            raise Exception("Something went wrong :/")
     except Exception as e:
         log.exception(e)
         error_detail = "Open WebUI: Server Connection Error"
@@ -647,16 +637,16 @@ def generate_ollama_embeddings(
                 res = r.json()
                 if "error" in res:
                     error_detail = f"Ollama: {res['error']}"
-            except:
+            except Exception:
                 error_detail = f"Ollama: {e}"
 
-        raise error_detail
+        raise Exception(error_detail)
 
 
 class GenerateCompletionForm(BaseModel):
     model: str
     prompt: str
-    images: Optional[List[str]] = None
+    images: Optional[list[str]] = None
     format: Optional[str] = None
     options: Optional[dict] = None
     system: Optional[str] = None
@@ -674,8 +664,7 @@ async def generate_completion(
     url_idx: Optional[int] = None,
     user=Depends(get_verified_user),
 ):
-
-    if url_idx == None:
+    if url_idx is None:
         model = form_data.model
 
         if ":" not in model:
@@ -700,12 +689,12 @@ async def generate_completion(
 class ChatMessage(BaseModel):
     role: str
     content: str
-    images: Optional[List[str]] = None
+    images: Optional[list[str]] = None
 
 
 class GenerateChatCompletionForm(BaseModel):
     model: str
-    messages: List[ChatMessage]
+    messages: list[ChatMessage]
     format: Optional[str] = None
     options: Optional[dict] = None
     template: Optional[str] = None
@@ -713,6 +702,18 @@ class GenerateChatCompletionForm(BaseModel):
     keep_alive: Optional[Union[int, str]] = None
 
 
+def get_ollama_url(url_idx: Optional[int], model: str):
+    if url_idx is None:
+        if model not in app.state.MODELS:
+            raise HTTPException(
+                status_code=400,
+                detail=ERROR_MESSAGES.MODEL_NOT_FOUND(model),
+            )
+        url_idx = random.choice(app.state.MODELS[model]["urls"])
+    url = app.state.config.OLLAMA_BASE_URLS[url_idx]
+    return url
+
+
 @app.post("/api/chat")
 @app.post("/api/chat/{url_idx}")
 async def generate_chat_completion(
@@ -720,12 +721,7 @@ async def generate_chat_completion(
     url_idx: Optional[int] = None,
     user=Depends(get_verified_user),
 ):
-
-    log.debug(
-        "form_data.model_dump_json(exclude_none=True).encode(): {0} ".format(
-            form_data.model_dump_json(exclude_none=True).encode()
-        )
-    )
+    log.debug(f"{form_data.model_dump_json(exclude_none=True).encode()}=")
 
     payload = {
         **form_data.model_dump(exclude_none=True, exclude=["metadata"]),
@@ -740,185 +736,21 @@ async def generate_chat_completion(
         if model_info.base_model_id:
             payload["model"] = model_info.base_model_id
 
-        model_info.params = model_info.params.model_dump()
+        params = model_info.params.model_dump()
 
-        if model_info.params:
+        if params:
             if payload.get("options") is None:
                 payload["options"] = {}
 
-            if (
-                model_info.params.get("mirostat", None)
-                and payload["options"].get("mirostat") is None
-            ):
-                payload["options"]["mirostat"] = model_info.params.get("mirostat", None)
-
-            if (
-                model_info.params.get("mirostat_eta", None)
-                and payload["options"].get("mirostat_eta") is None
-            ):
-                payload["options"]["mirostat_eta"] = model_info.params.get(
-                    "mirostat_eta", None
-                )
-
-            if (
-                model_info.params.get("mirostat_tau", None)
-                and payload["options"].get("mirostat_tau") is None
-            ):
-                payload["options"]["mirostat_tau"] = model_info.params.get(
-                    "mirostat_tau", None
-                )
-
-            if (
-                model_info.params.get("num_ctx", None)
-                and payload["options"].get("num_ctx") is None
-            ):
-                payload["options"]["num_ctx"] = model_info.params.get("num_ctx", None)
-
-            if (
-                model_info.params.get("num_batch", None)
-                and payload["options"].get("num_batch") is None
-            ):
-                payload["options"]["num_batch"] = model_info.params.get(
-                    "num_batch", None
-                )
-
-            if (
-                model_info.params.get("num_keep", None)
-                and payload["options"].get("num_keep") is None
-            ):
-                payload["options"]["num_keep"] = model_info.params.get("num_keep", None)
-
-            if (
-                model_info.params.get("repeat_last_n", None)
-                and payload["options"].get("repeat_last_n") is None
-            ):
-                payload["options"]["repeat_last_n"] = model_info.params.get(
-                    "repeat_last_n", None
-                )
-
-            if (
-                model_info.params.get("frequency_penalty", None)
-                and payload["options"].get("frequency_penalty") is None
-            ):
-                payload["options"]["repeat_penalty"] = model_info.params.get(
-                    "frequency_penalty", None
-                )
-
-            if (
-                model_info.params.get("temperature", None) is not None
-                and payload["options"].get("temperature") is None
-            ):
-                payload["options"]["temperature"] = model_info.params.get(
-                    "temperature", None
-                )
-
-            if (
-                model_info.params.get("seed", None) is not None
-                and payload["options"].get("seed") is None
-            ):
-                payload["options"]["seed"] = model_info.params.get("seed", None)
-
-            if (
-                model_info.params.get("stop", None)
-                and payload["options"].get("stop") is None
-            ):
-                payload["options"]["stop"] = (
-                    [
-                        bytes(stop, "utf-8").decode("unicode_escape")
-                        for stop in model_info.params["stop"]
-                    ]
-                    if model_info.params.get("stop", None)
-                    else None
-                )
-
-            if (
-                model_info.params.get("tfs_z", None)
-                and payload["options"].get("tfs_z") is None
-            ):
-                payload["options"]["tfs_z"] = model_info.params.get("tfs_z", None)
-
-            if (
-                model_info.params.get("max_tokens", None)
-                and payload["options"].get("max_tokens") is None
-            ):
-                payload["options"]["num_predict"] = model_info.params.get(
-                    "max_tokens", None
-                )
-
-            if (
-                model_info.params.get("top_k", None)
-                and payload["options"].get("top_k") is None
-            ):
-                payload["options"]["top_k"] = model_info.params.get("top_k", None)
-
-            if (
-                model_info.params.get("top_p", None)
-                and payload["options"].get("top_p") is None
-            ):
-                payload["options"]["top_p"] = model_info.params.get("top_p", None)
-
-            if (
-                model_info.params.get("min_p", None)
-                and payload["options"].get("min_p") is None
-            ):
-                payload["options"]["min_p"] = model_info.params.get("min_p", None)
-
-            if (
-                model_info.params.get("use_mmap", None)
-                and payload["options"].get("use_mmap") is None
-            ):
-                payload["options"]["use_mmap"] = model_info.params.get("use_mmap", None)
-
-            if (
-                model_info.params.get("use_mlock", None)
-                and payload["options"].get("use_mlock") is None
-            ):
-                payload["options"]["use_mlock"] = model_info.params.get(
-                    "use_mlock", None
-                )
-
-            if (
-                model_info.params.get("num_thread", None)
-                and payload["options"].get("num_thread") is None
-            ):
-                payload["options"]["num_thread"] = model_info.params.get(
-                    "num_thread", None
-                )
-
-        system = model_info.params.get("system", None)
-        if system:
-            system = prompt_template(
-                system,
-                **(
-                    {
-                        "user_name": user.name,
-                        "user_location": (
-                            user.info.get("location") if user.info else None
-                        ),
-                    }
-                    if user
-                    else {}
-                ),
+            payload["options"] = apply_model_params_to_body_ollama(
+                params, payload["options"]
             )
+            payload = apply_model_system_prompt_to_body(params, payload, user)
 
-            if payload.get("messages"):
-                payload["messages"] = add_or_update_system_message(
-                    system, payload["messages"]
-                )
-
-    if url_idx == None:
-        if ":" not in payload["model"]:
-            payload["model"] = f"{payload['model']}:latest"
-
-        if payload["model"] in app.state.MODELS:
-            url_idx = random.choice(app.state.MODELS[payload["model"]]["urls"])
-        else:
-            raise HTTPException(
-                status_code=400,
-                detail=ERROR_MESSAGES.MODEL_NOT_FOUND(form_data.model),
-            )
+    if ":" not in payload["model"]:
+        payload["model"] = f"{payload['model']}:latest"
 
-    url = app.state.config.OLLAMA_BASE_URLS[url_idx]
+    url = get_ollama_url(url_idx, payload["model"])
     log.info(f"url: {url}")
     log.debug(payload)
 
@@ -940,7 +772,7 @@ class OpenAIChatMessage(BaseModel):
 
 class OpenAIChatCompletionForm(BaseModel):
     model: str
-    messages: List[OpenAIChatMessage]
+    messages: list[OpenAIChatMessage]
 
     model_config = ConfigDict(extra="allow")
 
@@ -952,83 +784,28 @@ async def generate_openai_chat_completion(
     url_idx: Optional[int] = None,
     user=Depends(get_verified_user),
 ):
-    form_data = OpenAIChatCompletionForm(**form_data)
-    payload = {**form_data.model_dump(exclude_none=True, exclude=["metadata"])}
-
+    completion_form = OpenAIChatCompletionForm(**form_data)
+    payload = {**completion_form.model_dump(exclude_none=True, exclude=["metadata"])}
     if "metadata" in payload:
         del payload["metadata"]
 
-    model_id = form_data.model
+    model_id = completion_form.model
     model_info = Models.get_model_by_id(model_id)
 
     if model_info:
         if model_info.base_model_id:
             payload["model"] = model_info.base_model_id
 
-        model_info.params = model_info.params.model_dump()
+        params = model_info.params.model_dump()
 
-        if model_info.params:
-            payload["temperature"] = model_info.params.get("temperature", None)
-            payload["top_p"] = model_info.params.get("top_p", None)
-            payload["max_tokens"] = model_info.params.get("max_tokens", None)
-            payload["frequency_penalty"] = model_info.params.get(
-                "frequency_penalty", None
-            )
-            payload["seed"] = model_info.params.get("seed", None)
-            payload["stop"] = (
-                [
-                    bytes(stop, "utf-8").decode("unicode_escape")
-                    for stop in model_info.params["stop"]
-                ]
-                if model_info.params.get("stop", None)
-                else None
-            )
+        if params:
+            payload = apply_model_params_to_body_openai(params, payload)
+            payload = apply_model_system_prompt_to_body(params, payload, user)
 
-        system = model_info.params.get("system", None)
-
-        if system:
-            system = prompt_template(
-                system,
-                **(
-                    {
-                        "user_name": user.name,
-                        "user_location": (
-                            user.info.get("location") if user.info else None
-                        ),
-                    }
-                    if user
-                    else {}
-                ),
-            )
-            # Check if the payload already has a system message
-            # If not, add a system message to the payload
-            if payload.get("messages"):
-                for message in payload["messages"]:
-                    if message.get("role") == "system":
-                        message["content"] = system + message["content"]
-                        break
-                else:
-                    payload["messages"].insert(
-                        0,
-                        {
-                            "role": "system",
-                            "content": system,
-                        },
-                    )
+    if ":" not in payload["model"]:
+        payload["model"] = f"{payload['model']}:latest"
 
-    if url_idx == None:
-        if ":" not in payload["model"]:
-            payload["model"] = f"{payload['model']}:latest"
-
-        if payload["model"] in app.state.MODELS:
-            url_idx = random.choice(app.state.MODELS[payload["model"]]["urls"])
-        else:
-            raise HTTPException(
-                status_code=400,
-                detail=ERROR_MESSAGES.MODEL_NOT_FOUND(form_data.model),
-            )
-
-    url = app.state.config.OLLAMA_BASE_URLS[url_idx]
+    url = get_ollama_url(url_idx, payload["model"])
     log.info(f"url: {url}")
 
     return await post_streaming_url(
@@ -1044,7 +821,7 @@ async def get_openai_models(
     url_idx: Optional[int] = None,
     user=Depends(get_verified_user),
 ):
-    if url_idx == None:
+    if url_idx is None:
         models = await get_all_models()
 
         if app.state.config.ENABLE_MODEL_FILTER:
@@ -1099,7 +876,7 @@ async def get_openai_models(
                     res = r.json()
                     if "error" in res:
                         error_detail = f"Ollama: {res['error']}"
-                except:
+                except Exception:
                     error_detail = f"Ollama: {e}"
 
             raise HTTPException(
@@ -1125,7 +902,6 @@ def parse_huggingface_url(hf_url):
         path_components = parsed_url.path.split("/")
 
         # Extract the desired output
-        user_repo = "/".join(path_components[1:3])
         model_file = path_components[-1]
 
         return model_file
@@ -1190,7 +966,6 @@ async def download_model(
     url_idx: Optional[int] = None,
     user=Depends(get_admin_user),
 ):
-
     allowed_hosts = ["https://huggingface.co/", "https://github.com/"]
 
     if not any(form_data.url.startswith(host) for host in allowed_hosts):
@@ -1199,7 +974,7 @@ async def download_model(
             detail="Invalid file_url. Only URLs from allowed hosts are permitted.",
         )
 
-    if url_idx == None:
+    if url_idx is None:
         url_idx = 0
     url = app.state.config.OLLAMA_BASE_URLS[url_idx]
 
@@ -1222,7 +997,7 @@ def upload_model(
     url_idx: Optional[int] = None,
     user=Depends(get_admin_user),
 ):
-    if url_idx == None:
+    if url_idx is None:
         url_idx = 0
     ollama_url = app.state.config.OLLAMA_BASE_URLS[url_idx]
 

+ 8 - 5
backend/apps/openai/main.py

@@ -17,7 +17,10 @@ from utils.utils import (
     get_verified_user,
     get_admin_user,
 )
-from utils.misc import apply_model_params_to_body, apply_model_system_prompt_to_body
+from utils.misc import (
+    apply_model_params_to_body_openai,
+    apply_model_system_prompt_to_body,
+)
 
 from config import (
     SRC_LOG_LEVELS,
@@ -30,7 +33,7 @@ from config import (
     MODEL_FILTER_LIST,
     AppConfig,
 )
-from typing import List, Optional, Literal, overload
+from typing import Optional, Literal, overload
 
 
 import hashlib
@@ -86,11 +89,11 @@ async def update_config(form_data: OpenAIConfigForm, user=Depends(get_admin_user
 
 
 class UrlsUpdateForm(BaseModel):
-    urls: List[str]
+    urls: list[str]
 
 
 class KeysUpdateForm(BaseModel):
-    keys: List[str]
+    keys: list[str]
 
 
 @app.get("/urls")
@@ -368,7 +371,7 @@ async def generate_chat_completion(
             payload["model"] = model_info.base_model_id
 
         params = model_info.params.model_dump()
-        payload = apply_model_params_to_body(params, payload)
+        payload = apply_model_params_to_body_openai(params, payload)
         payload = apply_model_system_prompt_to_body(params, payload, user)
 
     model = app.state.MODELS[payload.get("model")]

+ 9 - 9
backend/apps/rag/main.py

@@ -13,7 +13,7 @@ import os, shutil, logging, re
 from datetime import datetime
 
 from pathlib import Path
-from typing import List, Union, Sequence, Iterator, Any
+from typing import Union, Sequence, Iterator, Any
 
 from chromadb.utils.batch_utils import create_batches
 from langchain_core.documents import Document
@@ -376,7 +376,7 @@ async def update_reranking_config(
     try:
         app.state.config.RAG_RERANKING_MODEL = form_data.reranking_model
 
-        update_reranking_model(app.state.config.RAG_RERANKING_MODEL), True
+        update_reranking_model(app.state.config.RAG_RERANKING_MODEL, True)
 
         return {
             "status": True,
@@ -439,7 +439,7 @@ class ChunkParamUpdateForm(BaseModel):
 
 
 class YoutubeLoaderConfig(BaseModel):
-    language: List[str]
+    language: list[str]
     translation: Optional[str] = None
 
 
@@ -642,7 +642,7 @@ def query_doc_handler(
 
 
 class QueryCollectionsForm(BaseModel):
-    collection_names: List[str]
+    collection_names: list[str]
     query: str
     k: Optional[int] = None
     r: Optional[float] = None
@@ -1021,7 +1021,7 @@ class TikaLoader:
         self.file_path = file_path
         self.mime_type = mime_type
 
-    def load(self) -> List[Document]:
+    def load(self) -> list[Document]:
         with open(self.file_path, "rb") as f:
             data = f.read()
 
@@ -1185,7 +1185,7 @@ def store_doc(
             f.close()
 
         f = open(file_path, "rb")
-        if collection_name == None:
+        if collection_name is None:
             collection_name = calculate_sha256(f)[:63]
         f.close()
 
@@ -1238,7 +1238,7 @@ def process_doc(
         f = open(file_path, "rb")
 
         collection_name = form_data.collection_name
-        if collection_name == None:
+        if collection_name is None:
             collection_name = calculate_sha256(f)[:63]
         f.close()
 
@@ -1296,7 +1296,7 @@ def store_text(
 ):
 
     collection_name = form_data.collection_name
-    if collection_name == None:
+    if collection_name is None:
         collection_name = calculate_sha256_string(form_data.content)
 
     result = store_text_in_vector_db(
@@ -1339,7 +1339,7 @@ def scan_docs_dir(user=Depends(get_admin_user)):
                         sanitized_filename = sanitize_filename(filename)
                         doc = Documents.get_doc_by_name(sanitized_filename)
 
-                        if doc == None:
+                        if doc is None:
                             doc = Documents.insert_new_doc(
                                 user.id,
                                 DocumentForm(

+ 2 - 2
backend/apps/rag/search/brave.py

@@ -1,5 +1,5 @@
 import logging
-from typing import List, Optional
+from typing import Optional
 import requests
 
 from apps.rag.search.main import SearchResult, get_filtered_results
@@ -10,7 +10,7 @@ log.setLevel(SRC_LOG_LEVELS["RAG"])
 
 
 def search_brave(
-    api_key: str, query: str, count: int, filter_list: Optional[List[str]] = None
+    api_key: str, query: str, count: int, filter_list: Optional[list[str]] = None
 ) -> list[SearchResult]:
     """Search using Brave's Search API and return the results as a list of SearchResult objects.
 

+ 3 - 3
backend/apps/rag/search/duckduckgo.py

@@ -1,5 +1,5 @@
 import logging
-from typing import List, Optional
+from typing import Optional
 from apps.rag.search.main import SearchResult, get_filtered_results
 from duckduckgo_search import DDGS
 from config import SRC_LOG_LEVELS
@@ -9,7 +9,7 @@ log.setLevel(SRC_LOG_LEVELS["RAG"])
 
 
 def search_duckduckgo(
-    query: str, count: int, filter_list: Optional[List[str]] = None
+    query: str, count: int, filter_list: Optional[list[str]] = None
 ) -> list[SearchResult]:
     """
     Search using DuckDuckGo's Search API and return the results as a list of SearchResult objects.
@@ -18,7 +18,7 @@ def search_duckduckgo(
         count (int): The number of results to return
 
     Returns:
-        List[SearchResult]: A list of search results
+        list[SearchResult]: A list of search results
     """
     # Use the DDGS context manager to create a DDGS object
     with DDGS() as ddgs:

+ 2 - 2
backend/apps/rag/search/google_pse.py

@@ -1,6 +1,6 @@
 import json
 import logging
-from typing import List, Optional
+from typing import Optional
 import requests
 
 from apps.rag.search.main import SearchResult, get_filtered_results
@@ -15,7 +15,7 @@ def search_google_pse(
     search_engine_id: str,
     query: str,
     count: int,
-    filter_list: Optional[List[str]] = None,
+    filter_list: Optional[list[str]] = None,
 ) -> list[SearchResult]:
     """Search using Google's Programmable Search Engine API and return the results as a list of SearchResult objects.
 

+ 1 - 1
backend/apps/rag/search/jina_search.py

@@ -17,7 +17,7 @@ def search_jina(query: str, count: int) -> list[SearchResult]:
         count (int): The number of results to return
 
     Returns:
-        List[SearchResult]: A list of search results
+        list[SearchResult]: A list of search results
     """
     jina_search_endpoint = "https://s.jina.ai/"
     headers = {

+ 5 - 5
backend/apps/rag/search/searxng.py

@@ -1,7 +1,7 @@
 import logging
 import requests
 
-from typing import List, Optional
+from typing import Optional
 
 from apps.rag.search.main import SearchResult, get_filtered_results
 from config import SRC_LOG_LEVELS
@@ -14,9 +14,9 @@ def search_searxng(
     query_url: str,
     query: str,
     count: int,
-    filter_list: Optional[List[str]] = None,
+    filter_list: Optional[list[str]] = None,
     **kwargs,
-) -> List[SearchResult]:
+) -> list[SearchResult]:
     """
     Search a SearXNG instance for a given query and return the results as a list of SearchResult objects.
 
@@ -31,10 +31,10 @@ def search_searxng(
         language (str): Language filter for the search results; e.g., "en-US". Defaults to an empty string.
         safesearch (int): Safe search filter for safer web results; 0 = off, 1 = moderate, 2 = strict. Defaults to 1 (moderate).
         time_range (str): Time range for filtering results by date; e.g., "2023-04-05..today" or "all-time". Defaults to ''.
-        categories: (Optional[List[str]]): Specific categories within which the search should be performed, defaulting to an empty string if not provided.
+        categories: (Optional[list[str]]): Specific categories within which the search should be performed, defaulting to an empty string if not provided.
 
     Returns:
-        List[SearchResult]: A list of SearchResults sorted by relevance score in descending order.
+        list[SearchResult]: A list of SearchResults sorted by relevance score in descending order.
 
     Raise:
         requests.exceptions.RequestException: If a request error occurs during the search process.

+ 2 - 2
backend/apps/rag/search/serper.py

@@ -1,6 +1,6 @@
 import json
 import logging
-from typing import List, Optional
+from typing import Optional
 import requests
 
 from apps.rag.search.main import SearchResult, get_filtered_results
@@ -11,7 +11,7 @@ log.setLevel(SRC_LOG_LEVELS["RAG"])
 
 
 def search_serper(
-    api_key: str, query: str, count: int, filter_list: Optional[List[str]] = None
+    api_key: str, query: str, count: int, filter_list: Optional[list[str]] = None
 ) -> list[SearchResult]:
     """Search using serper.dev's API and return the results as a list of SearchResult objects.
 

+ 2 - 2
backend/apps/rag/search/serply.py

@@ -1,6 +1,6 @@
 import json
 import logging
-from typing import List, Optional
+from typing import Optional
 import requests
 from urllib.parse import urlencode
 
@@ -19,7 +19,7 @@ def search_serply(
     limit: int = 10,
     device_type: str = "desktop",
     proxy_location: str = "US",
-    filter_list: Optional[List[str]] = None,
+    filter_list: Optional[list[str]] = None,
 ) -> list[SearchResult]:
     """Search using serper.dev's API and return the results as a list of SearchResult objects.
 

+ 2 - 2
backend/apps/rag/search/serpstack.py

@@ -1,6 +1,6 @@
 import json
 import logging
-from typing import List, Optional
+from typing import Optional
 import requests
 
 from apps.rag.search.main import SearchResult, get_filtered_results
@@ -14,7 +14,7 @@ def search_serpstack(
     api_key: str,
     query: str,
     count: int,
-    filter_list: Optional[List[str]] = None,
+    filter_list: Optional[list[str]] = None,
     https_enabled: bool = True,
 ) -> list[SearchResult]:
     """Search using serpstack.com's and return the results as a list of SearchResult objects.

+ 1 - 1
backend/apps/rag/search/tavily.py

@@ -17,7 +17,7 @@ def search_tavily(api_key: str, query: str, count: int) -> list[SearchResult]:
         query (str): The query to search for
 
     Returns:
-        List[SearchResult]: A list of search results
+        list[SearchResult]: A list of search results
     """
     url = "https://api.tavily.com/search"
     data = {"query": query, "api_key": api_key}

+ 6 - 6
backend/apps/rag/utils.py

@@ -2,7 +2,7 @@ import os
 import logging
 import requests
 
-from typing import List, Union
+from typing import Union
 
 from apps.ollama.main import (
     generate_ollama_embeddings,
@@ -142,7 +142,7 @@ def merge_and_sort_query_results(query_results, k, reverse=False):
 
 
 def query_collection(
-    collection_names: List[str],
+    collection_names: list[str],
     query: str,
     embedding_function,
     k: int,
@@ -157,13 +157,13 @@ def query_collection(
                 embedding_function=embedding_function,
             )
             results.append(result)
-        except:
+        except Exception:
             pass
     return merge_and_sort_query_results(results, k=k)
 
 
 def query_collection_with_hybrid_search(
-    collection_names: List[str],
+    collection_names: list[str],
     query: str,
     embedding_function,
     k: int,
@@ -182,7 +182,7 @@ def query_collection_with_hybrid_search(
                 r=r,
             )
             results.append(result)
-        except:
+        except Exception:
             pass
     return merge_and_sort_query_results(results, k=k, reverse=True)
 
@@ -411,7 +411,7 @@ class ChromaRetriever(BaseRetriever):
         query: str,
         *,
         run_manager: CallbackManagerForRetrieverRun,
-    ) -> List[Document]:
+    ) -> list[Document]:
         query_embeddings = self.embedding_function(query)
 
         results = self.collection.query(

+ 4 - 2
backend/apps/webui/main.py

@@ -22,7 +22,7 @@ from apps.webui.utils import load_function_module_by_id
 from utils.misc import (
     openai_chat_chunk_message_template,
     openai_chat_completion_message_template,
-    apply_model_params_to_body,
+    apply_model_params_to_body_openai,
     apply_model_system_prompt_to_body,
 )
 
@@ -46,6 +46,7 @@ from config import (
     AppConfig,
     OAUTH_USERNAME_CLAIM,
     OAUTH_PICTURE_CLAIM,
+    OAUTH_EMAIL_CLAIM,
 )
 
 from apps.socket.main import get_event_call, get_event_emitter
@@ -84,6 +85,7 @@ app.state.config.ENABLE_COMMUNITY_SHARING = ENABLE_COMMUNITY_SHARING
 
 app.state.config.OAUTH_USERNAME_CLAIM = OAUTH_USERNAME_CLAIM
 app.state.config.OAUTH_PICTURE_CLAIM = OAUTH_PICTURE_CLAIM
+app.state.config.OAUTH_EMAIL_CLAIM = OAUTH_EMAIL_CLAIM
 
 app.state.MODELS = {}
 app.state.TOOLS = {}
@@ -289,7 +291,7 @@ async def generate_function_chat_completion(form_data, user):
             form_data["model"] = model_info.base_model_id
 
         params = model_info.params.model_dump()
-        form_data = apply_model_params_to_body(params, form_data)
+        form_data = apply_model_params_to_body_openai(params, form_data)
         form_data = apply_model_system_prompt_to_body(params, form_data, user)
 
     pipe_id = get_pipe_id(form_data)

+ 6 - 6
backend/apps/webui/models/auths.py

@@ -140,7 +140,7 @@ class AuthsTable:
                         return None
                 else:
                     return None
-        except:
+        except Exception:
             return None
 
     def authenticate_user_by_api_key(self, api_key: str) -> Optional[UserModel]:
@@ -152,7 +152,7 @@ class AuthsTable:
         try:
             user = Users.get_user_by_api_key(api_key)
             return user if user else None
-        except:
+        except Exception:
             return False
 
     def authenticate_user_by_trusted_header(self, email: str) -> Optional[UserModel]:
@@ -163,7 +163,7 @@ class AuthsTable:
                 if auth:
                     user = Users.get_user_by_id(auth.id)
                     return user
-        except:
+        except Exception:
             return None
 
     def update_user_password_by_id(self, id: str, new_password: str) -> bool:
@@ -174,7 +174,7 @@ class AuthsTable:
                 )
                 db.commit()
                 return True if result == 1 else False
-        except:
+        except Exception:
             return False
 
     def update_email_by_id(self, id: str, email: str) -> bool:
@@ -183,7 +183,7 @@ class AuthsTable:
                 result = db.query(Auth).filter_by(id=id).update({"email": email})
                 db.commit()
                 return True if result == 1 else False
-        except:
+        except Exception:
             return False
 
     def delete_auth_by_id(self, id: str) -> bool:
@@ -200,7 +200,7 @@ class AuthsTable:
                     return True
                 else:
                     return False
-        except:
+        except Exception:
             return False
 
 

+ 20 - 20
backend/apps/webui/models/chats.py

@@ -1,5 +1,5 @@
 from pydantic import BaseModel, ConfigDict
-from typing import List, Union, Optional
+from typing import Union, Optional
 
 import json
 import uuid
@@ -164,7 +164,7 @@ class ChatTable:
                 db.refresh(chat)
 
                 return self.get_chat_by_id(chat.share_id)
-        except:
+        except Exception:
             return None
 
     def delete_shared_chat_by_chat_id(self, chat_id: str) -> bool:
@@ -175,7 +175,7 @@ class ChatTable:
                 db.commit()
 
                 return True
-        except:
+        except Exception:
             return False
 
     def update_chat_share_id_by_id(
@@ -189,7 +189,7 @@ class ChatTable:
                 db.commit()
                 db.refresh(chat)
                 return ChatModel.model_validate(chat)
-        except:
+        except Exception:
             return None
 
     def toggle_chat_archive_by_id(self, id: str) -> Optional[ChatModel]:
@@ -201,7 +201,7 @@ class ChatTable:
                 db.commit()
                 db.refresh(chat)
                 return ChatModel.model_validate(chat)
-        except:
+        except Exception:
             return None
 
     def archive_all_chats_by_user_id(self, user_id: str) -> bool:
@@ -210,12 +210,12 @@ class ChatTable:
                 db.query(Chat).filter_by(user_id=user_id).update({"archived": True})
                 db.commit()
                 return True
-        except:
+        except Exception:
             return False
 
     def get_archived_chat_list_by_user_id(
         self, user_id: str, skip: int = 0, limit: int = 50
-    ) -> List[ChatModel]:
+    ) -> list[ChatModel]:
         with get_db() as db:
 
             all_chats = (
@@ -233,7 +233,7 @@ class ChatTable:
         include_archived: bool = False,
         skip: int = 0,
         limit: int = 50,
-    ) -> List[ChatModel]:
+    ) -> list[ChatModel]:
         with get_db() as db:
             query = db.query(Chat).filter_by(user_id=user_id)
             if not include_archived:
@@ -251,7 +251,7 @@ class ChatTable:
         include_archived: bool = False,
         skip: int = 0,
         limit: int = -1,
-    ) -> List[ChatTitleIdResponse]:
+    ) -> list[ChatTitleIdResponse]:
         with get_db() as db:
             query = db.query(Chat).filter_by(user_id=user_id)
             if not include_archived:
@@ -279,8 +279,8 @@ class ChatTable:
             ]
 
     def get_chat_list_by_chat_ids(
-        self, chat_ids: List[str], skip: int = 0, limit: int = 50
-    ) -> List[ChatModel]:
+        self, chat_ids: list[str], skip: int = 0, limit: int = 50
+    ) -> list[ChatModel]:
         with get_db() as db:
             all_chats = (
                 db.query(Chat)
@@ -297,7 +297,7 @@ class ChatTable:
 
                 chat = db.get(Chat, id)
                 return ChatModel.model_validate(chat)
-        except:
+        except Exception:
             return None
 
     def get_chat_by_share_id(self, id: str) -> Optional[ChatModel]:
@@ -319,10 +319,10 @@ class ChatTable:
 
                 chat = db.query(Chat).filter_by(id=id, user_id=user_id).first()
                 return ChatModel.model_validate(chat)
-        except:
+        except Exception:
             return None
 
-    def get_chats(self, skip: int = 0, limit: int = 50) -> List[ChatModel]:
+    def get_chats(self, skip: int = 0, limit: int = 50) -> list[ChatModel]:
         with get_db() as db:
 
             all_chats = (
@@ -332,7 +332,7 @@ class ChatTable:
             )
             return [ChatModel.model_validate(chat) for chat in all_chats]
 
-    def get_chats_by_user_id(self, user_id: str) -> List[ChatModel]:
+    def get_chats_by_user_id(self, user_id: str) -> list[ChatModel]:
         with get_db() as db:
 
             all_chats = (
@@ -342,7 +342,7 @@ class ChatTable:
             )
             return [ChatModel.model_validate(chat) for chat in all_chats]
 
-    def get_archived_chats_by_user_id(self, user_id: str) -> List[ChatModel]:
+    def get_archived_chats_by_user_id(self, user_id: str) -> list[ChatModel]:
         with get_db() as db:
 
             all_chats = (
@@ -360,7 +360,7 @@ class ChatTable:
                 db.commit()
 
                 return True and self.delete_shared_chat_by_chat_id(id)
-        except:
+        except Exception:
             return False
 
     def delete_chat_by_id_and_user_id(self, id: str, user_id: str) -> bool:
@@ -371,7 +371,7 @@ class ChatTable:
                 db.commit()
 
                 return True and self.delete_shared_chat_by_chat_id(id)
-        except:
+        except Exception:
             return False
 
     def delete_chats_by_user_id(self, user_id: str) -> bool:
@@ -385,7 +385,7 @@ class ChatTable:
                 db.commit()
 
                 return True
-        except:
+        except Exception:
             return False
 
     def delete_shared_chats_by_user_id(self, user_id: str) -> bool:
@@ -400,7 +400,7 @@ class ChatTable:
                 db.commit()
 
                 return True
-        except:
+        except Exception:
             return False
 
 

+ 5 - 5
backend/apps/webui/models/documents.py

@@ -1,5 +1,5 @@
 from pydantic import BaseModel, ConfigDict
-from typing import List, Optional
+from typing import Optional
 import time
 import logging
 
@@ -93,7 +93,7 @@ class DocumentsTable:
                     return DocumentModel.model_validate(result)
                 else:
                     return None
-            except:
+            except Exception:
                 return None
 
     def get_doc_by_name(self, name: str) -> Optional[DocumentModel]:
@@ -102,10 +102,10 @@ class DocumentsTable:
 
                 document = db.query(Document).filter_by(name=name).first()
                 return DocumentModel.model_validate(document) if document else None
-        except:
+        except Exception:
             return None
 
-    def get_docs(self) -> List[DocumentModel]:
+    def get_docs(self) -> list[DocumentModel]:
         with get_db() as db:
 
             return [
@@ -160,7 +160,7 @@ class DocumentsTable:
                 db.query(Document).filter_by(name=name).delete()
                 db.commit()
                 return True
-        except:
+        except Exception:
             return False
 
 

+ 5 - 5
backend/apps/webui/models/files.py

@@ -1,5 +1,5 @@
 from pydantic import BaseModel, ConfigDict
-from typing import List, Union, Optional
+from typing import Union, Optional
 import time
 import logging
 
@@ -90,10 +90,10 @@ class FilesTable:
             try:
                 file = db.get(File, id)
                 return FileModel.model_validate(file)
-            except:
+            except Exception:
                 return None
 
-    def get_files(self) -> List[FileModel]:
+    def get_files(self) -> list[FileModel]:
         with get_db() as db:
 
             return [FileModel.model_validate(file) for file in db.query(File).all()]
@@ -107,7 +107,7 @@ class FilesTable:
                 db.commit()
 
                 return True
-            except:
+            except Exception:
                 return False
 
     def delete_all_files(self) -> bool:
@@ -119,7 +119,7 @@ class FilesTable:
                 db.commit()
 
                 return True
-            except:
+            except Exception:
                 return False
 
 

+ 10 - 10
backend/apps/webui/models/functions.py

@@ -1,5 +1,5 @@
 from pydantic import BaseModel, ConfigDict
-from typing import List, Union, Optional
+from typing import Union, Optional
 import time
 import logging
 
@@ -122,10 +122,10 @@ class FunctionsTable:
 
                 function = db.get(Function, id)
                 return FunctionModel.model_validate(function)
-        except:
+        except Exception:
             return None
 
-    def get_functions(self, active_only=False) -> List[FunctionModel]:
+    def get_functions(self, active_only=False) -> list[FunctionModel]:
         with get_db() as db:
 
             if active_only:
@@ -141,7 +141,7 @@ class FunctionsTable:
 
     def get_functions_by_type(
         self, type: str, active_only=False
-    ) -> List[FunctionModel]:
+    ) -> list[FunctionModel]:
         with get_db() as db:
 
             if active_only:
@@ -157,7 +157,7 @@ class FunctionsTable:
                     for function in db.query(Function).filter_by(type=type).all()
                 ]
 
-    def get_global_filter_functions(self) -> List[FunctionModel]:
+    def get_global_filter_functions(self) -> list[FunctionModel]:
         with get_db() as db:
 
             return [
@@ -167,7 +167,7 @@ class FunctionsTable:
                 .all()
             ]
 
-    def get_global_action_functions(self) -> List[FunctionModel]:
+    def get_global_action_functions(self) -> list[FunctionModel]:
         with get_db() as db:
             return [
                 FunctionModel.model_validate(function)
@@ -198,7 +198,7 @@ class FunctionsTable:
                 db.commit()
                 db.refresh(function)
                 return self.get_function_by_id(id)
-            except:
+            except Exception:
                 return None
 
     def get_user_valves_by_id_and_user_id(
@@ -256,7 +256,7 @@ class FunctionsTable:
                 )
                 db.commit()
                 return self.get_function_by_id(id)
-            except:
+            except Exception:
                 return None
 
     def deactivate_all_functions(self) -> Optional[bool]:
@@ -271,7 +271,7 @@ class FunctionsTable:
                 )
                 db.commit()
                 return True
-            except:
+            except Exception:
                 return None
 
     def delete_function_by_id(self, id: str) -> bool:
@@ -281,7 +281,7 @@ class FunctionsTable:
                 db.commit()
 
                 return True
-            except:
+            except Exception:
                 return False
 
 

+ 10 - 10
backend/apps/webui/models/memories.py

@@ -1,5 +1,5 @@
 from pydantic import BaseModel, ConfigDict
-from typing import List, Union, Optional
+from typing import Union, Optional
 
 from sqlalchemy import Column, String, BigInteger, Text
 
@@ -80,25 +80,25 @@ class MemoriesTable:
                 )
                 db.commit()
                 return self.get_memory_by_id(id)
-            except:
+            except Exception:
                 return None
 
-    def get_memories(self) -> List[MemoryModel]:
+    def get_memories(self) -> list[MemoryModel]:
         with get_db() as db:
 
             try:
                 memories = db.query(Memory).all()
                 return [MemoryModel.model_validate(memory) for memory in memories]
-            except:
+            except Exception:
                 return None
 
-    def get_memories_by_user_id(self, user_id: str) -> List[MemoryModel]:
+    def get_memories_by_user_id(self, user_id: str) -> list[MemoryModel]:
         with get_db() as db:
 
             try:
                 memories = db.query(Memory).filter_by(user_id=user_id).all()
                 return [MemoryModel.model_validate(memory) for memory in memories]
-            except:
+            except Exception:
                 return None
 
     def get_memory_by_id(self, id: str) -> Optional[MemoryModel]:
@@ -107,7 +107,7 @@ class MemoriesTable:
             try:
                 memory = db.get(Memory, id)
                 return MemoryModel.model_validate(memory)
-            except:
+            except Exception:
                 return None
 
     def delete_memory_by_id(self, id: str) -> bool:
@@ -119,7 +119,7 @@ class MemoriesTable:
 
                 return True
 
-            except:
+            except Exception:
                 return False
 
     def delete_memories_by_user_id(self, user_id: str) -> bool:
@@ -130,7 +130,7 @@ class MemoriesTable:
                 db.commit()
 
                 return True
-            except:
+            except Exception:
                 return False
 
     def delete_memory_by_id_and_user_id(self, id: str, user_id: str) -> bool:
@@ -141,7 +141,7 @@ class MemoriesTable:
                 db.commit()
 
                 return True
-            except:
+            except Exception:
                 return False
 
 

+ 3 - 3
backend/apps/webui/models/models.py

@@ -137,7 +137,7 @@ class ModelsTable:
             print(e)
             return None
 
-    def get_all_models(self) -> List[ModelModel]:
+    def get_all_models(self) -> list[ModelModel]:
         with get_db() as db:
             return [ModelModel.model_validate(model) for model in db.query(Model).all()]
 
@@ -146,7 +146,7 @@ class ModelsTable:
             with get_db() as db:
                 model = db.get(Model, id)
                 return ModelModel.model_validate(model)
-        except:
+        except Exception:
             return None
 
     def update_model_by_id(self, id: str, model: ModelForm) -> Optional[ModelModel]:
@@ -175,7 +175,7 @@ class ModelsTable:
                 db.commit()
 
                 return True
-        except:
+        except Exception:
             return False
 
 

+ 5 - 5
backend/apps/webui/models/prompts.py

@@ -1,5 +1,5 @@
 from pydantic import BaseModel, ConfigDict
-from typing import List, Optional
+from typing import Optional
 import time
 
 from sqlalchemy import String, Column, BigInteger, Text
@@ -79,10 +79,10 @@ class PromptsTable:
 
                 prompt = db.query(Prompt).filter_by(command=command).first()
                 return PromptModel.model_validate(prompt)
-        except:
+        except Exception:
             return None
 
-    def get_prompts(self) -> List[PromptModel]:
+    def get_prompts(self) -> list[PromptModel]:
         with get_db() as db:
 
             return [
@@ -101,7 +101,7 @@ class PromptsTable:
                 prompt.timestamp = int(time.time())
                 db.commit()
                 return PromptModel.model_validate(prompt)
-        except:
+        except Exception:
             return None
 
     def delete_prompt_by_command(self, command: str) -> bool:
@@ -112,7 +112,7 @@ class PromptsTable:
                 db.commit()
 
                 return True
-        except:
+        except Exception:
             return False
 
 

+ 8 - 8
backend/apps/webui/models/tags.py

@@ -1,5 +1,5 @@
 from pydantic import BaseModel, ConfigDict
-from typing import List, Optional
+from typing import Optional
 
 import json
 import uuid
@@ -69,11 +69,11 @@ class ChatIdTagForm(BaseModel):
 
 
 class TagChatIdsResponse(BaseModel):
-    chat_ids: List[str]
+    chat_ids: list[str]
 
 
 class ChatTagsResponse(BaseModel):
-    tags: List[str]
+    tags: list[str]
 
 
 class TagTable:
@@ -109,7 +109,7 @@ class TagTable:
         self, user_id: str, form_data: ChatIdTagForm
     ) -> Optional[ChatIdTagModel]:
         tag = self.get_tag_by_name_and_user_id(form_data.tag_name, user_id)
-        if tag == None:
+        if tag is None:
             tag = self.insert_new_tag(form_data.tag_name, user_id)
 
         id = str(uuid.uuid4())
@@ -132,10 +132,10 @@ class TagTable:
                     return ChatIdTagModel.model_validate(result)
                 else:
                     return None
-        except:
+        except Exception:
             return None
 
-    def get_tags_by_user_id(self, user_id: str) -> List[TagModel]:
+    def get_tags_by_user_id(self, user_id: str) -> list[TagModel]:
         with get_db() as db:
             tag_names = [
                 chat_id_tag.tag_name
@@ -159,7 +159,7 @@ class TagTable:
 
     def get_tags_by_chat_id_and_user_id(
         self, chat_id: str, user_id: str
-    ) -> List[TagModel]:
+    ) -> list[TagModel]:
         with get_db() as db:
 
             tag_names = [
@@ -184,7 +184,7 @@ class TagTable:
 
     def get_chat_ids_by_tag_name_and_user_id(
         self, tag_name: str, user_id: str
-    ) -> List[ChatIdTagModel]:
+    ) -> list[ChatIdTagModel]:
         with get_db() as db:
 
             return [

+ 8 - 8
backend/apps/webui/models/tools.py

@@ -1,5 +1,5 @@
 from pydantic import BaseModel, ConfigDict
-from typing import List, Optional
+from typing import Optional
 import time
 import logging
 from sqlalchemy import String, Column, BigInteger, Text
@@ -45,7 +45,7 @@ class ToolModel(BaseModel):
     user_id: str
     name: str
     content: str
-    specs: List[dict]
+    specs: list[dict]
     meta: ToolMeta
     updated_at: int  # timestamp in epoch
     created_at: int  # timestamp in epoch
@@ -81,7 +81,7 @@ class ToolValves(BaseModel):
 class ToolsTable:
 
     def insert_new_tool(
-        self, user_id: str, form_data: ToolForm, specs: List[dict]
+        self, user_id: str, form_data: ToolForm, specs: list[dict]
     ) -> Optional[ToolModel]:
 
         with get_db() as db:
@@ -115,10 +115,10 @@ class ToolsTable:
 
                 tool = db.get(Tool, id)
                 return ToolModel.model_validate(tool)
-        except:
+        except Exception:
             return None
 
-    def get_tools(self) -> List[ToolModel]:
+    def get_tools(self) -> list[ToolModel]:
         with get_db() as db:
             return [ToolModel.model_validate(tool) for tool in db.query(Tool).all()]
 
@@ -141,7 +141,7 @@ class ToolsTable:
                 )
                 db.commit()
                 return self.get_tool_by_id(id)
-        except:
+        except Exception:
             return None
 
     def get_user_valves_by_id_and_user_id(
@@ -196,7 +196,7 @@ class ToolsTable:
                 tool = db.query(Tool).get(id)
                 db.refresh(tool)
                 return ToolModel.model_validate(tool)
-        except:
+        except Exception:
             return None
 
     def delete_tool_by_id(self, id: str) -> bool:
@@ -206,7 +206,7 @@ class ToolsTable:
                 db.commit()
 
                 return True
-        except:
+        except Exception:
             return False
 
 

+ 12 - 12
backend/apps/webui/models/users.py

@@ -1,5 +1,5 @@
 from pydantic import BaseModel, ConfigDict, parse_obj_as
-from typing import List, Union, Optional
+from typing import Union, Optional
 import time
 
 from sqlalchemy import String, Column, BigInteger, Text
@@ -125,7 +125,7 @@ class UsersTable:
 
                 user = db.query(User).filter_by(api_key=api_key).first()
                 return UserModel.model_validate(user)
-        except:
+        except Exception:
             return None
 
     def get_user_by_email(self, email: str) -> Optional[UserModel]:
@@ -134,7 +134,7 @@ class UsersTable:
 
                 user = db.query(User).filter_by(email=email).first()
                 return UserModel.model_validate(user)
-        except:
+        except Exception:
             return None
 
     def get_user_by_oauth_sub(self, sub: str) -> Optional[UserModel]:
@@ -143,10 +143,10 @@ class UsersTable:
 
                 user = db.query(User).filter_by(oauth_sub=sub).first()
                 return UserModel.model_validate(user)
-        except:
+        except Exception:
             return None
 
-    def get_users(self, skip: int = 0, limit: int = 50) -> List[UserModel]:
+    def get_users(self, skip: int = 0, limit: int = 50) -> list[UserModel]:
         with get_db() as db:
             users = (
                 db.query(User)
@@ -164,7 +164,7 @@ class UsersTable:
             with get_db() as db:
                 user = db.query(User).order_by(User.created_at).first()
                 return UserModel.model_validate(user)
-        except:
+        except Exception:
             return None
 
     def update_user_role_by_id(self, id: str, role: str) -> Optional[UserModel]:
@@ -174,7 +174,7 @@ class UsersTable:
                 db.commit()
                 user = db.query(User).filter_by(id=id).first()
                 return UserModel.model_validate(user)
-        except:
+        except Exception:
             return None
 
     def update_user_profile_image_url_by_id(
@@ -189,7 +189,7 @@ class UsersTable:
 
                 user = db.query(User).filter_by(id=id).first()
                 return UserModel.model_validate(user)
-        except:
+        except Exception:
             return None
 
     def update_user_last_active_by_id(self, id: str) -> Optional[UserModel]:
@@ -203,7 +203,7 @@ class UsersTable:
 
                 user = db.query(User).filter_by(id=id).first()
                 return UserModel.model_validate(user)
-        except:
+        except Exception:
             return None
 
     def update_user_oauth_sub_by_id(
@@ -216,7 +216,7 @@ class UsersTable:
 
                 user = db.query(User).filter_by(id=id).first()
                 return UserModel.model_validate(user)
-        except:
+        except Exception:
             return None
 
     def update_user_by_id(self, id: str, updated: dict) -> Optional[UserModel]:
@@ -245,7 +245,7 @@ class UsersTable:
                 return True
             else:
                 return False
-        except:
+        except Exception:
             return False
 
     def update_user_api_key_by_id(self, id: str, api_key: str) -> str:
@@ -254,7 +254,7 @@ class UsersTable:
                 result = db.query(User).filter_by(id=id).update({"api_key": api_key})
                 db.commit()
                 return True if result == 1 else False
-        except:
+        except Exception:
             return False
 
     def get_user_api_key_by_id(self, id: str) -> Optional[str]:

+ 11 - 11
backend/apps/webui/routers/chats.py

@@ -1,6 +1,6 @@
 from fastapi import Depends, Request, HTTPException, status
 from datetime import datetime, timedelta
-from typing import List, Union, Optional
+from typing import Union, Optional
 from utils.utils import get_verified_user, get_admin_user
 from fastapi import APIRouter
 from pydantic import BaseModel
@@ -40,8 +40,8 @@ router = APIRouter()
 ############################
 
 
-@router.get("/", response_model=List[ChatTitleIdResponse])
-@router.get("/list", response_model=List[ChatTitleIdResponse])
+@router.get("/", response_model=list[ChatTitleIdResponse])
+@router.get("/list", response_model=list[ChatTitleIdResponse])
 async def get_session_user_chat_list(
     user=Depends(get_verified_user), page: Optional[int] = None
 ):
@@ -80,7 +80,7 @@ async def delete_all_user_chats(request: Request, user=Depends(get_verified_user
 ############################
 
 
-@router.get("/list/user/{user_id}", response_model=List[ChatTitleIdResponse])
+@router.get("/list/user/{user_id}", response_model=list[ChatTitleIdResponse])
 async def get_user_chat_list_by_user_id(
     user_id: str,
     user=Depends(get_admin_user),
@@ -119,7 +119,7 @@ async def create_new_chat(form_data: ChatForm, user=Depends(get_verified_user)):
 ############################
 
 
-@router.get("/all", response_model=List[ChatResponse])
+@router.get("/all", response_model=list[ChatResponse])
 async def get_user_chats(user=Depends(get_verified_user)):
     return [
         ChatResponse(**{**chat.model_dump(), "chat": json.loads(chat.chat)})
@@ -132,7 +132,7 @@ async def get_user_chats(user=Depends(get_verified_user)):
 ############################
 
 
-@router.get("/all/archived", response_model=List[ChatResponse])
+@router.get("/all/archived", response_model=list[ChatResponse])
 async def get_user_archived_chats(user=Depends(get_verified_user)):
     return [
         ChatResponse(**{**chat.model_dump(), "chat": json.loads(chat.chat)})
@@ -145,7 +145,7 @@ async def get_user_archived_chats(user=Depends(get_verified_user)):
 ############################
 
 
-@router.get("/all/db", response_model=List[ChatResponse])
+@router.get("/all/db", response_model=list[ChatResponse])
 async def get_all_user_chats_in_db(user=Depends(get_admin_user)):
     if not ENABLE_ADMIN_EXPORT:
         raise HTTPException(
@@ -163,7 +163,7 @@ async def get_all_user_chats_in_db(user=Depends(get_admin_user)):
 ############################
 
 
-@router.get("/archived", response_model=List[ChatTitleIdResponse])
+@router.get("/archived", response_model=list[ChatTitleIdResponse])
 async def get_archived_session_user_chat_list(
     user=Depends(get_verified_user), skip: int = 0, limit: int = 50
 ):
@@ -216,7 +216,7 @@ class TagNameForm(BaseModel):
     limit: Optional[int] = 50
 
 
-@router.post("/tags", response_model=List[ChatTitleIdResponse])
+@router.post("/tags", response_model=list[ChatTitleIdResponse])
 async def get_user_chat_list_by_tag_name(
     form_data: TagNameForm, user=Depends(get_verified_user)
 ):
@@ -241,7 +241,7 @@ async def get_user_chat_list_by_tag_name(
 ############################
 
 
-@router.get("/tags/all", response_model=List[TagModel])
+@router.get("/tags/all", response_model=list[TagModel])
 async def get_all_tags(user=Depends(get_verified_user)):
     try:
         tags = Tags.get_tags_by_user_id(user.id)
@@ -417,7 +417,7 @@ async def delete_shared_chat_by_id(id: str, user=Depends(get_verified_user)):
 ############################
 
 
-@router.get("/{id}/tags", response_model=List[TagModel])
+@router.get("/{id}/tags", response_model=list[TagModel])
 async def get_chat_tags_by_id(id: str, user=Depends(get_verified_user)):
     tags = Tags.get_tags_by_chat_id_and_user_id(id, user.id)
 

+ 7 - 7
backend/apps/webui/routers/configs.py

@@ -1,7 +1,7 @@
 from fastapi import Response, Request
 from fastapi import Depends, FastAPI, HTTPException, status
 from datetime import datetime, timedelta
-from typing import List, Union
+from typing import Union
 
 from fastapi import APIRouter
 from pydantic import BaseModel
@@ -29,12 +29,12 @@ class SetDefaultModelsForm(BaseModel):
 
 
 class PromptSuggestion(BaseModel):
-    title: List[str]
+    title: list[str]
     content: str
 
 
 class SetDefaultSuggestionsForm(BaseModel):
-    suggestions: List[PromptSuggestion]
+    suggestions: list[PromptSuggestion]
 
 
 ############################
@@ -50,7 +50,7 @@ async def set_global_default_models(
     return request.app.state.config.DEFAULT_MODELS
 
 
-@router.post("/default/suggestions", response_model=List[PromptSuggestion])
+@router.post("/default/suggestions", response_model=list[PromptSuggestion])
 async def set_global_default_suggestions(
     request: Request,
     form_data: SetDefaultSuggestionsForm,
@@ -67,10 +67,10 @@ async def set_global_default_suggestions(
 
 
 class SetBannersForm(BaseModel):
-    banners: List[BannerModel]
+    banners: list[BannerModel]
 
 
-@router.post("/banners", response_model=List[BannerModel])
+@router.post("/banners", response_model=list[BannerModel])
 async def set_banners(
     request: Request,
     form_data: SetBannersForm,
@@ -81,7 +81,7 @@ async def set_banners(
     return request.app.state.config.BANNERS
 
 
-@router.get("/banners", response_model=List[BannerModel])
+@router.get("/banners", response_model=list[BannerModel])
 async def get_banners(
     request: Request,
     user=Depends(get_verified_user),

+ 4 - 4
backend/apps/webui/routers/documents.py

@@ -1,6 +1,6 @@
 from fastapi import Depends, FastAPI, HTTPException, status
 from datetime import datetime, timedelta
-from typing import List, Union, Optional
+from typing import Union, Optional
 
 from fastapi import APIRouter
 from pydantic import BaseModel
@@ -24,7 +24,7 @@ router = APIRouter()
 ############################
 
 
-@router.get("/", response_model=List[DocumentResponse])
+@router.get("/", response_model=list[DocumentResponse])
 async def get_documents(user=Depends(get_verified_user)):
     docs = [
         DocumentResponse(
@@ -46,7 +46,7 @@ async def get_documents(user=Depends(get_verified_user)):
 @router.post("/create", response_model=Optional[DocumentResponse])
 async def create_new_doc(form_data: DocumentForm, user=Depends(get_admin_user)):
     doc = Documents.get_doc_by_name(form_data.name)
-    if doc == None:
+    if doc is None:
         doc = Documents.insert_new_doc(user.id, form_data)
 
         if doc:
@@ -102,7 +102,7 @@ class TagItem(BaseModel):
 
 class TagDocumentForm(BaseModel):
     name: str
-    tags: List[dict]
+    tags: list[dict]
 
 
 @router.post("/doc/tags", response_model=Optional[DocumentResponse])

+ 2 - 2
backend/apps/webui/routers/files.py

@@ -11,7 +11,7 @@ from fastapi import (
 
 
 from datetime import datetime, timedelta
-from typing import List, Union, Optional
+from typing import Union, Optional
 from pathlib import Path
 
 from fastapi import APIRouter
@@ -104,7 +104,7 @@ def upload_file(file: UploadFile = File(...), user=Depends(get_verified_user)):
 ############################
 
 
-@router.get("/", response_model=List[FileModel])
+@router.get("/", response_model=list[FileModel])
 async def list_files(user=Depends(get_verified_user)):
     files = Files.get_files()
     return files

+ 5 - 5
backend/apps/webui/routers/functions.py

@@ -1,6 +1,6 @@
 from fastapi import Depends, FastAPI, HTTPException, status, Request
 from datetime import datetime, timedelta
-from typing import List, Union, Optional
+from typing import Union, Optional
 
 from fastapi import APIRouter
 from pydantic import BaseModel
@@ -30,7 +30,7 @@ router = APIRouter()
 ############################
 
 
-@router.get("/", response_model=List[FunctionResponse])
+@router.get("/", response_model=list[FunctionResponse])
 async def get_functions(user=Depends(get_verified_user)):
     return Functions.get_functions()
 
@@ -40,7 +40,7 @@ async def get_functions(user=Depends(get_verified_user)):
 ############################
 
 
-@router.get("/export", response_model=List[FunctionModel])
+@router.get("/export", response_model=list[FunctionModel])
 async def get_functions(user=Depends(get_admin_user)):
     return Functions.get_functions()
 
@@ -63,7 +63,7 @@ async def create_new_function(
     form_data.id = form_data.id.lower()
 
     function = Functions.get_function_by_id(form_data.id)
-    if function == None:
+    if function is None:
         function_path = os.path.join(FUNCTIONS_DIR, f"{form_data.id}.py")
         try:
             with open(function_path, "w") as function_file:
@@ -235,7 +235,7 @@ async def delete_function_by_id(
         function_path = os.path.join(FUNCTIONS_DIR, f"{id}.py")
         try:
             os.remove(function_path)
-        except:
+        except Exception:
             pass
 
     return result

+ 2 - 2
backend/apps/webui/routers/memories.py

@@ -1,7 +1,7 @@
 from fastapi import Response, Request
 from fastapi import Depends, FastAPI, HTTPException, status
 from datetime import datetime, timedelta
-from typing import List, Union, Optional
+from typing import Union, Optional
 
 from fastapi import APIRouter
 from pydantic import BaseModel
@@ -30,7 +30,7 @@ async def get_embeddings(request: Request):
 ############################
 
 
-@router.get("/", response_model=List[MemoryModel])
+@router.get("/", response_model=list[MemoryModel])
 async def get_memories(user=Depends(get_verified_user)):
     return Memories.get_memories_by_user_id(user.id)
 

+ 2 - 2
backend/apps/webui/routers/models.py

@@ -1,6 +1,6 @@
 from fastapi import Depends, FastAPI, HTTPException, status, Request
 from datetime import datetime, timedelta
-from typing import List, Union, Optional
+from typing import Union, Optional
 
 from fastapi import APIRouter
 from pydantic import BaseModel
@@ -18,7 +18,7 @@ router = APIRouter()
 ###########################
 
 
-@router.get("/", response_model=List[ModelResponse])
+@router.get("/", response_model=list[ModelResponse])
 async def get_models(user=Depends(get_verified_user)):
     return Models.get_all_models()
 

+ 3 - 3
backend/apps/webui/routers/prompts.py

@@ -1,6 +1,6 @@
 from fastapi import Depends, FastAPI, HTTPException, status
 from datetime import datetime, timedelta
-from typing import List, Union, Optional
+from typing import Union, Optional
 
 from fastapi import APIRouter
 from pydantic import BaseModel
@@ -18,7 +18,7 @@ router = APIRouter()
 ############################
 
 
-@router.get("/", response_model=List[PromptModel])
+@router.get("/", response_model=list[PromptModel])
 async def get_prompts(user=Depends(get_verified_user)):
     return Prompts.get_prompts()
 
@@ -31,7 +31,7 @@ async def get_prompts(user=Depends(get_verified_user)):
 @router.post("/create", response_model=Optional[PromptModel])
 async def create_new_prompt(form_data: PromptForm, user=Depends(get_admin_user)):
     prompt = Prompts.get_prompt_by_command(form_data.command)
-    if prompt == None:
+    if prompt is None:
         prompt = Prompts.insert_new_prompt(user.id, form_data)
 
         if prompt:

+ 3 - 3
backend/apps/webui/routers/tools.py

@@ -1,5 +1,5 @@
 from fastapi import Depends, HTTPException, status, Request
-from typing import List, Optional
+from typing import Optional
 
 from fastapi import APIRouter
 
@@ -27,7 +27,7 @@ router = APIRouter()
 ############################
 
 
-@router.get("/", response_model=List[ToolResponse])
+@router.get("/", response_model=list[ToolResponse])
 async def get_toolkits(user=Depends(get_verified_user)):
     toolkits = [toolkit for toolkit in Tools.get_tools()]
     return toolkits
@@ -38,7 +38,7 @@ async def get_toolkits(user=Depends(get_verified_user)):
 ############################
 
 
-@router.get("/export", response_model=List[ToolModel])
+@router.get("/export", response_model=list[ToolModel])
 async def get_toolkits(user=Depends(get_admin_user)):
     toolkits = [toolkit for toolkit in Tools.get_tools()]
     return toolkits

+ 2 - 2
backend/apps/webui/routers/users.py

@@ -1,7 +1,7 @@
 from fastapi import Response, Request
 from fastapi import Depends, FastAPI, HTTPException, status
 from datetime import datetime, timedelta
-from typing import List, Union, Optional
+from typing import Union, Optional
 
 from fastapi import APIRouter
 from pydantic import BaseModel
@@ -39,7 +39,7 @@ router = APIRouter()
 ############################
 
 
-@router.get("/", response_model=List[UserModel])
+@router.get("/", response_model=list[UserModel])
 async def get_users(skip: int = 0, limit: int = 50, user=Depends(get_admin_user)):
     return Users.get_users(skip, limit)
 

+ 2 - 2
backend/apps/webui/routers/utils.py

@@ -17,7 +17,7 @@ from utils.misc import calculate_sha256, get_gravatar_url
 
 from config import OLLAMA_BASE_URLS, DATA_DIR, UPLOAD_DIR, ENABLE_ADMIN_EXPORT
 from constants import ERROR_MESSAGES
-from typing import List
+
 
 router = APIRouter()
 
@@ -57,7 +57,7 @@ async def get_html_from_markdown(
 
 class ChatForm(BaseModel):
     title: str
-    messages: List[dict]
+    messages: list[dict]
 
 
 @router.post("/pdf")

+ 14 - 0
backend/apps/webui/utils.py

@@ -1,6 +1,8 @@
 from importlib import util
 import os
 import re
+import sys
+import subprocess
 
 from config import TOOLS_DIR, FUNCTIONS_DIR
 
@@ -52,6 +54,7 @@ def load_toolkit_module_by_id(toolkit_id):
     frontmatter = extract_frontmatter(toolkit_path)
 
     try:
+        install_frontmatter_requirements(frontmatter.get("requirements", ""))
         spec.loader.exec_module(module)
         print(f"Loaded module: {module.__name__}")
         if hasattr(module, "Tools"):
@@ -73,6 +76,7 @@ def load_function_module_by_id(function_id):
     frontmatter = extract_frontmatter(function_path)
 
     try:
+        install_frontmatter_requirements(frontmatter.get("requirements", ""))
         spec.loader.exec_module(module)
         print(f"Loaded module: {module.__name__}")
         if hasattr(module, "Pipe"):
@@ -88,3 +92,13 @@ def load_function_module_by_id(function_id):
         # Move the file to the error folder
         os.rename(function_path, f"{function_path}.error")
         raise e
+
+
+def install_frontmatter_requirements(requirements):
+    if requirements:
+        req_list = [req.strip() for req in requirements.split(",")]
+        for req in req_list:
+            print(f"Installing requirement: {req}")
+            subprocess.check_call([sys.executable, "-m", "pip", "install", req])
+    else:
+        print("No requirements found in frontmatter.")

+ 14 - 8
backend/config.py

@@ -104,7 +104,7 @@ ENV = os.environ.get("ENV", "dev")
 
 try:
     PACKAGE_DATA = json.loads((BASE_DIR / "package.json").read_text())
-except:
+except Exception:
     try:
         PACKAGE_DATA = {"version": importlib.metadata.version("open-webui")}
     except importlib.metadata.PackageNotFoundError:
@@ -137,7 +137,7 @@ try:
     with open(str(changelog_path.absolute()), "r", encoding="utf8") as file:
         changelog_content = file.read()
 
-except:
+except Exception:
     changelog_content = (pkgutil.get_data("open_webui", "CHANGELOG.md") or b"").decode()
 
 
@@ -202,12 +202,12 @@ if RESET_CONFIG_ON_START:
         os.remove(f"{DATA_DIR}/config.json")
         with open(f"{DATA_DIR}/config.json", "w") as f:
             f.write("{}")
-    except:
+    except Exception:
         pass
 
 try:
     CONFIG_DATA = json.loads((DATA_DIR / "config.json").read_text())
-except:
+except Exception:
     CONFIG_DATA = {}
 
 
@@ -433,6 +433,12 @@ OAUTH_PICTURE_CLAIM = PersistentConfig(
     os.environ.get("OAUTH_PICTURE_CLAIM", "picture"),
 )
 
+OAUTH_EMAIL_CLAIM = PersistentConfig(
+    "OAUTH_EMAIL_CLAIM",
+    "oauth.oidc.email_claim",
+    os.environ.get("OAUTH_EMAIL_CLAIM", "email"),
+)
+
 
 def load_oauth_providers():
     OAUTH_PROVIDERS.clear()
@@ -641,7 +647,7 @@ if AIOHTTP_CLIENT_TIMEOUT == "":
 else:
     try:
         AIOHTTP_CLIENT_TIMEOUT = int(AIOHTTP_CLIENT_TIMEOUT)
-    except:
+    except Exception:
         AIOHTTP_CLIENT_TIMEOUT = 300
 
 
@@ -721,7 +727,7 @@ try:
     OPENAI_API_KEY = OPENAI_API_KEYS.value[
         OPENAI_API_BASE_URLS.value.index("https://api.openai.com/v1")
     ]
-except:
+except Exception:
     pass
 
 OPENAI_API_BASE_URL = "https://api.openai.com/v1"
@@ -1037,7 +1043,7 @@ RAG_EMBEDDING_MODEL = PersistentConfig(
     "rag.embedding_model",
     os.environ.get("RAG_EMBEDDING_MODEL", "sentence-transformers/all-MiniLM-L6-v2"),
 )
-log.info(f"Embedding model set: {RAG_EMBEDDING_MODEL.value}"),
+log.info(f"Embedding model set: {RAG_EMBEDDING_MODEL.value}")
 
 RAG_EMBEDDING_MODEL_AUTO_UPDATE = (
     os.environ.get("RAG_EMBEDDING_MODEL_AUTO_UPDATE", "").lower() == "true"
@@ -1059,7 +1065,7 @@ RAG_RERANKING_MODEL = PersistentConfig(
     os.environ.get("RAG_RERANKING_MODEL", ""),
 )
 if RAG_RERANKING_MODEL.value != "":
-    log.info(f"Reranking model set: {RAG_RERANKING_MODEL.value}"),
+    log.info(f"Reranking model set: {RAG_RERANKING_MODEL.value}")
 
 RAG_RERANKING_MODEL_AUTO_UPDATE = (
     os.environ.get("RAG_RERANKING_MODEL_AUTO_UPDATE", "").lower() == "true"

+ 5 - 4
backend/main.py

@@ -51,7 +51,7 @@ from apps.webui.internal.db import Session
 
 
 from pydantic import BaseModel
-from typing import List, Optional
+from typing import Optional
 
 from apps.webui.models.auths import Auths
 from apps.webui.models.models import Models
@@ -1883,7 +1883,7 @@ async def get_pipeline_valves(
                 res = r.json()
                 if "detail" in res:
                     detail = res["detail"]
-            except:
+            except Exception:
                 pass
 
         raise HTTPException(
@@ -2027,7 +2027,7 @@ async def get_model_filter_config(user=Depends(get_admin_user)):
 
 class ModelFilterConfigForm(BaseModel):
     enabled: bool
-    models: List[str]
+    models: list[str]
 
 
 @app.post("/api/config/model/filter")
@@ -2158,7 +2158,8 @@ async def oauth_callback(provider: str, request: Request, response: Response):
         log.warning(f"OAuth callback failed, sub is missing: {user_data}")
         raise HTTPException(400, detail=ERROR_MESSAGES.INVALID_CRED)
     provider_sub = f"{provider}@{sub}"
-    email = user_data.get("email", "").lower()
+    email_claim = webui_app.state.config.OAUTH_EMAIL_CLAIM
+    email = user_data.get(email_claim, "").lower()
     # We currently mandate that email addresses are provided
     if not email:
         log.warning(f"OAuth callback failed, email is missing: {user_data}")

+ 6 - 6
backend/requirements.txt

@@ -11,7 +11,7 @@ python-jose==3.3.0
 passlib[bcrypt]==1.7.4
 
 requests==2.32.3
-aiohttp==3.9.5
+aiohttp==3.10.2
 
 sqlalchemy==2.0.31
 alembic==1.13.2
@@ -34,12 +34,12 @@ anthropic
 google-generativeai==0.7.2
 tiktoken
 
-langchain==0.2.11
+langchain==0.2.12
 langchain-community==0.2.10
 langchain-chroma==0.1.2
 
 fake-useragent==1.5.1
-chromadb==0.5.4
+chromadb==0.5.5
 sentence-transformers==3.0.1
 pypdf==4.3.1
 docx2txt==0.8
@@ -62,11 +62,11 @@ rank-bm25==0.2.2
 
 faster-whisper==1.0.2
 
-PyJWT[crypto]==2.8.0
+PyJWT[crypto]==2.9.0
 authlib==1.3.1
 
 black==24.8.0
-langfuse==2.39.2
+langfuse==2.43.3
 youtube-transcript-api==0.6.2
 pytube==15.0.0
 
@@ -76,5 +76,5 @@ duckduckgo-search~=6.2.1
 
 ## Tests
 docker~=7.1.0
-pytest~=8.2.2
+pytest~=8.3.2
 pytest-docker~=3.1.1

+ 0 - 1
backend/start.sh

@@ -30,7 +30,6 @@ if [[ "${USE_CUDA_DOCKER,,}" == "true" ]]; then
   export LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/usr/local/lib/python3.11/site-packages/torch/lib:/usr/local/lib/python3.11/site-packages/nvidia/cudnn/lib"
 fi
 
-
 # Check if SPACE_ID is set, if so, configure for space
 if [ -n "$SPACE_ID" ]; then
   echo "Configuring for HuggingFace Space deployment"

+ 55 - 14
backend/utils/misc.py

@@ -2,14 +2,14 @@ from pathlib import Path
 import hashlib
 import re
 from datetime import timedelta
-from typing import Optional, List, Tuple
+from typing import Optional, Callable
 import uuid
 import time
 
 from utils.task import prompt_template
 
 
-def get_last_user_message_item(messages: List[dict]) -> Optional[dict]:
+def get_last_user_message_item(messages: list[dict]) -> Optional[dict]:
     for message in reversed(messages):
         if message["role"] == "user":
             return message
@@ -26,7 +26,7 @@ def get_content_from_message(message: dict) -> Optional[str]:
     return None
 
 
-def get_last_user_message(messages: List[dict]) -> Optional[str]:
+def get_last_user_message(messages: list[dict]) -> Optional[str]:
     message = get_last_user_message_item(messages)
     if message is None:
         return None
@@ -34,31 +34,31 @@ def get_last_user_message(messages: List[dict]) -> Optional[str]:
     return get_content_from_message(message)
 
 
-def get_last_assistant_message(messages: List[dict]) -> Optional[str]:
+def get_last_assistant_message(messages: list[dict]) -> Optional[str]:
     for message in reversed(messages):
         if message["role"] == "assistant":
             return get_content_from_message(message)
     return None
 
 
-def get_system_message(messages: List[dict]) -> Optional[dict]:
+def get_system_message(messages: list[dict]) -> Optional[dict]:
     for message in messages:
         if message["role"] == "system":
             return message
     return None
 
 
-def remove_system_message(messages: List[dict]) -> List[dict]:
+def remove_system_message(messages: list[dict]) -> list[dict]:
     return [message for message in messages if message["role"] != "system"]
 
 
-def pop_system_message(messages: List[dict]) -> Tuple[Optional[dict], List[dict]]:
+def pop_system_message(messages: list[dict]) -> tuple[Optional[dict], list[dict]]:
     return get_system_message(messages), remove_system_message(messages)
 
 
 def prepend_to_first_user_message_content(
-    content: str, messages: List[dict]
-) -> List[dict]:
+    content: str, messages: list[dict]
+) -> list[dict]:
     for message in messages:
         if message["role"] == "user":
             if isinstance(message["content"], list):
@@ -71,7 +71,7 @@ def prepend_to_first_user_message_content(
     return messages
 
 
-def add_or_update_system_message(content: str, messages: List[dict]):
+def add_or_update_system_message(content: str, messages: list[dict]):
     """
     Adds a new system message at the beginning of the messages list
     or updates the existing system message at the beginning.
@@ -135,10 +135,21 @@ def apply_model_system_prompt_to_body(params: dict, form_data: dict, user) -> di
 
 
 # inplace function: form_data is modified
-def apply_model_params_to_body(params: dict, form_data: dict) -> dict:
+def apply_model_params_to_body(
+    params: dict, form_data: dict, mappings: dict[str, Callable]
+) -> dict:
     if not params:
         return form_data
 
+    for key, cast_func in mappings.items():
+        if (value := params.get(key)) is not None:
+            form_data[key] = cast_func(value)
+
+    return form_data
+
+
+# inplace function: form_data is modified
+def apply_model_params_to_body_openai(params: dict, form_data: dict) -> dict:
     mappings = {
         "temperature": float,
         "top_p": int,
@@ -147,10 +158,40 @@ def apply_model_params_to_body(params: dict, form_data: dict) -> dict:
         "seed": lambda x: x,
         "stop": lambda x: [bytes(s, "utf-8").decode("unicode_escape") for s in x],
     }
+    return apply_model_params_to_body(params, form_data, mappings)
+
+
+def apply_model_params_to_body_ollama(params: dict, form_data: dict) -> dict:
+    opts = [
+        "temperature",
+        "top_p",
+        "seed",
+        "mirostat",
+        "mirostat_eta",
+        "mirostat_tau",
+        "num_ctx",
+        "num_batch",
+        "num_keep",
+        "repeat_last_n",
+        "tfs_z",
+        "top_k",
+        "min_p",
+        "use_mmap",
+        "use_mlock",
+        "num_thread",
+        "num_gpu",
+    ]
+    mappings = {i: lambda x: x for i in opts}
+    form_data = apply_model_params_to_body(params, form_data, mappings)
+
+    name_differences = {
+        "max_tokens": "num_predict",
+        "frequency_penalty": "repeat_penalty",
+    }
 
-    for key, cast_func in mappings.items():
-        if (value := params.get(key)) is not None:
-            form_data[key] = cast_func(value)
+    for key, value in name_differences.items():
+        if (param := params.get(key, None)) is not None:
+            form_data[value] = param
 
     return form_data
 

+ 2 - 2
backend/utils/tools.py

@@ -1,5 +1,5 @@
 import inspect
-from typing import get_type_hints, List, Dict, Any
+from typing import get_type_hints
 
 
 def doc_to_dict(docstring):
@@ -16,7 +16,7 @@ def doc_to_dict(docstring):
     return ret_dict
 
 
-def get_tools_specs(tools) -> List[dict]:
+def get_tools_specs(tools) -> list[dict]:
     function_list = [
         {"name": func, "function": getattr(tools, func)}
         for func in dir(tools)

+ 18 - 13
cypress/e2e/chat.cy.ts

@@ -38,9 +38,10 @@ describe('Settings', () => {
 			// User's message should be visible
 			cy.get('.chat-user').should('exist');
 			// Wait for the response
-			cy.get('.chat-assistant', { timeout: 120_000 }) // .chat-assistant is created after the first token is received
-				.find('div[aria-label="Generation Info"]', { timeout: 120_000 }) // Generation Info is created after the stop token is received
-				.should('exist');
+			// .chat-assistant is created after the first token is received
+			cy.get('.chat-assistant', { timeout: 10_000 }).should('exist');
+			// Generation Info is created after the stop token is received
+			cy.get('div[aria-label="Generation Info"]', { timeout: 120_000 }).should('exist');
 		});
 
 		it('user can share chat', () => {
@@ -57,21 +58,24 @@ describe('Settings', () => {
 			// User's message should be visible
 			cy.get('.chat-user').should('exist');
 			// Wait for the response
-			cy.get('.chat-assistant', { timeout: 120_000 }) // .chat-assistant is created after the first token is received
-				.find('div[aria-label="Generation Info"]', { timeout: 120_000 }) // Generation Info is created after the stop token is received
-				.should('exist');
+			// .chat-assistant is created after the first token is received
+			cy.get('.chat-assistant', { timeout: 10_000 }).should('exist');
+			// Generation Info is created after the stop token is received
+			cy.get('div[aria-label="Generation Info"]', { timeout: 120_000 }).should('exist');
 			// spy on requests
 			const spy = cy.spy();
-			cy.intercept('GET', '/api/v1/chats/*', spy);
+			cy.intercept('POST', '/api/v1/chats/**/share', spy);
 			// Open context menu
 			cy.get('#chat-context-menu-button').click();
 			// Click share button
 			cy.get('#chat-share-button').click();
 			// Check if the share dialog is visible
 			cy.get('#copy-and-share-chat-button').should('exist');
-			cy.wrap({}, { timeout: 5000 }).should(() => {
-				// Check if the request was made twice (once for to replace chat object and once more due to change event)
-				expect(spy).to.be.callCount(2);
+			// Click the copy button
+			cy.get('#copy-and-share-chat-button').click();
+			cy.wrap({}, { timeout: 5_000 }).should(() => {
+				// Check if the share request was made
+				expect(spy).to.be.callCount(1);
 			});
 		});
 
@@ -89,9 +93,10 @@ describe('Settings', () => {
 			// User's message should be visible
 			cy.get('.chat-user').should('exist');
 			// Wait for the response
-			cy.get('.chat-assistant', { timeout: 120_000 }) // .chat-assistant is created after the first token is received
-				.find('div[aria-label="Generation Info"]', { timeout: 120_000 }) // Generation Info is created after the stop token is received
-				.should('exist');
+			// .chat-assistant is created after the first token is received
+			cy.get('.chat-assistant', { timeout: 10_000 }).should('exist');
+			// Generation Info is created after the stop token is received
+			cy.get('div[aria-label="Generation Info"]', { timeout: 120_000 }).should('exist');
 			// Click on the generate image button
 			cy.get('[aria-label="Generate Image"]').click();
 			// Wait for image to be visible

+ 0 - 1
docs/CONTRIBUTING.md

@@ -22,7 +22,6 @@ Noticed something off? Have an idea? Check our [Issues tab](https://github.com/o
 > [!IMPORTANT]
 >
 > - **Template Compliance:** Please be aware that failure to follow the provided issue template, or not providing the requested information at all, will likely result in your issue being closed without further consideration. This approach is critical for maintaining the manageability and integrity of issue tracking.
->
 > - **Detail is Key:** To ensure your issue is understood and can be effectively addressed, it's imperative to include comprehensive details. Descriptions should be clear, including steps to reproduce, expected outcomes, and actual results. Lack of sufficient detail may hinder our ability to resolve your issue.
 
 ### 🧭 Scope of Support

File diff suppressed because it is too large
+ 247 - 270
package-lock.json


+ 22 - 16
package.json

@@ -1,6 +1,6 @@
 {
 	"name": "open-webui",
-	"version": "0.3.12",
+	"version": "0.3.13",
 	"private": true,
 	"scripts": {
 		"dev": "npm run pyodide:fetch && vite dev --host",
@@ -20,30 +20,31 @@
 		"pyodide:fetch": "node scripts/prepare-pyodide.js"
 	},
 	"devDependencies": {
-		"@sveltejs/adapter-auto": "^2.0.0",
-		"@sveltejs/adapter-static": "^2.0.3",
-		"@sveltejs/kit": "^1.30.0",
-		"@tailwindcss/typography": "^0.5.10",
+		"@sveltejs/adapter-auto": "3.2.2",
+		"@sveltejs/adapter-static": "^3.0.2",
+		"@sveltejs/kit": "^2.5.20",
+		"@sveltejs/vite-plugin-svelte": "^3.1.1",
+		"@tailwindcss/typography": "^0.5.13",
 		"@types/bun": "latest",
 		"@typescript-eslint/eslint-plugin": "^6.17.0",
 		"@typescript-eslint/parser": "^6.17.0",
 		"autoprefixer": "^10.4.16",
 		"cypress": "^13.8.1",
 		"eslint": "^8.56.0",
-		"eslint-config-prettier": "^8.5.0",
-		"eslint-plugin-cypress": "^3.0.2",
-		"eslint-plugin-svelte": "^2.30.0",
-		"i18next-parser": "^8.13.0",
+		"eslint-config-prettier": "^9.1.0",
+		"eslint-plugin-cypress": "^3.4.0",
+		"eslint-plugin-svelte": "^2.43.0",
+		"i18next-parser": "^9.0.1",
 		"postcss": "^8.4.31",
-		"prettier": "^2.8.0",
-		"prettier-plugin-svelte": "^2.10.1",
-		"svelte": "^4.0.5",
-		"svelte-check": "^3.4.3",
+		"prettier": "^3.3.3",
+		"prettier-plugin-svelte": "^3.2.6",
+		"svelte": "^4.2.18",
+		"svelte-check": "^3.8.5",
 		"svelte-confetti": "^1.3.2",
 		"tailwindcss": "^3.3.3",
 		"tslib": "^2.4.1",
-		"typescript": "^5.0.0",
-		"vite": "^4.4.2",
+		"typescript": "^5.5.4",
+		"vite": "^5.3.5",
 		"vitest": "^1.6.0"
 	},
 	"type": "module",
@@ -52,7 +53,7 @@
 		"@codemirror/lang-python": "^6.1.6",
 		"@codemirror/theme-one-dark": "^6.1.2",
 		"@pyscript/core": "^0.4.32",
-		"@sveltejs/adapter-node": "^1.3.1",
+		"@sveltejs/adapter-node": "^2.0.0",
 		"async": "^3.2.5",
 		"bits-ui": "^0.19.7",
 		"codemirror": "^6.0.1",
@@ -69,6 +70,7 @@
 		"js-sha256": "^0.10.1",
 		"katex": "^0.16.9",
 		"marked": "^9.1.0",
+		"marked-katex-extension": "^5.1.1",
 		"mermaid": "^10.9.1",
 		"pyodide": "^0.26.1",
 		"socket.io-client": "^4.2.0",
@@ -77,5 +79,9 @@
 		"tippy.js": "^6.3.7",
 		"turndown": "^7.2.0",
 		"uuid": "^9.0.1"
+	},
+	"engines": {
+		"node": ">=18.13.0 <=21.x.x",
+		"npm": ">=6.0.0"
 	}
 }

+ 6 - 6
pyproject.toml

@@ -1,6 +1,6 @@
 [project]
 name = "open-webui"
-description = "Open WebUI (Formerly Ollama WebUI)"
+description = "Open WebUI"
 authors = [
     { name = "Timothy Jaeryang Baek", email = "tim@openwebui.com" }
 ]
@@ -19,7 +19,7 @@ dependencies = [
     "passlib[bcrypt]==1.7.4",
 
     "requests==2.32.3",
-    "aiohttp==3.9.5",
+    "aiohttp==3.10.2",
 
     "sqlalchemy==2.0.31",
     "alembic==1.13.2",
@@ -41,12 +41,12 @@ dependencies = [
     "google-generativeai==0.7.2",
     "tiktoken",
 
-    "langchain==0.2.11",
+    "langchain==0.2.12",
     "langchain-community==0.2.10",
     "langchain-chroma==0.1.2",
 
     "fake-useragent==1.5.1",
-    "chromadb==0.5.4",
+    "chromadb==0.5.5",
     "sentence-transformers==3.0.1",
     "pypdf==4.3.1",
     "docx2txt==0.8",
@@ -69,11 +69,11 @@ dependencies = [
 
     "faster-whisper==1.0.2",
 
-    "PyJWT[crypto]==2.8.0",
+    "PyJWT[crypto]==2.9.0",
     "authlib==1.3.1",
 
     "black==24.8.0",
-    "langfuse==2.39.2",
+    "langfuse==2.43.3",
     "youtube-transcript-api==0.6.2",
     "pytube==15.0.0",
 

+ 8 - 6
requirements-dev.lock

@@ -10,7 +10,9 @@
 #   universal: false
 
 -e file:.
-aiohttp==3.9.5
+aiohappyeyeballs==2.3.5
+    # via aiohttp
+aiohttp==3.10.2
     # via langchain
     # via langchain-community
     # via open-webui
@@ -84,9 +86,9 @@ chardet==5.2.0
 charset-normalizer==3.3.2
     # via requests
     # via unstructured-client
-chroma-hnswlib==0.7.5
+chroma-hnswlib==0.7.6
     # via chromadb
-chromadb==0.5.4
+chromadb==0.5.5
     # via langchain-chroma
     # via open-webui
 click==8.1.7
@@ -269,7 +271,7 @@ jsonpointer==2.4
     # via jsonpatch
 kubernetes==29.0.0
     # via chromadb
-langchain==0.2.11
+langchain==0.2.12
     # via langchain-community
     # via open-webui
 langchain-chroma==0.1.2
@@ -285,7 +287,7 @@ langchain-text-splitters==0.2.0
     # via langchain
 langdetect==1.0.9
     # via unstructured
-langfuse==2.39.2
+langfuse==2.43.3
     # via open-webui
 langsmith==0.1.96
     # via langchain
@@ -491,7 +493,7 @@ pydub==0.25.1
     # via open-webui
 pygments==2.18.0
     # via rich
-pyjwt==2.8.0
+pyjwt==2.9.0
     # via open-webui
 pymongo==4.8.0
     # via open-webui

+ 8 - 6
requirements.lock

@@ -10,7 +10,9 @@
 #   universal: false
 
 -e file:.
-aiohttp==3.9.5
+aiohappyeyeballs==2.3.5
+    # via aiohttp
+aiohttp==3.10.2
     # via langchain
     # via langchain-community
     # via open-webui
@@ -84,9 +86,9 @@ chardet==5.2.0
 charset-normalizer==3.3.2
     # via requests
     # via unstructured-client
-chroma-hnswlib==0.7.5
+chroma-hnswlib==0.7.6
     # via chromadb
-chromadb==0.5.4
+chromadb==0.5.5
     # via langchain-chroma
     # via open-webui
 click==8.1.7
@@ -269,7 +271,7 @@ jsonpointer==2.4
     # via jsonpatch
 kubernetes==29.0.0
     # via chromadb
-langchain==0.2.11
+langchain==0.2.12
     # via langchain-community
     # via open-webui
 langchain-chroma==0.1.2
@@ -285,7 +287,7 @@ langchain-text-splitters==0.2.0
     # via langchain
 langdetect==1.0.9
     # via unstructured
-langfuse==2.39.2
+langfuse==2.43.3
     # via open-webui
 langsmith==0.1.96
     # via langchain
@@ -491,7 +493,7 @@ pydub==0.25.1
     # via open-webui
 pygments==2.18.0
     # via rich
-pyjwt==2.8.0
+pyjwt==2.9.0
     # via open-webui
 pymongo==4.8.0
     # via open-webui

+ 1 - 1
src/app.html

@@ -1,4 +1,4 @@
-<!DOCTYPE html>
+<!doctype html>
 <html lang="en">
 	<head>
 		<meta charset="utf-8" />

+ 1 - 0
src/lib/apis/index.ts

@@ -69,6 +69,7 @@ type ChatCompletedForm = {
 	model: string;
 	messages: string[];
 	chat_id: string;
+	session_id: string;
 };
 
 export const chatCompleted = async (token: string, body: ChatCompletedForm) => {

+ 0 - 50
src/lib/apis/ollama/index.ts

@@ -1,5 +1,4 @@
 import { OLLAMA_API_BASE_URL } from '$lib/constants';
-import { titleGenerationTemplate } from '$lib/utils';
 
 export const getOllamaConfig = async (token: string = '') => {
 	let error = null;
@@ -203,55 +202,6 @@ export const getOllamaModels = async (token: string = '') => {
 		});
 };
 
-// TODO: migrate to backend
-export const generateTitle = async (
-	token: string = '',
-	template: string,
-	model: string,
-	prompt: string
-) => {
-	let error = null;
-
-	template = titleGenerationTemplate(template, prompt);
-
-	console.log(template);
-
-	const res = await fetch(`${OLLAMA_API_BASE_URL}/api/generate`, {
-		method: 'POST',
-		headers: {
-			Accept: 'application/json',
-			'Content-Type': 'application/json',
-			Authorization: `Bearer ${token}`
-		},
-		body: JSON.stringify({
-			model: model,
-			prompt: template,
-			stream: false,
-			options: {
-				// Restrict the number of tokens generated to 50
-				num_predict: 50
-			}
-		})
-	})
-		.then(async (res) => {
-			if (!res.ok) throw await res.json();
-			return res.json();
-		})
-		.catch((err) => {
-			console.log(err);
-			if ('detail' in err) {
-				error = err.detail;
-			}
-			return null;
-		});
-
-	if (error) {
-		throw error;
-	}
-
-	return res?.response.replace(/["']/g, '') ?? 'New Chat';
-};
-
 export const generatePrompt = async (token: string = '', model: string, conversation: string) => {
 	let error = null;
 

+ 1 - 126
src/lib/apis/openai/index.ts

@@ -1,6 +1,4 @@
 import { OPENAI_API_BASE_URL } from '$lib/constants';
-import { titleGenerationTemplate } from '$lib/utils';
-import { type Model, models, settings } from '$lib/stores';
 
 export const getOpenAIConfig = async (token: string = '') => {
 	let error = null;
@@ -260,7 +258,7 @@ export const getOpenAIModelsDirect = async (
 		throw error;
 	}
 
-	const models = Array.isArray(res) ? res : res?.data ?? null;
+	const models = Array.isArray(res) ? res : (res?.data ?? null);
 
 	return models
 		.map((model) => ({ id: model.id, name: model.name ?? model.id, external: true }))
@@ -330,126 +328,3 @@ export const synthesizeOpenAISpeech = async (
 
 	return res;
 };
-
-export const generateTitle = async (
-	token: string = '',
-	template: string,
-	model: string,
-	prompt: string,
-	chat_id?: string,
-	url: string = OPENAI_API_BASE_URL
-) => {
-	let error = null;
-
-	template = titleGenerationTemplate(template, prompt);
-
-	console.log(template);
-
-	const res = await fetch(`${url}/chat/completions`, {
-		method: 'POST',
-		headers: {
-			Accept: 'application/json',
-			'Content-Type': 'application/json',
-			Authorization: `Bearer ${token}`
-		},
-		body: JSON.stringify({
-			model: model,
-			messages: [
-				{
-					role: 'user',
-					content: template
-				}
-			],
-			stream: false,
-			// Restricting the max tokens to 50 to avoid long titles
-			max_tokens: 50,
-			...(chat_id && { chat_id: chat_id }),
-			title: true
-		})
-	})
-		.then(async (res) => {
-			if (!res.ok) throw await res.json();
-			return res.json();
-		})
-		.catch((err) => {
-			console.log(err);
-			if ('detail' in err) {
-				error = err.detail;
-			}
-			return null;
-		});
-
-	if (error) {
-		throw error;
-	}
-
-	return res?.choices[0]?.message?.content.replace(/["']/g, '') ?? 'New Chat';
-};
-
-export const generateSearchQuery = async (
-	token: string = '',
-	model: string,
-	previousMessages: string[],
-	prompt: string,
-	url: string = OPENAI_API_BASE_URL
-): Promise<string | undefined> => {
-	let error = null;
-
-	// TODO: Allow users to specify the prompt
-	// Get the current date in the format "January 20, 2024"
-	const currentDate = new Intl.DateTimeFormat('en-US', {
-		year: 'numeric',
-		month: 'long',
-		day: '2-digit'
-	}).format(new Date());
-
-	const res = await fetch(`${url}/chat/completions`, {
-		method: 'POST',
-		headers: {
-			Accept: 'application/json',
-			'Content-Type': 'application/json',
-			Authorization: `Bearer ${token}`
-		},
-		body: JSON.stringify({
-			model: model,
-			// Few shot prompting
-			messages: [
-				{
-					role: 'assistant',
-					content: `You are tasked with generating web search queries. Give me an appropriate query to answer my question for google search. Answer with only the query. Today is ${currentDate}.`
-				},
-				{
-					role: 'user',
-					content: prompt
-				}
-				// {
-				// 	role: 'user',
-				// 	content:
-				// 		(previousMessages.length > 0
-				// 			? `Previous Questions:\n${previousMessages.join('\n')}\n\n`
-				// 			: '') + `Current Question: ${prompt}`
-				// }
-			],
-			stream: false,
-			// Restricting the max tokens to 30 to avoid long search queries
-			max_tokens: 30
-		})
-	})
-		.then(async (res) => {
-			if (!res.ok) throw await res.json();
-			return res.json();
-		})
-		.catch((err) => {
-			console.log(err);
-			if ('detail' in err) {
-				error = err.detail;
-			}
-			return undefined;
-		});
-
-	if (error) {
-		throw error;
-	}
-
-	return res?.choices[0]?.message?.content.replace(/["']/g, '') ?? undefined;
-};

+ 6 - 6
src/lib/components/ChangelogModal.svelte

@@ -75,12 +75,12 @@
 										class="font-semibold uppercase text-xs {section === 'added'
 											? 'text-white bg-blue-600'
 											: section === 'fixed'
-											? 'text-white bg-green-600'
-											: section === 'changed'
-											? 'text-white bg-yellow-600'
-											: section === 'removed'
-											? 'text-white bg-red-600'
-											: ''}  w-fit px-3 rounded-full my-2.5"
+												? 'text-white bg-green-600'
+												: section === 'changed'
+													? 'text-white bg-yellow-600'
+													: section === 'removed'
+														? 'text-white bg-red-600'
+														: ''}  w-fit px-3 rounded-full my-2.5"
 									>
 										{section}
 									</div>

+ 1 - 1
src/lib/components/admin/Settings/Documents.svelte

@@ -112,7 +112,7 @@
 							url: OpenAIUrl,
 							batch_size: OpenAIBatchSize
 						}
-				  }
+					}
 				: {})
 		}).catch(async (error) => {
 			toast.error(error);

+ 24 - 16
src/lib/components/chat/Chat.svelte

@@ -579,8 +579,8 @@
 		let selectedModelIds = modelId
 			? [modelId]
 			: atSelectedModel !== undefined
-			? [atSelectedModel.id]
-			: selectedModels;
+				? [atSelectedModel.id]
+				: selectedModels;
 
 		// Create response messages for each selected model
 		const responseMessageIds = {};
@@ -739,11 +739,11 @@
 								? await getAndUpdateUserLocation(localStorage.token)
 								: undefined
 						)}${
-							responseMessage?.userContext ?? null
+							(responseMessage?.userContext ?? null)
 								? `\n\nUser Context:\n${responseMessage?.userContext ?? ''}`
 								: ''
 						}`
-				  }
+					}
 				: undefined,
 			...messages
 		]
@@ -811,10 +811,10 @@
 			options: {
 				...(params ?? $settings.params ?? {}),
 				stop:
-					params?.stop ?? $settings?.params?.stop ?? undefined
+					(params?.stop ?? $settings?.params?.stop ?? undefined)
 						? (params?.stop.split(',').map((token) => token.trim()) ?? $settings.params.stop).map(
 								(str) => decodeURIComponent(JSON.parse('"' + str.replace(/\"/g, '\\"') + '"'))
-						  )
+							)
 						: undefined,
 				num_predict: params?.max_tokens ?? $settings?.params?.max_tokens ?? undefined,
 				repeat_penalty:
@@ -877,6 +877,10 @@
 								} else {
 									responseMessage.content += data.message.content;
 
+									if (navigator.vibrate && ($settings?.hapticFeedback ?? false)) {
+										navigator.vibrate(5);
+									}
+
 									const sentences = extractSentencesForAudio(responseMessage.content);
 									sentences.pop();
 
@@ -1056,10 +1060,10 @@
 					stream: true,
 					model: model.id,
 					stream_options:
-						model.info?.meta?.capabilities?.usage ?? false
+						(model.info?.meta?.capabilities?.usage ?? false)
 							? {
 									include_usage: true
-							  }
+								}
 							: undefined,
 					messages: [
 						params?.system || $settings.system || (responseMessage?.userContext ?? null)
@@ -1072,11 +1076,11 @@
 											? await getAndUpdateUserLocation(localStorage.token)
 											: undefined
 									)}${
-										responseMessage?.userContext ?? null
+										(responseMessage?.userContext ?? null)
 											? `\n\nUser Context:\n${responseMessage?.userContext ?? ''}`
 											: ''
 									}`
-							  }
+								}
 							: undefined,
 						...messages
 					]
@@ -1092,7 +1096,7 @@
 												text:
 													arr.length - 1 !== idx
 														? message.content
-														: message?.raContent ?? message.content
+														: (message?.raContent ?? message.content)
 											},
 											...message.files
 												.filter((file) => file.type === 'image')
@@ -1103,20 +1107,20 @@
 													}
 												}))
 										]
-								  }
+									}
 								: {
 										content:
 											arr.length - 1 !== idx
 												? message.content
-												: message?.raContent ?? message.content
-								  })
+												: (message?.raContent ?? message.content)
+									})
 						})),
 					seed: params?.seed ?? $settings?.params?.seed ?? undefined,
 					stop:
-						params?.stop ?? $settings?.params?.stop ?? undefined
+						(params?.stop ?? $settings?.params?.stop ?? undefined)
 							? (params?.stop.split(',').map((token) => token.trim()) ?? $settings.params.stop).map(
 									(str) => decodeURIComponent(JSON.parse('"' + str.replace(/\"/g, '\\"') + '"'))
-							  )
+								)
 							: undefined,
 					temperature: params?.temperature ?? $settings?.params?.temperature ?? undefined,
 					top_p: params?.top_p ?? $settings?.params?.top_p ?? undefined,
@@ -1177,6 +1181,10 @@
 					} else {
 						responseMessage.content += value;
 
+						if (navigator.vibrate && ($settings?.hapticFeedback ?? false)) {
+							navigator.vibrate(5);
+						}
+
 						const sentences = extractSentencesForAudio(responseMessage.content);
 						sentences.pop();
 

+ 3 - 1
src/lib/components/chat/Controls/Controls.svelte

@@ -9,6 +9,8 @@
 	import FileItem from '$lib/components/common/FileItem.svelte';
 	import Collapsible from '$lib/components/common/Collapsible.svelte';
 
+	import { user } from '$lib/stores';
+
 	export let models = [];
 
 	export let chatFiles = [];
@@ -78,7 +80,7 @@
 		<Collapsible title={$i18n.t('Advanced Params')} open={true}>
 			<div class="text-sm mt-1.5" slot="content">
 				<div>
-					<AdvancedParams bind:params />
+					<AdvancedParams admin={$user?.role === 'admin'} bind:params />
 				</div>
 			</div>
 		</Collapsible>

+ 16 - 16
src/lib/components/chat/MessageInput/CallOverlay.svelte

@@ -609,10 +609,10 @@
 								style="font-size:{rmsLevel * 100 > 4
 									? '4.5'
 									: rmsLevel * 100 > 2
-									? '4.25'
-									: rmsLevel * 100 > 1
-									? '3.75'
-									: '3.5'}rem;width: 100%; text-align:center;"
+										? '4.25'
+										: rmsLevel * 100 > 1
+											? '3.75'
+											: '3.5'}rem;width: 100%; text-align:center;"
 							>
 								{emoji}
 							</div>
@@ -658,10 +658,10 @@
 								class=" {rmsLevel * 100 > 4
 									? ' size-[4.5rem]'
 									: rmsLevel * 100 > 2
-									? ' size-16'
-									: rmsLevel * 100 > 1
-									? 'size-14'
-									: 'size-12'}  transition-all rounded-full {(model?.info?.meta
+										? ' size-16'
+										: rmsLevel * 100 > 1
+											? 'size-14'
+											: 'size-12'}  transition-all rounded-full {(model?.info?.meta
 									?.profile_image_url ?? '/static/favicon.png') !== '/static/favicon.png'
 									? ' bg-cover bg-center bg-no-repeat'
 									: 'bg-black dark:bg-white'}  bg-black dark:bg-white"
@@ -691,10 +691,10 @@
 									style="font-size:{rmsLevel * 100 > 4
 										? '13'
 										: rmsLevel * 100 > 2
-										? '12'
-										: rmsLevel * 100 > 1
-										? '11.5'
-										: '11'}rem;width:100%;text-align:center;"
+											? '12'
+											: rmsLevel * 100 > 1
+												? '11.5'
+												: '11'}rem;width:100%;text-align:center;"
 								>
 									{emoji}
 								</div>
@@ -740,10 +740,10 @@
 									class=" {rmsLevel * 100 > 4
 										? ' size-52'
 										: rmsLevel * 100 > 2
-										? 'size-48'
-										: rmsLevel * 100 > 1
-										? 'size-[11.5rem]'
-										: 'size-44'}  transition-all rounded-full {(model?.info?.meta
+											? 'size-48'
+											: rmsLevel * 100 > 1
+												? 'size-[11.5rem]'
+												: 'size-44'}  transition-all rounded-full {(model?.info?.meta
 										?.profile_image_url ?? '/static/favicon.png') !== '/static/favicon.png'
 										? ' bg-cover bg-center bg-no-repeat'
 										: 'bg-black dark:bg-white'} "

+ 1 - 1
src/lib/components/chat/MessageInput/Documents.svelte

@@ -27,7 +27,7 @@
 						title: $i18n.t('All Documents'),
 						collection_names: $documents.map((doc) => doc.collection_name)
 					}
-			  ]
+				]
 			: []),
 		...$documents
 			.reduce((a, e, i, arr) => {

+ 4 - 4
src/lib/components/chat/Messages.svelte

@@ -305,7 +305,7 @@
 				{#each messages as message, messageIdx}
 					<div class=" w-full {messageIdx === messages.length - 1 ? ' pb-12' : ''}">
 						<div
-							class="flex flex-col justify-between px-5 mb-3 {$settings?.widescreenMode ?? null
+							class="flex flex-col justify-between px-5 mb-3 {($settings?.widescreenMode ?? null)
 								? 'max-w-full'
 								: 'max-w-5xl'} mx-auto rounded-lg group"
 						>
@@ -317,10 +317,10 @@
 									{message}
 									isFirstMessage={messageIdx === 0}
 									siblings={message.parentId !== null
-										? history.messages[message.parentId]?.childrenIds ?? []
-										: Object.values(history.messages)
+										? (history.messages[message.parentId]?.childrenIds ?? [])
+										: (Object.values(history.messages)
 												.filter((message) => message.parentId === null)
-												.map((message) => message.id) ?? []}
+												.map((message) => message.id) ?? [])}
 									{confirmEditMessage}
 									{showPreviousMessage}
 									{showNextMessage}

+ 2 - 2
src/lib/components/chat/Messages/CitationsModal.svelte

@@ -60,8 +60,8 @@
 									href={document?.metadata?.file_id
 										? `/api/v1/files/${document?.metadata?.file_id}/content`
 										: document.source.name.includes('http')
-										? document.source.name
-										: `#`}
+											? document.source.name
+											: `#`}
 									target="_blank"
 								>
 									{document?.metadata?.name ?? document.source.name}

+ 101 - 61
src/lib/components/chat/Messages/CodeBlock.svelte

@@ -1,17 +1,25 @@
 <script lang="ts">
-	import Spinner from '$lib/components/common/Spinner.svelte';
-	import { copyToClipboard } from '$lib/utils';
 	import hljs from 'highlight.js';
-	import 'highlight.js/styles/github-dark.min.css';
 	import { loadPyodide } from 'pyodide';
-	import { onMount, tick } from 'svelte';
+	import mermaid from 'mermaid';
+
+	import { getContext, getAllContexts, onMount } from 'svelte';
+	import { copyToClipboard } from '$lib/utils';
+
+	import 'highlight.js/styles/github-dark.min.css';
+
 	import PyodideWorker from '$lib/workers/pyodide.worker?worker';
 
+	const i18n = getContext('i18n');
+
 	export let id = '';
 
+	export let token;
 	export let lang = '';
 	export let code = '';
 
+	let mermaidHtml = null;
+
 	let highlightedCode = null;
 	let executing = false;
 
@@ -204,70 +212,102 @@ __builtins__.input = input`);
 	};
 
 	let debounceTimeout;
+
 	$: if (code) {
-		// Function to perform the code highlighting
-		const highlightCode = () => {
-			highlightedCode = hljs.highlightAuto(code, hljs.getLanguage(lang)?.aliases).value || code;
-		};
+		if (lang === 'mermaid' && (token?.raw ?? '').endsWith('```')) {
+			(async () => {
+				try {
+					const { svg } = await mermaid.render(`mermaid-${id}`, code);
+					mermaidHtml = svg;
+				} catch (error) {
+					console.error('Error:', error);
+				}
+			})();
+		} else {
+			// Function to perform the code highlighting
+			const highlightCode = () => {
+				highlightedCode = hljs.highlightAuto(code, hljs.getLanguage(lang)?.aliases).value || code;
+			};
+
+			// Clear the previous timeout if it exists
+			clearTimeout(debounceTimeout);
+			// Set a new timeout to debounce the code highlighting
+			debounceTimeout = setTimeout(highlightCode, 10);
+		}
+	}
 
-		// Clear the previous timeout if it exists
-		clearTimeout(debounceTimeout);
+	onMount(async () => {
+		await mermaid.initialize({ startOnLoad: true });
 
-		// Set a new timeout to debounce the code highlighting
-		debounceTimeout = setTimeout(highlightCode, 10);
-	}
+		if (lang === 'mermaid' && (token?.raw ?? '').endsWith('```')) {
+			try {
+				const { svg } = await mermaid.render(`mermaid-${id}`, code);
+				mermaidHtml = svg;
+			} catch (error) {
+				console.error('Error:', error);
+			}
+		}
+	});
 </script>
 
 <div class="my-2" dir="ltr">
-	<div
-		class="flex justify-between bg-[#202123] text-white text-xs px-4 pt-1 pb-0.5 rounded-t-lg overflow-x-auto"
-	>
-		<div class="p-1">{@html lang}</div>
-
-		<div class="flex items-center">
-			{#if lang.toLowerCase() === 'python' || lang.toLowerCase() === 'py' || (lang === '' && checkPythonCode(code))}
-				{#if executing}
-					<div class="copy-code-button bg-none border-none p-1 cursor-not-allowed">Running</div>
-				{:else}
-					<button
-						class="copy-code-button bg-none border-none p-1"
-						on:click={() => {
-							executePython(code);
-						}}>Run</button
-					>
+	{#if lang === 'mermaid'}
+		{#if mermaidHtml}
+			{@html mermaidHtml}
+		{:else}
+			<pre class=" mermaid-{id}">{code}</pre>
+		{/if}
+	{:else}
+		<div
+			class="flex justify-between bg-[#202123] text-white text-xs px-4 pt-1 pb-0.5 rounded-t-lg overflow-x-auto"
+		>
+			<div class="p-1">{@html lang}</div>
+
+			<div class="flex items-center">
+				{#if lang.toLowerCase() === 'python' || lang.toLowerCase() === 'py' || (lang === '' && checkPythonCode(code))}
+					{#if executing}
+						<div class="copy-code-button bg-none border-none p-1 cursor-not-allowed">Running</div>
+					{:else}
+						<button
+							class="copy-code-button bg-none border-none p-1"
+							on:click={() => {
+								executePython(code);
+							}}>{$i18n.t('Run')}</button
+						>
+					{/if}
 				{/if}
-			{/if}
-			<button class="copy-code-button bg-none border-none p-1" on:click={copyCode}
-				>{copied ? 'Copied' : 'Copy Code'}</button
-			>
-		</div>
-	</div>
-
-	<pre
-		class=" hljs p-4 px-5 overflow-x-auto"
-		style="border-top-left-radius: 0px; border-top-right-radius: 0px; {(executing ||
-			stdout ||
-			stderr ||
-			result) &&
-			'border-bottom-left-radius: 0px; border-bottom-right-radius: 0px;'}"><code
-			class="language-{lang} rounded-t-none whitespace-pre"
-			>{#if highlightedCode}{@html highlightedCode}{:else}{code}{/if}</code
-		></pre>
-
-	<div
-		id="plt-canvas-{id}"
-		class="bg-[#202123] text-white max-w-full overflow-x-auto scrollbar-hidden"
-	/>
-
-	{#if executing}
-		<div class="bg-[#202123] text-white px-4 py-4 rounded-b-lg">
-			<div class=" text-gray-500 text-xs mb-1">STDOUT/STDERR</div>
-			<div class="text-sm">Running...</div>
-		</div>
-	{:else if stdout || stderr || result}
-		<div class="bg-[#202123] text-white px-4 py-4 rounded-b-lg">
-			<div class=" text-gray-500 text-xs mb-1">STDOUT/STDERR</div>
-			<div class="text-sm">{stdout || stderr || result}</div>
+				<button class="copy-code-button bg-none border-none p-1" on:click={copyCode}
+					>{copied ? $i18n.t('Copied') : $i18n.t('Copy Code')}</button
+				>
+			</div>
 		</div>
+
+		<pre
+			class=" hljs p-4 px-5 overflow-x-auto"
+			style="border-top-left-radius: 0px; border-top-right-radius: 0px; {(executing ||
+				stdout ||
+				stderr ||
+				result) &&
+				'border-bottom-left-radius: 0px; border-bottom-right-radius: 0px;'}"><code
+				class="language-{lang} rounded-t-none whitespace-pre"
+				>{#if highlightedCode}{@html highlightedCode}{:else}{code}{/if}</code
+			></pre>
+
+		<div
+			id="plt-canvas-{id}"
+			class="bg-[#202123] text-white max-w-full overflow-x-auto scrollbar-hidden"
+		/>
+
+		{#if executing}
+			<div class="bg-[#202123] text-white px-4 py-4 rounded-b-lg">
+				<div class=" text-gray-500 text-xs mb-1">STDOUT/STDERR</div>
+				<div class="text-sm">Running...</div>
+			</div>
+		{:else if stdout || stderr || result}
+			<div class="bg-[#202123] text-white px-4 py-4 rounded-b-lg">
+				<div class=" text-gray-500 text-xs mb-1">STDOUT/STDERR</div>
+				<div class="text-sm">{stdout || stderr || result}</div>
+			</div>
+		{/if}
 	{/if}
 </div>

+ 9 - 0
src/lib/components/chat/Messages/KatexRenderer.svelte

@@ -0,0 +1,9 @@
+<script lang="ts">
+	import katex from 'katex';
+	import 'katex/contrib/mhchem';
+
+	export let content: string;
+	export let displayMode: boolean = false;
+</script>
+
+{@html katex.renderToString(content, { displayMode, throwOnError: false })}

+ 10 - 3
src/lib/components/chat/Messages/MarkdownInlineTokens.svelte

@@ -1,8 +1,11 @@
 <script lang="ts">
 	import type { Token } from 'marked';
-	import { unescapeHtml } from '$lib/utils';
+	import { revertSanitizedResponseContent, unescapeHtml } from '$lib/utils';
+	import { onMount } from 'svelte';
 	import Image from '$lib/components/common/Image.svelte';
 
+	import KatexRenderer from './KatexRenderer.svelte';
+
 	export let id: string;
 	export let tokens: Token[];
 </script>
@@ -25,14 +28,18 @@
 			<svelte:self id={`${id}-em`} tokens={token.tokens} />
 		</em>
 	{:else if token.type === 'codespan'}
-		<code class="codespan">{unescapeHtml(token.text.replaceAll('&amp;', '&'))}</code>
+		<code class="codespan">{revertSanitizedResponseContent(token.raw)}</code>
 	{:else if token.type === 'br'}
 		<br />
 	{:else if token.type === 'del'}
 		<del>
 			<svelte:self id={`${id}-del`} tokens={token.tokens} />
 		</del>
+	{:else if token.type === 'inlineKatex'}
+		{#if token.text}
+			<KatexRenderer content={revertSanitizedResponseContent(token.text)} displayMode={false} />
+		{/if}
 	{:else if token.type === 'text'}
-		{unescapeHtml(token.text)}
+		{token.raw}
 	{/if}
 {/each}

+ 114 - 119
src/lib/components/chat/Messages/MarkdownTokens.svelte

@@ -1,137 +1,132 @@
 <script lang="ts">
-	import { marked } from 'marked';
+	import { onMount } from 'svelte';
 	import type { Token } from 'marked';
 	import { revertSanitizedResponseContent, unescapeHtml } from '$lib/utils';
 
-	import { onMount } from 'svelte';
-
-	import Image from '$lib/components/common/Image.svelte';
 	import CodeBlock from '$lib/components/chat/Messages/CodeBlock.svelte';
-
 	import MarkdownInlineTokens from '$lib/components/chat/Messages/MarkdownInlineTokens.svelte';
+	import KatexRenderer from './KatexRenderer.svelte';
 
 	export let id: string;
 	export let tokens: Token[];
 	export let top = true;
 
-	let containerElement;
-
 	const headerComponent = (depth: number) => {
 		return 'h' + depth;
 	};
-
-	const renderer = new marked.Renderer();
-	// For code blocks with simple backticks
-	renderer.codespan = (code) => {
-		return `<code class="codespan">${code.replaceAll('&amp;', '&')}</code>`;
-	};
-
-	let codes = [];
-	renderer.code = (code, lang) => {
-		codes.push({
-			code: code,
-			lang: lang
-		});
-		codes = codes;
-		const codeId = `${id}-${codes.length}`;
-
-		const interval = setInterval(() => {
-			const codeElement = document.getElementById(`code-${codeId}`);
-			if (codeElement) {
-				clearInterval(interval);
-				// If the code is already loaded, don't load it again
-				if (codeElement.innerHTML) {
-					return;
-				}
-
-				new CodeBlock({
-					target: codeElement,
-					props: {
-						id: `${id}-${codes.length}`,
-						lang: lang,
-						code: revertSanitizedResponseContent(code)
-					},
-					hydrate: true,
-					$$inline: true
-				});
-			}
-		}, 10);
-
-		return `<div id="code-${id}-${codes.length}"></div>`;
-	};
-
-	let images = [];
-	renderer.image = (href, title, text) => {
-		images.push({
-			href: href,
-			title: title,
-			text: text
-		});
-		images = images;
-
-		const imageId = `${id}-${images.length}`;
-		const interval = setInterval(() => {
-			const imageElement = document.getElementById(`image-${imageId}`);
-			if (imageElement) {
-				clearInterval(interval);
-
-				// If the image is already loaded, don't load it again
-				if (imageElement.innerHTML) {
-					return;
-				}
-
-				console.log('image', href, text);
-				new Image({
-					target: imageElement,
-					props: {
-						src: href,
-						alt: text
-					},
-					$$inline: true
-				});
-			}
-		}, 10);
-
-		return `<div id="image-${id}-${images.length}"></div>`;
-	};
-
-	// Open all links in a new tab/window (from https://github.com/markedjs/marked/issues/655#issuecomment-383226346)
-	const origLinkRenderer = renderer.link;
-	renderer.link = (href, title, text) => {
-		const html = origLinkRenderer.call(renderer, href, title, text);
-		return html.replace(/^<a /, '<a target="_blank" rel="nofollow" ');
-	};
-
-	const { extensions, ...defaults } = marked.getDefaults() as marked.MarkedOptions & {
-		// eslint-disable-next-line @typescript-eslint/no-explicit-any
-		extensions: any;
-	};
-
-	$: if (tokens) {
-		images = [];
-		codes = [];
-	}
 </script>
 
-<div bind:this={containerElement} class="flex flex-col">
-	{#each tokens as token, tokenIdx (`${id}-${tokenIdx}`)}
-		{#if token.type === 'code'}
-			{#if token.lang === 'mermaid'}
-				<pre class="mermaid">{revertSanitizedResponseContent(token.text)}</pre>
-			{:else}
-				<CodeBlock
-					id={`${id}-${tokenIdx}`}
-					lang={token?.lang ?? ''}
-					code={revertSanitizedResponseContent(token?.text ?? '')}
-				/>
-			{/if}
+<!-- {JSON.stringify(tokens)} -->
+{#each tokens as token, tokenIdx}
+	{#if token.type === 'hr'}
+		<hr />
+	{:else if token.type === 'heading'}
+		<svelte:element this={headerComponent(token.depth)}>
+			<MarkdownInlineTokens id={`${id}-${tokenIdx}-h`} tokens={token.tokens} />
+		</svelte:element>
+	{:else if token.type === 'code'}
+		<CodeBlock
+			id={`${id}-${tokenIdx}`}
+			{token}
+			lang={token?.lang ?? ''}
+			code={revertSanitizedResponseContent(token?.text ?? '')}
+		/>
+	{:else if token.type === 'table'}
+		<table>
+			<thead>
+				<tr>
+					{#each token.header as header, headerIdx}
+						<th style={token.align[headerIdx] ? '' : `text-align: ${token.align[headerIdx]}`}>
+							<MarkdownInlineTokens
+								id={`${id}-${tokenIdx}-header-${headerIdx}`}
+								tokens={header.tokens}
+							/>
+						</th>
+					{/each}
+				</tr>
+			</thead>
+			<tbody>
+				{#each token.rows as row, rowIdx}
+					<tr>
+						{#each row ?? [] as cell, cellIdx}
+							<td style={token.align[cellIdx] ? '' : `text-align: ${token.align[cellIdx]}`}>
+								<MarkdownInlineTokens
+									id={`${id}-${tokenIdx}-row-${rowIdx}-${cellIdx}`}
+									tokens={cell.tokens}
+								/>
+							</td>
+						{/each}
+					</tr>
+				{/each}
+			</tbody>
+		</table>
+	{:else if token.type === 'blockquote'}
+		<blockquote>
+			<svelte:self id={`${id}-${tokenIdx}`} tokens={token.tokens} />
+		</blockquote>
+	{:else if token.type === 'list'}
+		{#if token.ordered}
+			<ol start={token.start || 1}>
+				{#each token.items as item, itemIdx}
+					<li>
+						<svelte:self
+							id={`${id}-${tokenIdx}-${itemIdx}`}
+							tokens={item.tokens}
+							top={token.loose}
+						/>
+					</li>
+				{/each}
+			</ol>
+		{:else}
+			<ul>
+				{#each token.items as item, itemIdx}
+					<li>
+						<svelte:self
+							id={`${id}-${tokenIdx}-${itemIdx}`}
+							tokens={item.tokens}
+							top={token.loose}
+						/>
+					</li>
+				{/each}
+			</ul>
+		{/if}
+	{:else if token.type === 'html'}
+		{@html token.text}
+	{:else if token.type === 'paragraph'}
+		<p>
+			<MarkdownInlineTokens id={`${id}-${tokenIdx}-p`} tokens={token.tokens ?? []} />
+		</p>
+	{:else if token.type === 'text'}
+		{#if top}
+			<p>
+				{#if token.tokens}
+					<MarkdownInlineTokens id={`${id}-${tokenIdx}-t`} tokens={token.tokens} />
+				{:else}
+					{unescapeHtml(token.text)}
+				{/if}
+			</p>
+		{:else if token.tokens}
+			<MarkdownInlineTokens id={`${id}-${tokenIdx}-p`} tokens={token.tokens ?? []} />
 		{:else}
-			{@html marked.parse(token.raw, {
-				...defaults,
-				gfm: true,
-				breaks: true,
-				renderer
-			})}
+			{unescapeHtml(token.text)}
+		{/if}
+	{:else if token.type === 'inlineKatex'}
+		{#if token.text}
+			<KatexRenderer
+				content={revertSanitizedResponseContent(token.text)}
+				displayMode={token?.displayMode ?? false}
+			/>
+		{/if}
+	{:else if token.type === 'blockKatex'}
+		{#if token.text}
+			<KatexRenderer
+				content={revertSanitizedResponseContent(token.text)}
+				displayMode={token?.displayMode ?? false}
+			/>
 		{/if}
-	{/each}
-</div>
+	{:else if token.type === 'space'}
+		{''}
+	{:else}
+		{console.log('Unknown token', token)}
+	{/if}
+{/each}

+ 76 - 132
src/lib/components/chat/Messages/ResponseMessage.svelte

@@ -2,10 +2,6 @@
 	import { toast } from 'svelte-sonner';
 	import dayjs from 'dayjs';
 	import { marked } from 'marked';
-	import tippy from 'tippy.js';
-	import auto_render from 'katex/dist/contrib/auto-render.mjs';
-	import 'katex/dist/katex.min.css';
-	import mermaid from 'mermaid';
 
 	import { fade } from 'svelte/transition';
 	import { createEventDispatcher } from 'svelte';
@@ -79,104 +75,24 @@
 
 	let tokens;
 
+	import 'katex/dist/katex.min.css';
+
+	import markedKatex from '$lib/utils/katex-extension';
+
+	const options = {
+		throwOnError: false
+	};
+
+	marked.use(markedKatex(options));
+
 	$: (async () => {
 		if (message?.content) {
 			tokens = marked.lexer(
 				replaceTokens(sanitizeResponseContent(message?.content), model?.name, $user?.name)
 			);
-			// console.log(message?.content, tokens);
 		}
 	})();
 
-	$: if (message) {
-		renderStyling();
-	}
-
-	const renderStyling = async () => {
-		await tick();
-
-		if (tooltipInstance) {
-			tooltipInstance[0]?.destroy();
-		}
-
-		renderLatex();
-
-		if (message.info) {
-			let tooltipContent = '';
-			if (message.info.openai) {
-				tooltipContent = `prompt_tokens: ${message.info.prompt_tokens ?? 'N/A'}<br/>
-													completion_tokens: ${message.info.completion_tokens ?? 'N/A'}<br/>
-													total_tokens: ${message.info.total_tokens ?? 'N/A'}`;
-			} else {
-				tooltipContent = `response_token/s: ${
-					`${
-						Math.round(
-							((message.info.eval_count ?? 0) / (message.info.eval_duration / 1000000000)) * 100
-						) / 100
-					} tokens` ?? 'N/A'
-				}<br/>
-					prompt_token/s: ${
-						Math.round(
-							((message.info.prompt_eval_count ?? 0) /
-								(message.info.prompt_eval_duration / 1000000000)) *
-								100
-						) / 100 ?? 'N/A'
-					} tokens<br/>
-                    total_duration: ${
-											Math.round(((message.info.total_duration ?? 0) / 1000000) * 100) / 100 ??
-											'N/A'
-										}ms<br/>
-                    load_duration: ${
-											Math.round(((message.info.load_duration ?? 0) / 1000000) * 100) / 100 ?? 'N/A'
-										}ms<br/>
-                    prompt_eval_count: ${message.info.prompt_eval_count ?? 'N/A'}<br/>
-                    prompt_eval_duration: ${
-											Math.round(((message.info.prompt_eval_duration ?? 0) / 1000000) * 100) /
-												100 ?? 'N/A'
-										}ms<br/>
-                    eval_count: ${message.info.eval_count ?? 'N/A'}<br/>
-                    eval_duration: ${
-											Math.round(((message.info.eval_duration ?? 0) / 1000000) * 100) / 100 ?? 'N/A'
-										}ms<br/>
-                    approximate_total: ${approximateToHumanReadable(message.info.total_duration)}`;
-			}
-			tooltipInstance = tippy(`#info-${message.id}`, {
-				content: `<span class="text-xs" id="tooltip-${message.id}">${tooltipContent}</span>`,
-				allowHTML: true,
-				theme: 'dark',
-				arrow: false,
-				offset: [0, 4]
-			});
-		}
-	};
-
-	const renderLatex = () => {
-		let chatMessageElements = document
-			.getElementById(`message-${message.id}`)
-			?.getElementsByClassName('chat-assistant');
-
-		if (chatMessageElements) {
-			for (const element of chatMessageElements) {
-				auto_render(element, {
-					// customised options
-					// • auto-render specific keys, e.g.:
-					delimiters: [
-						{ left: '$$', right: '$$', display: false },
-						{ left: '$ ', right: ' $', display: false },
-						{ left: '\\pu{', right: '}', display: false },
-						{ left: '\\ce{', right: '}', display: false },
-						{ left: '\\(', right: '\\)', display: false },
-						{ left: '( ', right: ' )', display: false },
-						{ left: '\\[', right: '\\]', display: false },
-						{ left: '[ ', right: ' ]', display: false }
-					],
-					// • rendering keys, e.g.:
-					throwOnError: false
-				});
-			}
-		}
-	};
-
 	const playAudio = (idx) => {
 		return new Promise((res) => {
 			speakingIdx = idx;
@@ -242,7 +158,7 @@
 							const res = await synthesizeOpenAISpeech(
 								localStorage.token,
 								$settings?.audio?.tts?.defaultVoice === $config.audio.tts.voice
-									? $settings?.audio?.tts?.voice ?? $config?.audio?.tts?.voice
+									? ($settings?.audio?.tts?.voice ?? $config?.audio?.tts?.voice)
 									: $config?.audio?.tts?.voice,
 								sentence
 							).catch((error) => {
@@ -330,14 +246,12 @@
 		editedContent = '';
 
 		await tick();
-		renderStyling();
 	};
 
 	const cancelEditMessage = async () => {
 		edit = false;
 		editedContent = '';
 		await tick();
-		renderStyling();
 	};
 
 	const generateImage = async (message) => {
@@ -362,21 +276,11 @@
 	$: if (!edit) {
 		(async () => {
 			await tick();
-			renderStyling();
-
-			await mermaid.run({
-				querySelector: '.mermaid'
-			});
 		})();
 	}
 
 	onMount(async () => {
 		await tick();
-		renderStyling();
-
-		await mermaid.run({
-			querySelector: '.mermaid'
-		});
 	});
 </script>
 
@@ -420,7 +324,7 @@
 				{/if}
 
 				<div
-					class="prose chat-{message.role} w-full max-w-full dark:prose-invert prose-p:my-0 prose-img:my-1 prose-headings:my-1 prose-pre:my-0 prose-table:my-0 prose-blockquote:my-0 prose-ul:-my-2 prose-ol:-my-2 prose-li:-my-3 whitespace-pre-line"
+					class="prose chat-{message.role} w-full max-w-full dark:prose-invert prose-p:my-0 prose-img:my-1 prose-headings:my-1 prose-pre:my-0 prose-table:my-0 prose-blockquote:my-0 prose-ul:-my-0 prose-ol:-my-0 prose-li:-my-0 whitespace-pre-line"
 				>
 					<div>
 						{#if (message?.statusHistory ?? [...(message?.status ? [message?.status] : [])]).length > 0}
@@ -841,31 +745,71 @@
 								{/if}
 
 								{#if message.info}
-									<Tooltip content={$i18n.t('Generation Info')} placement="bottom">
-										<button
-											class=" {isLastMessage
-												? 'visible'
-												: 'invisible group-hover:visible'} p-1.5 hover:bg-black/5 dark:hover:bg-white/5 rounded-lg dark:hover:text-white hover:text-black transition whitespace-pre-wrap"
-											on:click={() => {
-												console.log(message);
-											}}
-											id="info-{message.id}"
-										>
-											<svg
-												xmlns="http://www.w3.org/2000/svg"
-												fill="none"
-												viewBox="0 0 24 24"
-												stroke-width="2.3"
-												stroke="currentColor"
-												class="w-4 h-4"
+									<Tooltip
+										content={message.info.openai
+											? `prompt_tokens: ${message.info.prompt_tokens ?? 'N/A'}<br/>
+													completion_tokens: ${message.info.completion_tokens ?? 'N/A'}<br/>
+													total_tokens: ${message.info.total_tokens ?? 'N/A'}`
+											: `response_token/s: ${
+													`${
+														Math.round(
+															((message.info.eval_count ?? 0) /
+																(message.info.eval_duration / 1000000000)) *
+																100
+														) / 100
+													} tokens` ?? 'N/A'
+												}<br/>
+					prompt_token/s: ${
+						Math.round(
+							((message.info.prompt_eval_count ?? 0) /
+								(message.info.prompt_eval_duration / 1000000000)) *
+								100
+						) / 100 ?? 'N/A'
+					} tokens<br/>
+		            total_duration: ${
+									Math.round(((message.info.total_duration ?? 0) / 1000000) * 100) / 100 ?? 'N/A'
+								}ms<br/>
+		            load_duration: ${
+									Math.round(((message.info.load_duration ?? 0) / 1000000) * 100) / 100 ?? 'N/A'
+								}ms<br/>
+		            prompt_eval_count: ${message.info.prompt_eval_count ?? 'N/A'}<br/>
+		            prompt_eval_duration: ${
+									Math.round(((message.info.prompt_eval_duration ?? 0) / 1000000) * 100) / 100 ??
+									'N/A'
+								}ms<br/>
+		            eval_count: ${message.info.eval_count ?? 'N/A'}<br/>
+		            eval_duration: ${
+									Math.round(((message.info.eval_duration ?? 0) / 1000000) * 100) / 100 ?? 'N/A'
+								}ms<br/>
+		            approximate_total: ${approximateToHumanReadable(message.info.total_duration)}`}
+										placement="top"
+									>
+										<Tooltip content={$i18n.t('Generation Info')} placement="bottom">
+											<button
+												class=" {isLastMessage
+													? 'visible'
+													: 'invisible group-hover:visible'} p-1.5 hover:bg-black/5 dark:hover:bg-white/5 rounded-lg dark:hover:text-white hover:text-black transition whitespace-pre-wrap"
+												on:click={() => {
+													console.log(message);
+												}}
+												id="info-{message.id}"
 											>
-												<path
-													stroke-linecap="round"
-													stroke-linejoin="round"
-													d="M11.25 11.25l.041-.02a.75.75 0 011.063.852l-.708 2.836a.75.75 0 001.063.853l.041-.021M21 12a9 9 0 11-18 0 9 9 0 0118 0zm-9-3.75h.008v.008H12V8.25z"
-												/>
-											</svg>
-										</button>
+												<svg
+													xmlns="http://www.w3.org/2000/svg"
+													fill="none"
+													viewBox="0 0 24 24"
+													stroke-width="2.3"
+													stroke="currentColor"
+													class="w-4 h-4"
+												>
+													<path
+														stroke-linecap="round"
+														stroke-linejoin="round"
+														d="M11.25 11.25l.041-.02a.75.75 0 011.063.852l-.708 2.836a.75.75 0 001.063.853l.041-.021M21 12a9 9 0 11-18 0 9 9 0 0118 0zm-9-3.75h.008v.008H12V8.25z"
+													/>
+												</svg>
+											</button>
+										</Tooltip>
 									</Tooltip>
 								{/if}
 

+ 7 - 7
src/lib/components/chat/Messages/UserMessage.svelte

@@ -62,8 +62,8 @@
 	{#if !($settings?.chatBubble ?? true)}
 		<ProfileImage
 			src={message.user
-				? $models.find((m) => m.id === message.user)?.info?.meta?.profile_image_url ?? '/user.png'
-				: user?.profile_image_url ?? '/user.png'}
+				? ($models.find((m) => m.id === message.user)?.info?.meta?.profile_image_url ?? '/user.png')
+				: (user?.profile_image_url ?? '/user.png')}
 		/>
 	{/if}
 	<div class="w-full overflow-hidden pl-1">
@@ -96,7 +96,7 @@
 			{#if message.files}
 				<div class="mt-2.5 mb-1 w-full flex flex-col justify-end overflow-x-auto gap-1 flex-wrap">
 					{#each message.files as file}
-						<div class={$settings?.chatBubble ?? true ? 'self-end' : ''}>
+						<div class={($settings?.chatBubble ?? true) ? 'self-end' : ''}>
 							{#if file.type === 'image'}
 								<img src={file.url} alt="input" class=" max-h-96 rounded-lg" draggable="false" />
 							{:else}
@@ -162,12 +162,12 @@
 				</div>
 			{:else}
 				<div class="w-full">
-					<div class="flex {$settings?.chatBubble ?? true ? 'justify-end' : ''} mb-2">
+					<div class="flex {($settings?.chatBubble ?? true) ? 'justify-end' : ''} mb-2">
 						<div
-							class="rounded-3xl {$settings?.chatBubble ?? true
+							class="rounded-3xl {($settings?.chatBubble ?? true)
 								? `max-w-[90%] px-5 py-2  bg-gray-50 dark:bg-gray-850 ${
 										message.files ? 'rounded-tr-lg' : ''
-								  }`
+									}`
 								: ''}  "
 						>
 							<pre id="user-message">{message.content}</pre>
@@ -175,7 +175,7 @@
 					</div>
 
 					<div
-						class=" flex {$settings?.chatBubble ?? true
+						class=" flex {($settings?.chatBubble ?? true)
 							? 'justify-end'
 							: ''}  text-gray-600 dark:text-gray-500"
 					>

+ 1 - 1
src/lib/components/chat/ModelSelector/Selector.svelte

@@ -66,7 +66,7 @@
 	$: filteredItems = searchValue
 		? fuse.search(searchValue).map((e) => {
 				return e.item;
-		  })
+			})
 		: items.filter((item) => !item.model?.info?.meta?.hidden);
 
 	const pullModelHandler = async () => {

+ 2 - 2
src/lib/components/chat/Settings/About.svelte

@@ -65,8 +65,8 @@
 							{updateAvailable === null
 								? $i18n.t('Checking for updates...')
 								: updateAvailable
-								? `(v${version.latest} ${$i18n.t('available!')})`
-								: $i18n.t('(latest)')}
+									? `(v${version.latest} ${$i18n.t('available!')})`
+									: $i18n.t('(latest)')}
 						</a>
 					</div>
 

+ 47 - 0
src/lib/components/chat/Settings/Advanced/AdvancedParams.svelte

@@ -29,6 +29,7 @@
 		use_mmap: null,
 		use_mlock: null,
 		num_thread: null,
+		num_gpu: null,
 		template: null
 	};
 
@@ -864,6 +865,52 @@
 			{/if}
 		</div>
 
+		<div class=" py-0.5 w-full justify-between">
+			<div class="flex w-full justify-between">
+				<div class=" self-center text-xs font-medium">{$i18n.t('num_gpu (Ollama)')}</div>
+
+				<button
+					class="p-1 px-3 text-xs flex rounded transition flex-shrink-0 outline-none"
+					type="button"
+					on:click={() => {
+						params.num_gpu = (params?.num_gpu ?? null) === null ? 0 : null;
+					}}
+				>
+					{#if (params?.num_gpu ?? null) === null}
+						<span class="ml-2 self-center">{$i18n.t('Default')}</span>
+					{:else}
+						<span class="ml-2 self-center">{$i18n.t('Custom')}</span>
+					{/if}
+				</button>
+			</div>
+
+			{#if (params?.num_gpu ?? null) !== null}
+				<div class="flex mt-0.5 space-x-2">
+					<div class=" flex-1">
+						<input
+							id="steps-range"
+							type="range"
+							min="0"
+							max="256"
+							step="1"
+							bind:value={params.num_gpu}
+							class="w-full h-2 rounded-lg appearance-none cursor-pointer dark:bg-gray-700"
+						/>
+					</div>
+					<div class="">
+						<input
+							bind:value={params.num_gpu}
+							type="number"
+							class=" bg-transparent text-center w-14"
+							min="0"
+							max="256"
+							step="1"
+						/>
+					</div>
+				</div>
+			{/if}
+		</div>
+
 		<!-- <div class=" py-0.5 w-full justify-between">
 			<div class="flex w-full justify-between">
 				<div class=" self-center text-xs font-medium">{$i18n.t('Template')}</div>

+ 28 - 0
src/lib/components/chat/Settings/Interface.svelte

@@ -34,6 +34,7 @@
 
 	let showEmojiInCall = false;
 	let voiceInterruption = false;
+	let hapticFeedback = false;
 
 	const toggleSplitLargeChunks = async () => {
 		splitLargeChunks = !splitLargeChunks;
@@ -70,6 +71,11 @@
 		saveSettings({ voiceInterruption: voiceInterruption });
 	};
 
+	const toggleHapticFeedback = async () => {
+		hapticFeedback = !hapticFeedback;
+		saveSettings({ hapticFeedback: hapticFeedback });
+	};
+
 	const toggleUserLocation = async () => {
 		userLocation = !userLocation;
 
@@ -151,6 +157,8 @@
 		chatDirection = $settings.chatDirection ?? 'LTR';
 		userLocation = $settings.userLocation ?? false;
 
+		hapticFeedback = $settings.hapticFeedback ?? false;
+
 		defaultModelId = $settings?.models?.at(0) ?? '';
 		if ($config?.default_models) {
 			defaultModelId = $config.default_models.split(',')[0];
@@ -438,6 +446,26 @@
 				</div>
 			</div>
 
+			<div>
+				<div class=" py-0.5 flex w-full justify-between">
+					<div class=" self-center text-xs">{$i18n.t('Haptic Feedback')}</div>
+
+					<button
+						class="p-1 px-3 text-xs flex rounded transition"
+						on:click={() => {
+							toggleHapticFeedback();
+						}}
+						type="button"
+					>
+						{#if hapticFeedback === true}
+							<span class="ml-2 self-center">{$i18n.t('On')}</span>
+						{:else}
+							<span class="ml-2 self-center">{$i18n.t('Off')}</span>
+						{/if}
+					</button>
+				</div>
+			</div>
+
 			<div class=" my-1.5 text-sm font-medium">{$i18n.t('Voice')}</div>
 
 			<div>

+ 1 - 1
src/lib/components/common/Valves.svelte

@@ -27,7 +27,7 @@
 					on:click={() => {
 						valves[property] =
 							(valves[property] ?? null) === null
-								? valvesSpec.properties[property]?.default ?? ''
+								? (valvesSpec.properties[property]?.default ?? '')
 								: null;
 
 						dispatch('change');

+ 6 - 6
src/lib/components/layout/Sidebar/ChatItem.svelte

@@ -83,8 +83,8 @@
 			class=" w-full flex justify-between rounded-xl px-3 py-2 {chat.id === $chatId || confirmEdit
 				? 'bg-gray-200 dark:bg-gray-900'
 				: selected
-				? 'bg-gray-100 dark:bg-gray-950'
-				: 'group-hover:bg-gray-100 dark:group-hover:bg-gray-950'}  whitespace-nowrap text-ellipsis"
+					? 'bg-gray-100 dark:bg-gray-950'
+					: 'group-hover:bg-gray-100 dark:group-hover:bg-gray-950'}  whitespace-nowrap text-ellipsis"
 		>
 			<input
 				use:focusEdit
@@ -97,8 +97,8 @@
 			class=" w-full flex justify-between rounded-xl px-3 py-2 {chat.id === $chatId || confirmEdit
 				? 'bg-gray-200 dark:bg-gray-900'
 				: selected
-				? 'bg-gray-100 dark:bg-gray-950'
-				: ' group-hover:bg-gray-100 dark:group-hover:bg-gray-950'}  whitespace-nowrap text-ellipsis"
+					? 'bg-gray-100 dark:bg-gray-950'
+					: ' group-hover:bg-gray-100 dark:group-hover:bg-gray-950'}  whitespace-nowrap text-ellipsis"
 			href="/c/{chat.id}"
 			on:click={() => {
 				dispatch('select');
@@ -134,8 +134,8 @@
         {chat.id === $chatId || confirmEdit
 			? 'from-gray-200 dark:from-gray-900'
 			: selected
-			? 'from-gray-100 dark:from-gray-950'
-			: 'invisible group-hover:visible from-gray-100 dark:from-gray-950'}
+				? 'from-gray-100 dark:from-gray-950'
+				: 'invisible group-hover:visible from-gray-100 dark:from-gray-950'}
             absolute right-[10px] top-[6px] py-1 pr-2 pl-5 bg-gradient-to-l from-80%
 
               to-transparent"

+ 1 - 1
src/lib/components/playground/Playground.svelte

@@ -121,7 +121,7 @@
 						? {
 								role: 'system',
 								content: system
-						  }
+							}
 						: undefined,
 					...messages
 				].filter((message) => message)

+ 1 - 1
src/lib/components/workspace/Documents.svelte

@@ -88,7 +88,7 @@
 				tags?.length > 0
 					? {
 							tags: tags
-					  }
+						}
 					: null
 			).catch((error) => {
 				toast.error(error);

+ 2 - 2
src/lib/components/workspace/Models.svelte

@@ -292,7 +292,7 @@
 			>
 				<div class=" self-start w-8 pt-0.5">
 					<div
-						class=" rounded-full bg-stone-700 {model?.info?.meta?.hidden ?? false
+						class=" rounded-full bg-stone-700 {(model?.info?.meta?.hidden ?? false)
 							? 'brightness-90 dark:brightness-50'
 							: ''} "
 					>
@@ -305,7 +305,7 @@
 				</div>
 
 				<div
-					class=" flex-1 self-center {model?.info?.meta?.hidden ?? false ? 'text-gray-500' : ''}"
+					class=" flex-1 self-center {(model?.info?.meta?.hidden ?? false) ? 'text-gray-500' : ''}"
 				>
 					<div class="  font-semibold line-clamp-1">{model.name}</div>
 					<div class=" text-xs overflow-hidden text-ellipsis line-clamp-1">

+ 1 - 1
src/lib/components/workspace/Models/Knowledge/Selector.svelte

@@ -25,7 +25,7 @@
 							title: $i18n.t('All Documents'),
 							collection_names: $documents.map((doc) => doc.collection_name)
 						}
-				  ]
+					]
 				: []),
 			...$documents
 				.reduce((a, e, i, arr) => {

+ 5 - 0
src/lib/i18n/locales/ar-BH/translation.json

@@ -134,8 +134,10 @@
 	"Continue Response": "متابعة الرد",
 	"Continue with {{provider}}": "",
 	"Controls": "",
+	"Copied": "",
 	"Copied shared chat URL to clipboard!": "تم نسخ عنوان URL للدردشة المشتركة إلى الحافظة",
 	"Copy": "نسخ",
+	"Copy Code": "",
 	"Copy last code block": "انسخ كتلة التعليمات البرمجية الأخيرة",
 	"Copy last response": "انسخ الرد الأخير",
 	"Copy Link": "أنسخ الرابط",
@@ -312,6 +314,7 @@
 	"Google PSE API Key": "مفتاح واجهة برمجة تطبيقات PSE من Google",
 	"Google PSE Engine Id": "معرف محرك PSE من Google",
 	"h:mm a": "الساعة:الدقائق صباحا/مساء",
+	"Haptic Feedback": "",
 	"has no conversations.": "ليس لديه محادثات.",
 	"Hello, {{name}}": " {{name}} مرحبا",
 	"Help": "مساعدة",
@@ -416,6 +419,7 @@
 	"Note: If you set a minimum score, the search will only return documents with a score greater than or equal to the minimum score.": "ملاحظة: إذا قمت بتعيين الحد الأدنى من النقاط، فلن يؤدي البحث إلا إلى إرجاع المستندات التي لها نقاط أكبر من أو تساوي الحد الأدنى من النقاط.",
 	"Notifications": "إشعارات",
 	"November": "نوفمبر",
+	"num_gpu (Ollama)": "",
 	"num_thread (Ollama)": "num_thread (أولاما)",
 	"OAuth ID": "",
 	"October": "اكتوبر",
@@ -499,6 +503,7 @@
 	"Rosé Pine": "Rosé Pine",
 	"Rosé Pine Dawn": "Rosé Pine Dawn",
 	"RTL": "من اليمين إلى اليسار",
+	"Run": "",
 	"Run Llama 2, Code Llama, and other models. Customize and create your own.": "",
 	"Running": "",
 	"Save": "حفظ",

+ 5 - 0
src/lib/i18n/locales/bg-BG/translation.json

@@ -134,8 +134,10 @@
 	"Continue Response": "Продължи отговора",
 	"Continue with {{provider}}": "",
 	"Controls": "",
+	"Copied": "",
 	"Copied shared chat URL to clipboard!": "Копирана е връзката за чат!",
 	"Copy": "Копирай",
+	"Copy Code": "",
 	"Copy last code block": "Копиране на последен код блок",
 	"Copy last response": "Копиране на последен отговор",
 	"Copy Link": "Копиране на връзка",
@@ -312,6 +314,7 @@
 	"Google PSE API Key": "Google PSE API ключ",
 	"Google PSE Engine Id": "Идентификатор на двигателя на Google PSE",
 	"h:mm a": "h:mm a",
+	"Haptic Feedback": "",
 	"has no conversations.": "няма разговори.",
 	"Hello, {{name}}": "Здравей, {{name}}",
 	"Help": "Помощ",
@@ -416,6 +419,7 @@
 	"Note: If you set a minimum score, the search will only return documents with a score greater than or equal to the minimum score.": "Забележка: Ако зададете минимален резултат, търсенето ще върне само документи с резултат, по-голям или равен на минималния резултат.",
 	"Notifications": "Десктоп Известия",
 	"November": "Ноември",
+	"num_gpu (Ollama)": "",
 	"num_thread (Ollama)": "num_thread (Ollama)",
 	"OAuth ID": "",
 	"October": "Октомври",
@@ -499,6 +503,7 @@
 	"Rosé Pine": "Rosé Pine",
 	"Rosé Pine Dawn": "Rosé Pine Dawn",
 	"RTL": "RTL",
+	"Run": "",
 	"Run Llama 2, Code Llama, and other models. Customize and create your own.": "",
 	"Running": "",
 	"Save": "Запис",

+ 5 - 0
src/lib/i18n/locales/bn-BD/translation.json

@@ -134,8 +134,10 @@
 	"Continue Response": "যাচাই করুন",
 	"Continue with {{provider}}": "",
 	"Controls": "",
+	"Copied": "",
 	"Copied shared chat URL to clipboard!": "শেয়ারকৃত কথা-ব্যবহারের URL ক্লিপবোর্ডে কপি করা হয়েছে!",
 	"Copy": "অনুলিপি",
+	"Copy Code": "",
 	"Copy last code block": "সর্বশেষ কোড ব্লক কপি করুন",
 	"Copy last response": "সর্বশেষ রেসপন্স কপি করুন",
 	"Copy Link": "লিংক কপি করুন",
@@ -312,6 +314,7 @@
 	"Google PSE API Key": "গুগল পিএসই এপিআই কী",
 	"Google PSE Engine Id": "গুগল পিএসই ইঞ্জিন আইডি",
 	"h:mm a": "h:mm a",
+	"Haptic Feedback": "",
 	"has no conversations.": "কোন কনভার্সেশন আছে না।",
 	"Hello, {{name}}": "হ্যালো, {{name}}",
 	"Help": "সহায়তা",
@@ -416,6 +419,7 @@
 	"Note: If you set a minimum score, the search will only return documents with a score greater than or equal to the minimum score.": "দ্রষ্টব্য: আপনি যদি ন্যূনতম স্কোর সেট করেন তবে অনুসন্ধানটি কেবলমাত্র ন্যূনতম স্কোরের চেয়ে বেশি বা সমান স্কোর সহ নথিগুলি ফেরত দেবে।",
 	"Notifications": "নোটিফিকেশনসমূহ",
 	"November": "নভেম্বর",
+	"num_gpu (Ollama)": "",
 	"num_thread (Ollama)": "num_thread (ওলামা)",
 	"OAuth ID": "",
 	"October": "অক্টোবর",
@@ -499,6 +503,7 @@
 	"Rosé Pine": "রোজ পাইন",
 	"Rosé Pine Dawn": "ভোরের রোজ পাইন",
 	"RTL": "RTL",
+	"Run": "",
 	"Run Llama 2, Code Llama, and other models. Customize and create your own.": "",
 	"Running": "",
 	"Save": "সংরক্ষণ",

+ 7 - 2
src/lib/i18n/locales/ca-ES/translation.json

@@ -134,8 +134,10 @@
 	"Continue Response": "Continuar la resposta",
 	"Continue with {{provider}}": "Continuar amb {{provider}}",
 	"Controls": "Controls",
+	"Copied": "Copiat",
 	"Copied shared chat URL to clipboard!": "S'ha copiat l'URL compartida al porta-retalls!",
 	"Copy": "Copiar",
+	"Copy Code": "Copiar el codi",
 	"Copy last code block": "Copiar l'últim bloc de codi",
 	"Copy last response": "Copiar l'última resposta",
 	"Copy Link": "Copiar l'enllaç",
@@ -312,6 +314,7 @@
 	"Google PSE API Key": "Clau API PSE de Google",
 	"Google PSE Engine Id": "Identificador del motor PSE de Google",
 	"h:mm a": "h:mm a",
+	"Haptic Feedback": "",
 	"has no conversations.": "no té converses.",
 	"Hello, {{name}}": "Hola, {{name}}",
 	"Help": "Ajuda",
@@ -416,6 +419,7 @@
 	"Note: If you set a minimum score, the search will only return documents with a score greater than or equal to the minimum score.": "Nota: Si s'estableix una puntuació mínima, la cerca només retornarà documents amb una puntuació major o igual a la puntuació mínima.",
 	"Notifications": "Notificacions",
 	"November": "Novembre",
+	"num_gpu (Ollama)": "",
 	"num_thread (Ollama)": "num_thread (Ollama)",
 	"OAuth ID": "ID OAuth",
 	"October": "Octubre",
@@ -499,6 +503,7 @@
 	"Rosé Pine": "Rosé Pine",
 	"Rosé Pine Dawn": "Albada Rosé Pine",
 	"RTL": "RTL",
+	"Run": "Executar",
 	"Run Llama 2, Code Llama, and other models. Customize and create your own.": "Executa Llama 2, Code Llama, i altres models. Personalitza i crea els teus propis models.",
 	"Running": "S'està executant",
 	"Save": "Desar",
@@ -509,7 +514,7 @@
 	"Scan": "Escanejar",
 	"Scan complete!": "Escaneig completat!",
 	"Scan for documents from {{path}}": "Escanejar documents des de {{path}}",
-	"Scroll to bottom when switching between branches": "",
+	"Scroll to bottom when switching between branches": "Desplaçar a la part inferior quan es canviï de branca",
 	"Search": "Cercar",
 	"Search a model": "Cercar un model",
 	"Search Chats": "Cercar xats",
@@ -624,7 +629,7 @@
 	"To access the WebUI, please reach out to the administrator. Admins can manage user statuses from the Admin Panel.": "Per accedir a la WebUI, poseu-vos en contacte amb l'administrador. Els administradors poden gestionar els estats dels usuaris des del tauler d'administració.",
 	"To add documents here, upload them to the \"Documents\" workspace first.": "Per afegir documents aquí, puja'ls primer a l'espai de treball \"Documents\".",
 	"to chat input.": "a l'entrada del xat.",
-	"To select actions here, add them to the \"Functions\" workspace first.": "Per seleccionar accions aquí, afegeix-los primer a l'espai de treball \"Funcions\".",
+	"To select actions here, add them to the \"Functions\" workspace first.": "Per seleccionar accions aquí, afegeix-les primer a l'espai de treball \"Funcions\".",
 	"To select filters here, add them to the \"Functions\" workspace first.": "Per seleccionar filtres aquí, afegeix-los primer a l'espai de treball \"Funcions\".",
 	"To select toolkits here, add them to the \"Tools\" workspace first.": "Per seleccionar kits d'eines aquí, afegeix-los primer a l'espai de treball \"Eines\".",
 	"Today": "Avui",

+ 5 - 0
src/lib/i18n/locales/ceb-PH/translation.json

@@ -134,8 +134,10 @@
 	"Continue Response": "",
 	"Continue with {{provider}}": "",
 	"Controls": "",
+	"Copied": "",
 	"Copied shared chat URL to clipboard!": "",
 	"Copy": "",
+	"Copy Code": "",
 	"Copy last code block": "Kopyaha ang katapusang bloke sa code",
 	"Copy last response": "Kopyaha ang kataposang tubag",
 	"Copy Link": "",
@@ -312,6 +314,7 @@
 	"Google PSE API Key": "",
 	"Google PSE Engine Id": "",
 	"h:mm a": "",
+	"Haptic Feedback": "",
 	"has no conversations.": "",
 	"Hello, {{name}}": "Maayong buntag, {{name}}",
 	"Help": "",
@@ -416,6 +419,7 @@
 	"Note: If you set a minimum score, the search will only return documents with a score greater than or equal to the minimum score.": "",
 	"Notifications": "Mga pahibalo sa desktop",
 	"November": "",
+	"num_gpu (Ollama)": "",
 	"num_thread (Ollama)": "",
 	"OAuth ID": "",
 	"October": "",
@@ -499,6 +503,7 @@
 	"Rosé Pine": "Rosé Pine",
 	"Rosé Pine Dawn": "Rosé Pine Dawn",
 	"RTL": "",
+	"Run": "",
 	"Run Llama 2, Code Llama, and other models. Customize and create your own.": "",
 	"Running": "",
 	"Save": "Tipigi",

+ 5 - 0
src/lib/i18n/locales/de-DE/translation.json

@@ -134,8 +134,10 @@
 	"Continue Response": "Antwort fortsetzen",
 	"Continue with {{provider}}": "Mit {{provider}} fortfahren",
 	"Controls": "",
+	"Copied": "",
 	"Copied shared chat URL to clipboard!": "Freigabelink in die Zwischenablage kopiert!",
 	"Copy": "Kopieren",
+	"Copy Code": "",
 	"Copy last code block": "Letzten Codeblock kopieren",
 	"Copy last response": "Letzte Antwort kopieren",
 	"Copy Link": "Link kopieren",
@@ -312,6 +314,7 @@
 	"Google PSE API Key": "Google PSE-API-Schlüssel",
 	"Google PSE Engine Id": "Google PSE-Engine-ID",
 	"h:mm a": "h:mm a",
+	"Haptic Feedback": "",
 	"has no conversations.": "hat keine Unterhaltungen.",
 	"Hello, {{name}}": "Hallo, {{name}}",
 	"Help": "Hilfe",
@@ -416,6 +419,7 @@
 	"Note: If you set a minimum score, the search will only return documents with a score greater than or equal to the minimum score.": "Hinweis: Wenn Sie eine Mindestpunktzahl festlegen, werden in der Suche nur Dokumente mit einer Punktzahl größer oder gleich der Mindestpunktzahl zurückgegeben.",
 	"Notifications": "Benachrichtigungen",
 	"November": "November",
+	"num_gpu (Ollama)": "",
 	"num_thread (Ollama)": "num_thread (Ollama)",
 	"OAuth ID": "OAuth-ID",
 	"October": "Oktober",
@@ -499,6 +503,7 @@
 	"Rosé Pine": "Rosé Pine",
 	"Rosé Pine Dawn": "Rosé Pine Dawn",
 	"RTL": "RTL",
+	"Run": "",
 	"Run Llama 2, Code Llama, and other models. Customize and create your own.": "",
 	"Running": "Läuft",
 	"Save": "Speichern",

+ 5 - 0
src/lib/i18n/locales/dg-DG/translation.json

@@ -134,8 +134,10 @@
 	"Continue Response": "",
 	"Continue with {{provider}}": "",
 	"Controls": "",
+	"Copied": "",
 	"Copied shared chat URL to clipboard!": "",
 	"Copy": "",
+	"Copy Code": "",
 	"Copy last code block": "Copy last code block",
 	"Copy last response": "Copy last response",
 	"Copy Link": "",
@@ -312,6 +314,7 @@
 	"Google PSE API Key": "",
 	"Google PSE Engine Id": "",
 	"h:mm a": "",
+	"Haptic Feedback": "",
 	"has no conversations.": "",
 	"Hello, {{name}}": "Much helo, {{name}}",
 	"Help": "",
@@ -416,6 +419,7 @@
 	"Note: If you set a minimum score, the search will only return documents with a score greater than or equal to the minimum score.": "",
 	"Notifications": "Notifications",
 	"November": "",
+	"num_gpu (Ollama)": "",
 	"num_thread (Ollama)": "",
 	"OAuth ID": "",
 	"October": "",
@@ -499,6 +503,7 @@
 	"Rosé Pine": "Rosé Pine",
 	"Rosé Pine Dawn": "Rosé Pine Dawn",
 	"RTL": "",
+	"Run": "",
 	"Run Llama 2, Code Llama, and other models. Customize and create your own.": "",
 	"Running": "",
 	"Save": "Save much wow",

+ 5 - 0
src/lib/i18n/locales/en-GB/translation.json

@@ -134,8 +134,10 @@
 	"Continue Response": "",
 	"Continue with {{provider}}": "",
 	"Controls": "",
+	"Copied": "",
 	"Copied shared chat URL to clipboard!": "",
 	"Copy": "",
+	"Copy Code": "",
 	"Copy last code block": "",
 	"Copy last response": "",
 	"Copy Link": "",
@@ -312,6 +314,7 @@
 	"Google PSE API Key": "",
 	"Google PSE Engine Id": "",
 	"h:mm a": "",
+	"Haptic Feedback": "",
 	"has no conversations.": "",
 	"Hello, {{name}}": "",
 	"Help": "",
@@ -416,6 +419,7 @@
 	"Note: If you set a minimum score, the search will only return documents with a score greater than or equal to the minimum score.": "",
 	"Notifications": "",
 	"November": "",
+	"num_gpu (Ollama)": "",
 	"num_thread (Ollama)": "",
 	"OAuth ID": "",
 	"October": "",
@@ -499,6 +503,7 @@
 	"Rosé Pine": "",
 	"Rosé Pine Dawn": "",
 	"RTL": "",
+	"Run": "",
 	"Run Llama 2, Code Llama, and other models. Customize and create your own.": "",
 	"Running": "",
 	"Save": "",

+ 5 - 0
src/lib/i18n/locales/en-US/translation.json

@@ -134,8 +134,10 @@
 	"Continue Response": "",
 	"Continue with {{provider}}": "",
 	"Controls": "",
+	"Copied": "",
 	"Copied shared chat URL to clipboard!": "",
 	"Copy": "",
+	"Copy Code": "",
 	"Copy last code block": "",
 	"Copy last response": "",
 	"Copy Link": "",
@@ -312,6 +314,7 @@
 	"Google PSE API Key": "",
 	"Google PSE Engine Id": "",
 	"h:mm a": "",
+	"Haptic Feedback": "",
 	"has no conversations.": "",
 	"Hello, {{name}}": "",
 	"Help": "",
@@ -416,6 +419,7 @@
 	"Note: If you set a minimum score, the search will only return documents with a score greater than or equal to the minimum score.": "",
 	"Notifications": "",
 	"November": "",
+	"num_gpu (Ollama)": "",
 	"num_thread (Ollama)": "",
 	"OAuth ID": "",
 	"October": "",
@@ -499,6 +503,7 @@
 	"Rosé Pine": "",
 	"Rosé Pine Dawn": "",
 	"RTL": "",
+	"Run": "",
 	"Run Llama 2, Code Llama, and other models. Customize and create your own.": "",
 	"Running": "",
 	"Save": "",

+ 5 - 0
src/lib/i18n/locales/es-ES/translation.json

@@ -134,8 +134,10 @@
 	"Continue Response": "Continuar Respuesta",
 	"Continue with {{provider}}": "Continuar con {{provider}}",
 	"Controls": "",
+	"Copied": "",
 	"Copied shared chat URL to clipboard!": "¡URL de chat compartido copiado al portapapeles!",
 	"Copy": "Copiar",
+	"Copy Code": "",
 	"Copy last code block": "Copia el último bloque de código",
 	"Copy last response": "Copia la última respuesta",
 	"Copy Link": "Copiar enlace",
@@ -312,6 +314,7 @@
 	"Google PSE API Key": "Clave API de Google PSE",
 	"Google PSE Engine Id": "ID del motor PSE de Google",
 	"h:mm a": "h:mm a",
+	"Haptic Feedback": "",
 	"has no conversations.": "no tiene conversaciones.",
 	"Hello, {{name}}": "Hola, {{name}}",
 	"Help": "Ayuda",
@@ -416,6 +419,7 @@
 	"Note: If you set a minimum score, the search will only return documents with a score greater than or equal to the minimum score.": "Nota: Si estableces una puntuación mínima, la búsqueda sólo devolverá documentos con una puntuación mayor o igual a la puntuación mínima.",
 	"Notifications": "Notificaciones",
 	"November": "Noviembre",
+	"num_gpu (Ollama)": "",
 	"num_thread (Ollama)": "num_thread (Ollama)",
 	"OAuth ID": "OAuth ID",
 	"October": "Octubre",
@@ -499,6 +503,7 @@
 	"Rosé Pine": "Rosé Pine",
 	"Rosé Pine Dawn": "Rosé Pine Dawn",
 	"RTL": "RTL",
+	"Run": "",
 	"Run Llama 2, Code Llama, and other models. Customize and create your own.": "",
 	"Running": "Ejecutando",
 	"Save": "Guardar",

+ 5 - 0
src/lib/i18n/locales/fa-IR/translation.json

@@ -134,8 +134,10 @@
 	"Continue Response": "ادامه پاسخ",
 	"Continue with {{provider}}": "",
 	"Controls": "",
+	"Copied": "",
 	"Copied shared chat URL to clipboard!": "URL چت به کلیپ بورد کپی شد!",
 	"Copy": "کپی",
+	"Copy Code": "",
 	"Copy last code block": "کپی آخرین بلوک کد",
 	"Copy last response": "کپی آخرین پاسخ",
 	"Copy Link": "کپی لینک",
@@ -312,6 +314,7 @@
 	"Google PSE API Key": "گوگل PSE API کلید",
 	"Google PSE Engine Id": "شناسه موتور PSE گوگل",
 	"h:mm a": "h:mm a",
+	"Haptic Feedback": "",
 	"has no conversations.": "ندارد.",
 	"Hello, {{name}}": "سلام، {{name}}",
 	"Help": "کمک",
@@ -416,6 +419,7 @@
 	"Note: If you set a minimum score, the search will only return documents with a score greater than or equal to the minimum score.": "توجه: اگر حداقل نمره را تعیین کنید، جستجو تنها اسنادی را با نمره بیشتر یا برابر با حداقل نمره باز می گرداند.",
 	"Notifications": "اعلان",
 	"November": "نوامبر",
+	"num_gpu (Ollama)": "",
 	"num_thread (Ollama)": "num_thread (اولاما)",
 	"OAuth ID": "",
 	"October": "اکتبر",
@@ -499,6 +503,7 @@
 	"Rosé Pine": "Rosé Pine",
 	"Rosé Pine Dawn": "Rosé Pine Dawn",
 	"RTL": "RTL",
+	"Run": "",
 	"Run Llama 2, Code Llama, and other models. Customize and create your own.": "",
 	"Running": "",
 	"Save": "ذخیره",

+ 5 - 0
src/lib/i18n/locales/fi-FI/translation.json

@@ -134,8 +134,10 @@
 	"Continue Response": "Jatka vastausta",
 	"Continue with {{provider}}": "",
 	"Controls": "",
+	"Copied": "",
 	"Copied shared chat URL to clipboard!": "Jaettu keskustelulinkki kopioitu leikepöydälle!",
 	"Copy": "Kopioi",
+	"Copy Code": "",
 	"Copy last code block": "Kopioi viimeisin koodilohko",
 	"Copy last response": "Kopioi viimeisin vastaus",
 	"Copy Link": "Kopioi linkki",
@@ -312,6 +314,7 @@
 	"Google PSE API Key": "Google PSE API -avain",
 	"Google PSE Engine Id": "Google PSE -moduulin tunnus",
 	"h:mm a": "h:mm a",
+	"Haptic Feedback": "",
 	"has no conversations.": "ei ole keskusteluja.",
 	"Hello, {{name}}": "Terve, {{name}}",
 	"Help": "Apua",
@@ -416,6 +419,7 @@
 	"Note: If you set a minimum score, the search will only return documents with a score greater than or equal to the minimum score.": "Huom: Jos asetat vähimmäispisteet, haku palauttaa vain asiakirjat, joiden pisteet ovat suurempia tai yhtä suuria kuin vähimmäispistemäärä.",
 	"Notifications": "Ilmoitukset",
 	"November": "marraskuu",
+	"num_gpu (Ollama)": "",
 	"num_thread (Ollama)": "num_thread (Ollama)",
 	"OAuth ID": "",
 	"October": "lokakuu",
@@ -499,6 +503,7 @@
 	"Rosé Pine": "Rosee-mänty",
 	"Rosé Pine Dawn": "Aamuinen Rosee-mänty",
 	"RTL": "RTL",
+	"Run": "",
 	"Run Llama 2, Code Llama, and other models. Customize and create your own.": "",
 	"Running": "",
 	"Save": "Tallenna",

+ 5 - 0
src/lib/i18n/locales/fr-CA/translation.json

@@ -134,8 +134,10 @@
 	"Continue Response": "Continuer la réponse",
 	"Continue with {{provider}}": "Continuer avec {{provider}}",
 	"Controls": "",
+	"Copied": "",
 	"Copied shared chat URL to clipboard!": "URL du chat copiée dans le presse-papiers\u00a0!",
 	"Copy": "Copier",
+	"Copy Code": "",
 	"Copy last code block": "Copier le dernier bloc de code",
 	"Copy last response": "Copier la dernière réponse",
 	"Copy Link": "Copier le lien",
@@ -312,6 +314,7 @@
 	"Google PSE API Key": "Clé API Google PSE",
 	"Google PSE Engine Id": "ID du moteur de recherche personnalisé de Google",
 	"h:mm a": "h:mm a",
+	"Haptic Feedback": "",
 	"has no conversations.": "n'a aucune conversation.",
 	"Hello, {{name}}": "Bonjour, {{name}}.",
 	"Help": "Aide",
@@ -416,6 +419,7 @@
 	"Note: If you set a minimum score, the search will only return documents with a score greater than or equal to the minimum score.": "Note : Si vous définissez un score minimum, seuls les documents ayant un score supérieur ou égal à ce score minimum seront retournés par la recherche.",
 	"Notifications": "Notifications",
 	"November": "Novembre",
+	"num_gpu (Ollama)": "",
 	"num_thread (Ollama)": "num_thread (Ollama)",
 	"OAuth ID": "ID OAuth",
 	"October": "Octobre",
@@ -499,6 +503,7 @@
 	"Rosé Pine": "Pin rosé",
 	"Rosé Pine Dawn": "Aube de Pin Rosé",
 	"RTL": "RTL",
+	"Run": "",
 	"Run Llama 2, Code Llama, and other models. Customize and create your own.": "",
 	"Running": "En cours d'exécution",
 	"Save": "Enregistrer",

+ 5 - 0
src/lib/i18n/locales/fr-FR/translation.json

@@ -134,8 +134,10 @@
 	"Continue Response": "Continuer la réponse",
 	"Continue with {{provider}}": "Continuer avec {{provider}}",
 	"Controls": "",
+	"Copied": "",
 	"Copied shared chat URL to clipboard!": "URL du chat copiée dans le presse-papiers\u00a0!",
 	"Copy": "Copier",
+	"Copy Code": "",
 	"Copy last code block": "Copier le dernier bloc de code",
 	"Copy last response": "Copier la dernière réponse",
 	"Copy Link": "Copier le lien",
@@ -312,6 +314,7 @@
 	"Google PSE API Key": "Clé API Google PSE",
 	"Google PSE Engine Id": "ID du moteur de recherche personnalisé de Google",
 	"h:mm a": "h:mm a",
+	"Haptic Feedback": "",
 	"has no conversations.": "n'a aucune conversation.",
 	"Hello, {{name}}": "Bonjour, {{name}}.",
 	"Help": "Aide",
@@ -416,6 +419,7 @@
 	"Note: If you set a minimum score, the search will only return documents with a score greater than or equal to the minimum score.": "Note : Si vous définissez un score minimum, seuls les documents ayant un score supérieur ou égal à ce score minimum seront retournés par la recherche.",
 	"Notifications": "Notifications",
 	"November": "Novembre",
+	"num_gpu (Ollama)": "",
 	"num_thread (Ollama)": "num_thread (Ollama)",
 	"OAuth ID": "ID OAuth",
 	"October": "Octobre",
@@ -499,6 +503,7 @@
 	"Rosé Pine": "Pin rosé",
 	"Rosé Pine Dawn": "Aube de Pin Rosé",
 	"RTL": "RTL",
+	"Run": "",
 	"Run Llama 2, Code Llama, and other models. Customize and create your own.": "",
 	"Running": "En cours d'exécution",
 	"Save": "Enregistrer",

+ 5 - 0
src/lib/i18n/locales/he-IL/translation.json

@@ -134,8 +134,10 @@
 	"Continue Response": "המשך תגובה",
 	"Continue with {{provider}}": "",
 	"Controls": "",
+	"Copied": "",
 	"Copied shared chat URL to clipboard!": "העתקת כתובת URL של צ'אט משותף ללוח!",
 	"Copy": "העתק",
+	"Copy Code": "",
 	"Copy last code block": "העתק את בלוק הקוד האחרון",
 	"Copy last response": "העתק את התגובה האחרונה",
 	"Copy Link": "העתק קישור",
@@ -312,6 +314,7 @@
 	"Google PSE API Key": "מפתח API של Google PSE",
 	"Google PSE Engine Id": "מזהה מנוע PSE של Google",
 	"h:mm a": "h:mm a",
+	"Haptic Feedback": "",
 	"has no conversations.": "אין שיחות.",
 	"Hello, {{name}}": "שלום, {{name}}",
 	"Help": "עזרה",
@@ -416,6 +419,7 @@
 	"Note: If you set a minimum score, the search will only return documents with a score greater than or equal to the minimum score.": "הערה: אם תקבע ציון מינימלי, החיפוש יחזיר רק מסמכים עם ציון שגבוה או שווה לציון המינימלי.",
 	"Notifications": "התראות",
 	"November": "נובמבר",
+	"num_gpu (Ollama)": "",
 	"num_thread (Ollama)": "num_thread (Ollama)",
 	"OAuth ID": "",
 	"October": "אוקטובר",
@@ -499,6 +503,7 @@
 	"Rosé Pine": "Rosé Pine",
 	"Rosé Pine Dawn": "Rosé Pine Dawn",
 	"RTL": "RTL",
+	"Run": "",
 	"Run Llama 2, Code Llama, and other models. Customize and create your own.": "",
 	"Running": "",
 	"Save": "שמור",

+ 5 - 0
src/lib/i18n/locales/hi-IN/translation.json

@@ -134,8 +134,10 @@
 	"Continue Response": "प्रतिक्रिया जारी रखें",
 	"Continue with {{provider}}": "",
 	"Controls": "",
+	"Copied": "",
 	"Copied shared chat URL to clipboard!": "साझा चैट URL को क्लिपबोर्ड पर कॉपी किया गया!",
 	"Copy": "कॉपी",
+	"Copy Code": "",
 	"Copy last code block": "अंतिम कोड ब्लॉक कॉपी करें",
 	"Copy last response": "अंतिम प्रतिक्रिया कॉपी करें",
 	"Copy Link": "लिंक को कॉपी करें",
@@ -312,6 +314,7 @@
 	"Google PSE API Key": "Google PSE API कुंजी",
 	"Google PSE Engine Id": "Google PSE इंजन आईडी",
 	"h:mm a": "h:mm a",
+	"Haptic Feedback": "",
 	"has no conversations.": "कोई बातचीत नहीं है",
 	"Hello, {{name}}": "नमस्ते, {{name}}",
 	"Help": "मदद",
@@ -416,6 +419,7 @@
 	"Note: If you set a minimum score, the search will only return documents with a score greater than or equal to the minimum score.": "ध्यान दें: यदि आप न्यूनतम स्कोर निर्धारित करते हैं, तो खोज केवल न्यूनतम स्कोर से अधिक या उसके बराबर स्कोर वाले दस्तावेज़ वापस लाएगी।",
 	"Notifications": "सूचनाएं",
 	"November": "नवंबर",
+	"num_gpu (Ollama)": "",
 	"num_thread (Ollama)": "num_thread (ओलामा)",
 	"OAuth ID": "",
 	"October": "अक्टूबर",
@@ -499,6 +503,7 @@
 	"Rosé Pine": "रोसे पिन",
 	"Rosé Pine Dawn": "रोसे पिन डेन",
 	"RTL": "RTL",
+	"Run": "",
 	"Run Llama 2, Code Llama, and other models. Customize and create your own.": "",
 	"Running": "",
 	"Save": "सहेजें",

+ 5 - 0
src/lib/i18n/locales/hr-HR/translation.json

@@ -134,8 +134,10 @@
 	"Continue Response": "Nastavi odgovor",
 	"Continue with {{provider}}": "",
 	"Controls": "",
+	"Copied": "",
 	"Copied shared chat URL to clipboard!": "URL dijeljenog razgovora kopiran u međuspremnik!",
 	"Copy": "Kopiraj",
+	"Copy Code": "",
 	"Copy last code block": "Kopiraj zadnji blok koda",
 	"Copy last response": "Kopiraj zadnji odgovor",
 	"Copy Link": "Kopiraj vezu",
@@ -312,6 +314,7 @@
 	"Google PSE API Key": "Google PSE API ključ",
 	"Google PSE Engine Id": "ID Google PSE modula",
 	"h:mm a": "h:mm a",
+	"Haptic Feedback": "",
 	"has no conversations.": "nema razgovora.",
 	"Hello, {{name}}": "Bok, {{name}}",
 	"Help": "Pomoć",
@@ -416,6 +419,7 @@
 	"Note: If you set a minimum score, the search will only return documents with a score greater than or equal to the minimum score.": "Napomena: Ako postavite minimalnu ocjenu, pretraga će vratiti samo dokumente s ocjenom većom ili jednakom minimalnoj ocjeni.",
 	"Notifications": "Obavijesti",
 	"November": "Studeni",
+	"num_gpu (Ollama)": "",
 	"num_thread (Ollama)": "num_thread (Ollama)",
 	"OAuth ID": "",
 	"October": "Listopad",
@@ -499,6 +503,7 @@
 	"Rosé Pine": "Rosé Pine",
 	"Rosé Pine Dawn": "Rosé Pine Dawn",
 	"RTL": "RTL",
+	"Run": "",
 	"Run Llama 2, Code Llama, and other models. Customize and create your own.": "",
 	"Running": "Pokrenuto",
 	"Save": "Spremi",

Some files were not shown because too many files changed in this diff