
Merge pull request #5362 from open-webui/dev

0.3.22
Timothy Jaeryang Baek committed 7 months ago
Commit 83855b713b
100 files changed: 3441 additions and 992 deletions
  1. CHANGELOG.md (+28 -0)
  2. Dockerfile (+7 -2)
  3. backend/open_webui/__init__.py (+13 -0)
  4. backend/open_webui/apps/audio/main.py (+70 -4)
  5. backend/open_webui/apps/images/main.py (+40 -0)
  6. backend/open_webui/apps/ollama/main.py (+54 -2)
  7. backend/open_webui/apps/openai/main.py (+15 -12)
  8. backend/open_webui/apps/rag/main.py (+143 -48)
  9. backend/open_webui/apps/rag/utils.py (+121 -83)
  10. backend/open_webui/apps/rag/vector/connector.py (+10 -0)
  11. backend/open_webui/apps/rag/vector/dbs/chroma.py (+122 -0)
  12. backend/open_webui/apps/rag/vector/dbs/milvus.py (+205 -0)
  13. backend/open_webui/apps/rag/vector/main.py (+19 -0)
  14. backend/open_webui/apps/socket/main.py (+14 -6)
  15. backend/open_webui/apps/webui/routers/auths.py (+2 -2)
  16. backend/open_webui/apps/webui/routers/memories.py (+52 -41)
  17. backend/open_webui/apps/webui/routers/models.py (+12 -20)
  18. backend/open_webui/apps/webui/utils.py (+22 -3)
  19. backend/open_webui/config.py (+87 -91)
  20. backend/open_webui/env.py (+29 -0)
  21. backend/open_webui/main.py (+60 -14)
  22. backend/open_webui/utils/payload.py (+2 -2)
  23. backend/open_webui/utils/security_headers.py (+115 -0)
  24. backend/requirements.txt (+5 -0)
  25. kubernetes/manifest/base/kustomization.yaml (+8 -0)
  26. kubernetes/manifest/gpu/kustomization.yaml (+8 -0)
  27. kubernetes/manifest/gpu/ollama-statefulset-gpu.yaml (+0 -0)
  28. kubernetes/manifest/kustomization.yaml (+0 -13)
  29. package-lock.json (+104 -49)
  30. package.json (+2 -2)
  31. pyproject.toml (+5 -0)
  32. src/app.css (+17 -15)
  33. src/app.html (+20 -14)
  34. src/lib/components/admin/Settings/Audio.svelte (+69 -1)
  35. src/lib/components/admin/Settings/Connections.svelte (+9 -7)
  36. src/lib/components/admin/Settings/Documents.svelte (+11 -5)
  37. src/lib/components/admin/Settings/Images.svelte (+97 -0)
  38. src/lib/components/admin/Settings/Interface.svelte (+3 -2)
  39. src/lib/components/chat/Chat.svelte (+246 -189)
  40. src/lib/components/chat/ChatControls.svelte (+100 -57)
  41. src/lib/components/chat/MessageInput.svelte (+18 -15)
  42. src/lib/components/chat/MessageInput/CallOverlay.svelte (+7 -7)
  43. src/lib/components/chat/MessageInput/Suggestions.svelte (+1 -1)
  44. src/lib/components/chat/Messages.svelte (+50 -31)
  45. src/lib/components/chat/Messages/Citations.svelte (+38 -27)
  46. src/lib/components/chat/Messages/CitationsModal.svelte (+1 -0)
  47. src/lib/components/chat/Messages/Markdown/MarkdownTokens.svelte (+25 -23)
  48. src/lib/components/chat/Messages/ProfileImage.svelte (+3 -15)
  49. src/lib/components/chat/Messages/ProfileImageBase.svelte (+21 -0)
  50. src/lib/components/chat/Messages/ResponseMessage.svelte (+49 -21)
  51. src/lib/components/chat/Messages/Skeleton.svelte (+1 -1)
  52. src/lib/components/chat/Messages/UserMessage.svelte (+1 -1)
  53. src/lib/components/chat/Overview.svelte (+174 -0)
  54. src/lib/components/chat/Overview/Flow.svelte (+36 -0)
  55. src/lib/components/chat/Overview/Node.svelte (+62 -0)
  56. src/lib/components/chat/Settings/Advanced/AdvancedParams.svelte (+1 -1)
  57. src/lib/components/chat/Settings/Audio.svelte (+21 -0)
  58. src/lib/components/chat/Settings/General.svelte (+23 -0)
  59. src/lib/components/chat/Settings/Interface.svelte (+30 -0)
  60. src/lib/components/common/ConfirmDialog.svelte (+4 -4)
  61. src/lib/components/common/Drawer.svelte (+12 -3)
  62. src/lib/components/common/ImagePreview.svelte (+3 -3)
  63. src/lib/components/icons/ArrowUpCircle.svelte (+19 -0)
  64. src/lib/components/icons/Clipboard.svelte (+19 -0)
  65. src/lib/components/icons/Map.svelte (+19 -0)
  66. src/lib/components/layout/Navbar.svelte (+18 -17)
  67. src/lib/components/layout/Navbar/Menu.svelte (+59 -12)
  68. src/lib/components/layout/Sidebar.svelte (+19 -9)
  69. src/lib/components/workspace/Documents.svelte (+1 -1)
  70. src/lib/components/workspace/Models.svelte (+56 -1)
  71. src/lib/components/workspace/Models/Capabilities.svelte (+44 -0)
  72. src/lib/components/workspace/Models/ModelMenu.svelte (+13 -0)
  73. src/lib/i18n/locales/ar-BH/translation.json (+19 -0)
  74. src/lib/i18n/locales/bg-BG/translation.json (+19 -0)
  75. src/lib/i18n/locales/bn-BD/translation.json (+19 -0)
  76. src/lib/i18n/locales/ca-ES/translation.json (+21 -2)
  77. src/lib/i18n/locales/ceb-PH/translation.json (+19 -0)
  78. src/lib/i18n/locales/de-DE/translation.json (+19 -0)
  79. src/lib/i18n/locales/dg-DG/translation.json (+19 -0)
  80. src/lib/i18n/locales/en-GB/translation.json (+19 -0)
  81. src/lib/i18n/locales/en-US/translation.json (+19 -0)
  82. src/lib/i18n/locales/es-ES/translation.json (+19 -0)
  83. src/lib/i18n/locales/fa-IR/translation.json (+19 -0)
  84. src/lib/i18n/locales/fi-FI/translation.json (+19 -0)
  85. src/lib/i18n/locales/fr-CA/translation.json (+19 -0)
  86. src/lib/i18n/locales/fr-FR/translation.json (+132 -113)
  87. src/lib/i18n/locales/he-IL/translation.json (+19 -0)
  88. src/lib/i18n/locales/hi-IN/translation.json (+19 -0)
  89. src/lib/i18n/locales/hr-HR/translation.json (+19 -0)
  90. src/lib/i18n/locales/id-ID/translation.json (+19 -0)
  91. src/lib/i18n/locales/it-IT/translation.json (+19 -0)
  92. src/lib/i18n/locales/ja-JP/translation.json (+19 -0)
  93. src/lib/i18n/locales/ka-GE/translation.json (+19 -0)
  94. src/lib/i18n/locales/ko-KR/translation.json (+19 -0)
  95. src/lib/i18n/locales/lt-LT/translation.json (+19 -0)
  96. src/lib/i18n/locales/ms-MY/translation.json (+19 -0)
  97. src/lib/i18n/locales/nb-NO/translation.json (+19 -0)
  98. src/lib/i18n/locales/nl-NL/translation.json (+19 -0)
  99. src/lib/i18n/locales/pa-IN/translation.json (+19 -0)
  100. src/lib/i18n/locales/pl-PL/translation.json (+19 -0)

+ 28 - 0
CHANGELOG.md

@@ -5,6 +5,34 @@ All notable changes to this project will be documented in this file.
 The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/),
 and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
 
+## [0.3.22] - 2024-09-19
+
+### Added
+
+- **⭐ Chat Overview**: Introducing a node-based interactive messages diagram for improved visualization of conversation flows.
+- **🔗 Multiple Vector DB Support**: Now supports multiple vector databases, including the newly added Milvus support. Community contributions for additional database support are highly encouraged!
+- **📡 Experimental Non-Stream Chat Completion**: Experimental feature allowing the use of OpenAI o1 models, which do not support streaming, ensuring more versatile model deployment.
+- **🔍 Experimental Colbert-AI Reranker Integration**: Added support for "jinaai/jina-colbert-v2" as a reranker, enhancing search relevance and accuracy. Note: it may not function at all on low-spec computers.
+- **🕸️ ENABLE_WEBSOCKET_SUPPORT**: Added environment variable for instances to ignore websocket upgrades, stabilizing connections on platforms with websocket issues.
+- **🔊 Azure Speech Service Integration**: Added support for Azure Speech services for Text-to-Speech (TTS).
+- **🎚️ Customizable Playback Speed**: Playback speed control is now available in Call mode settings, allowing users to adjust audio playback speed to their preferences.
+- **🧠 Enhanced Error Messaging**: System now displays helpful error messages directly to users during chat completion issues.
+- **📂 Save Model as Transparent PNG**: Model profile images are now saved as PNGs, supporting transparency and improving visual integration.
+- **📱 iPhone Compatibility Adjustments**: Added padding to accommodate the iPhone navigation bar, improving UI display on these devices.
+- **🔗 Secure Response Headers**: Implemented security response headers, bolstering web application security.
+- **🔧 Enhanced AUTOMATIC1111 Settings**: Users can now configure 'CFG Scale', 'Sampler', and 'Scheduler' parameters directly in the admin settings, enhancing workflow flexibility without source code modifications.
+- **🌍 i18n Updates**: Enhanced translations for Chinese, Ukrainian, Russian, and French, fostering a better localized experience.
+
+### Fixed
+
+- **🛠️ Chat Message Deletion**: Resolved issues with chat message deletion, ensuring a smoother user interaction and system stability.
+- **🔢 Ordered List Numbering**: Fixed the incorrect ordering in lists.
+
+### Changed
+
+- **🎨 Transparent Icon Handling**: Allowed model icons to be displayed on transparent backgrounds, improving UI aesthetics.
+- **📝 Improved RAG Template**: Enhanced Retrieval-Augmented Generation template, optimizing context handling and error checking for more precise operation.
+
 ## [0.3.21] - 2024-09-08
 
 ### Added

+ 7 - 2
Dockerfile

@@ -74,6 +74,10 @@ ENV RAG_EMBEDDING_MODEL="$USE_EMBEDDING_MODEL_DOCKER" \
 
 ## Hugging Face download cache ##
 ENV HF_HOME="/app/backend/data/cache/embedding/models"
+
+## Torch Extensions ##
+# ENV TORCH_EXTENSIONS_DIR="/.cache/torch_extensions"
+
 #### Other models ##########################################################
 
 WORKDIR /app/backend
@@ -96,7 +100,7 @@ RUN chown -R $UID:$GID /app $HOME
 RUN if [ "$USE_OLLAMA" = "true" ]; then \
     apt-get update && \
     # Install pandoc and netcat
-    apt-get install -y --no-install-recommends pandoc netcat-openbsd curl && \
+    apt-get install -y --no-install-recommends git build-essential pandoc netcat-openbsd curl && \
     apt-get install -y --no-install-recommends gcc python3-dev && \
     # for RAG OCR
     apt-get install -y --no-install-recommends ffmpeg libsm6 libxext6 && \
@@ -109,7 +113,7 @@ RUN if [ "$USE_OLLAMA" = "true" ]; then \
     else \
     apt-get update && \
     # Install pandoc, netcat and gcc
-    apt-get install -y --no-install-recommends pandoc gcc netcat-openbsd curl jq && \
+    apt-get install -y --no-install-recommends git build-essential pandoc gcc netcat-openbsd curl jq && \
     apt-get install -y --no-install-recommends gcc python3-dev && \
     # for RAG OCR
     apt-get install -y --no-install-recommends ffmpeg libsm6 libxext6 && \
@@ -157,5 +161,6 @@ USER $UID:$GID
 
 ARG BUILD_HASH
 ENV WEBUI_BUILD_VERSION=${BUILD_HASH}
+ENV DOCKER true
 
 CMD [ "bash", "start.sh"]

+ 13 - 0
backend/open_webui/__init__.py

@@ -39,6 +39,19 @@ def serve(
                 "/usr/local/lib/python3.11/site-packages/nvidia/cudnn/lib",
             ]
         )
+        try:
+            import torch
+
+            assert torch.cuda.is_available(), "CUDA not available"
+            typer.echo("CUDA seems to be working")
+        except Exception as e:
+            typer.echo(
+                "Error when testing CUDA but USE_CUDA_DOCKER is true. "
+                "Resetting USE_CUDA_DOCKER to false and removing "
+                f"LD_LIBRARY_PATH modifications: {e}"
+            )
+            os.environ["USE_CUDA_DOCKER"] = "false"
+            os.environ["LD_LIBRARY_PATH"] = ":".join(LD_LIBRARY_PATH)
     import open_webui.main  # we need set environment variables before importing main
 
     uvicorn.run(open_webui.main.app, host=host, port=port, forwarded_allow_ips="*")

+ 70 - 4
backend/open_webui/apps/audio/main.py

@@ -19,16 +19,18 @@ from open_webui.config import (
     AUDIO_TTS_OPENAI_API_KEY,
     AUDIO_TTS_SPLIT_ON,
     AUDIO_TTS_VOICE,
+    AUDIO_TTS_AZURE_SPEECH_REGION,
+    AUDIO_TTS_AZURE_SPEECH_OUTPUT_FORMAT,
     CACHE_DIR,
     CORS_ALLOW_ORIGIN,
-    DEVICE_TYPE,
     WHISPER_MODEL,
     WHISPER_MODEL_AUTO_UPDATE,
     WHISPER_MODEL_DIR,
     AppConfig,
 )
+
 from open_webui.constants import ERROR_MESSAGES
-from open_webui.env import SRC_LOG_LEVELS
+from open_webui.env import SRC_LOG_LEVELS, DEVICE_TYPE
 from fastapi import Depends, FastAPI, File, HTTPException, Request, UploadFile, status
 from fastapi.middleware.cors import CORSMiddleware
 from fastapi.responses import FileResponse
@@ -62,6 +64,9 @@ app.state.config.TTS_VOICE = AUDIO_TTS_VOICE
 app.state.config.TTS_API_KEY = AUDIO_TTS_API_KEY
 app.state.config.TTS_SPLIT_ON = AUDIO_TTS_SPLIT_ON
 
+app.state.config.TTS_AZURE_SPEECH_REGION = AUDIO_TTS_AZURE_SPEECH_REGION
+app.state.config.TTS_AZURE_SPEECH_OUTPUT_FORMAT = AUDIO_TTS_AZURE_SPEECH_OUTPUT_FORMAT
+
 # setting device type for whisper model
 whisper_device_type = DEVICE_TYPE if DEVICE_TYPE and DEVICE_TYPE == "cuda" else "cpu"
 log.info(f"whisper_device_type: {whisper_device_type}")
@@ -78,6 +83,8 @@ class TTSConfigForm(BaseModel):
     MODEL: str
     VOICE: str
     SPLIT_ON: str
+    AZURE_SPEECH_REGION: str
+    AZURE_SPEECH_OUTPUT_FORMAT: str
 
 
 class STTConfigForm(BaseModel):
@@ -130,6 +137,8 @@ async def get_audio_config(user=Depends(get_admin_user)):
             "MODEL": app.state.config.TTS_MODEL,
             "VOICE": app.state.config.TTS_VOICE,
             "SPLIT_ON": app.state.config.TTS_SPLIT_ON,
+            "AZURE_SPEECH_REGION": app.state.config.TTS_AZURE_SPEECH_REGION,
+            "AZURE_SPEECH_OUTPUT_FORMAT": app.state.config.TTS_AZURE_SPEECH_OUTPUT_FORMAT,
         },
         "stt": {
             "OPENAI_API_BASE_URL": app.state.config.STT_OPENAI_API_BASE_URL,
@@ -151,6 +160,10 @@ async def update_audio_config(
     app.state.config.TTS_MODEL = form_data.tts.MODEL
     app.state.config.TTS_VOICE = form_data.tts.VOICE
     app.state.config.TTS_SPLIT_ON = form_data.tts.SPLIT_ON
+    app.state.config.TTS_AZURE_SPEECH_REGION = form_data.tts.AZURE_SPEECH_REGION
+    app.state.config.TTS_AZURE_SPEECH_OUTPUT_FORMAT = (
+        form_data.tts.AZURE_SPEECH_OUTPUT_FORMAT
+    )
 
     app.state.config.STT_OPENAI_API_BASE_URL = form_data.stt.OPENAI_API_BASE_URL
     app.state.config.STT_OPENAI_API_KEY = form_data.stt.OPENAI_API_KEY
@@ -166,6 +179,8 @@ async def update_audio_config(
             "MODEL": app.state.config.TTS_MODEL,
             "VOICE": app.state.config.TTS_VOICE,
             "SPLIT_ON": app.state.config.TTS_SPLIT_ON,
+            "AZURE_SPEECH_REGION": app.state.config.TTS_AZURE_SPEECH_REGION,
+            "AZURE_SPEECH_OUTPUT_FORMAT": app.state.config.TTS_AZURE_SPEECH_OUTPUT_FORMAT,
         },
         "stt": {
             "OPENAI_API_BASE_URL": app.state.config.STT_OPENAI_API_BASE_URL,
@@ -301,6 +316,42 @@ async def speech(request: Request, user=Depends(get_verified_user)):
                 detail=error_detail,
             )
 
+    elif app.state.config.TTS_ENGINE == "azure":
+        payload = None
+        try:
+            payload = json.loads(body.decode("utf-8"))
+        except Exception as e:
+            log.exception(e)
+            raise HTTPException(status_code=400, detail="Invalid JSON payload")
+
+        region = app.state.config.TTS_AZURE_SPEECH_REGION
+        language = app.state.config.TTS_VOICE
+        locale = "-".join(app.state.config.TTS_VOICE.split("-")[:1])
+        output_format = app.state.config.TTS_AZURE_SPEECH_OUTPUT_FORMAT
+        url = f"https://{region}.tts.speech.microsoft.com/cognitiveservices/v1"
+
+        headers = {
+            "Ocp-Apim-Subscription-Key": app.state.config.TTS_API_KEY,
+            "Content-Type": "application/ssml+xml",
+            "X-Microsoft-OutputFormat": output_format,
+        }
+
+        data = f"""<speak version="1.0" xmlns="http://www.w3.org/2001/10/synthesis" xml:lang="{locale}">
+                <voice name="{language}">{payload["input"]}</voice>
+            </speak>"""
+
+        response = requests.post(url, headers=headers, data=data)
+
+        if response.status_code == 200:
+            with open(file_path, "wb") as f:
+                f.write(response.content)
+            return FileResponse(file_path)
+        else:
+            log.error(f"Error synthesizing speech - {response.reason}")
+            raise HTTPException(
+                status_code=500, detail=f"Error synthesizing speech - {response.reason}"
+            )
+
 
 @app.post("/transcriptions")
 def transcribe(
@@ -309,7 +360,7 @@ def transcribe(
 ):
     log.info(f"file.content_type: {file.content_type}")
 
-    if file.content_type not in ["audio/mpeg", "audio/wav"]:
+    if file.content_type not in ["audio/mpeg", "audio/wav", "audio/ogg"]:
         raise HTTPException(
             status_code=status.HTTP_400_BAD_REQUEST,
             detail=ERROR_MESSAGES.FILE_NOT_SUPPORTED,
@@ -443,7 +494,7 @@ def get_available_models() -> list[dict]:
 
         try:
             response = requests.get(
-                "https://api.elevenlabs.io/v1/models", headers=headers
+                "https://api.elevenlabs.io/v1/models", headers=headers, timeout=5
             )
             response.raise_for_status()
             models = response.json()
@@ -478,6 +529,21 @@ def get_available_voices() -> dict:
         except Exception:
             # Avoided @lru_cache with exception
             pass
+    elif app.state.config.TTS_ENGINE == "azure":
+        try:
+            region = app.state.config.TTS_AZURE_SPEECH_REGION
+            url = f"https://{region}.tts.speech.microsoft.com/cognitiveservices/voices/list"
+            headers = {"Ocp-Apim-Subscription-Key": app.state.config.TTS_API_KEY}
+
+            response = requests.get(url, headers=headers)
+            response.raise_for_status()
+            voices = response.json()
+            for voice in voices:
+                ret[voice["ShortName"]] = (
+                    f"{voice['DisplayName']} ({voice['ShortName']})"
+                )
+        except requests.RequestException as e:
+            log.error(f"Error fetching voices: {str(e)}")
 
     return ret
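
The new "azure" branch above reduces to a single REST call against the regional Cognitive Services TTS endpoint, with the voice wrapped in SSML. A minimal standalone sketch of that request follows; the region, key, voice and output format are placeholder values (in Open WebUI they come from TTS_AZURE_SPEECH_REGION, TTS_API_KEY, TTS_VOICE and TTS_AZURE_SPEECH_OUTPUT_FORMAT), and the locale derivation deliberately mirrors the diff, which keeps only the first segment of the voice name.

    import requests

    # Placeholder configuration values, not taken from this diff.
    region = "eastus"
    api_key = "<azure-speech-key>"
    voice = "en-US-JennyNeural"
    output_format = "audio-24khz-160kbitrate-mono-mp3"

    # Same derivation as the handler: "en-US-JennyNeural" -> "en"
    locale = "-".join(voice.split("-")[:1])
    url = f"https://{region}.tts.speech.microsoft.com/cognitiveservices/v1"

    ssml = f"""<speak version="1.0" xmlns="http://www.w3.org/2001/10/synthesis" xml:lang="{locale}">
        <voice name="{voice}">Hello from Open WebUI</voice>
    </speak>"""

    response = requests.post(
        url,
        headers={
            "Ocp-Apim-Subscription-Key": api_key,
            "Content-Type": "application/ssml+xml",
            "X-Microsoft-OutputFormat": output_format,
        },
        data=ssml,
    )
    response.raise_for_status()

    # The response body is the synthesized audio in the requested format.
    with open("speech.mp3", "wb") as f:
        f.write(response.content)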
 

+ 40 - 0
backend/open_webui/apps/images/main.py

@@ -17,6 +17,9 @@ from open_webui.apps.images.utils.comfyui import (
 from open_webui.config import (
     AUTOMATIC1111_API_AUTH,
     AUTOMATIC1111_BASE_URL,
+    AUTOMATIC1111_CFG_SCALE,
+    AUTOMATIC1111_SAMPLER,
+    AUTOMATIC1111_SCHEDULER,
     CACHE_DIR,
     COMFYUI_BASE_URL,
     COMFYUI_WORKFLOW,
@@ -65,6 +68,9 @@ app.state.config.MODEL = IMAGE_GENERATION_MODEL
 
 app.state.config.AUTOMATIC1111_BASE_URL = AUTOMATIC1111_BASE_URL
 app.state.config.AUTOMATIC1111_API_AUTH = AUTOMATIC1111_API_AUTH
+app.state.config.AUTOMATIC1111_CFG_SCALE = AUTOMATIC1111_CFG_SCALE
+app.state.config.AUTOMATIC1111_SAMPLER = AUTOMATIC1111_SAMPLER
+app.state.config.AUTOMATIC1111_SCHEDULER = AUTOMATIC1111_SCHEDULER
 app.state.config.COMFYUI_BASE_URL = COMFYUI_BASE_URL
 app.state.config.COMFYUI_WORKFLOW = COMFYUI_WORKFLOW
 app.state.config.COMFYUI_WORKFLOW_NODES = COMFYUI_WORKFLOW_NODES
@@ -85,6 +91,9 @@ async def get_config(request: Request, user=Depends(get_admin_user)):
         "automatic1111": {
             "AUTOMATIC1111_BASE_URL": app.state.config.AUTOMATIC1111_BASE_URL,
             "AUTOMATIC1111_API_AUTH": app.state.config.AUTOMATIC1111_API_AUTH,
+            "AUTOMATIC1111_CFG_SCALE": app.state.config.AUTOMATIC1111_CFG_SCALE,
+            "AUTOMATIC1111_SAMPLER": app.state.config.AUTOMATIC1111_SAMPLER,
+            "AUTOMATIC1111_SCHEDULER": app.state.config.AUTOMATIC1111_SCHEDULER,
         },
         "comfyui": {
             "COMFYUI_BASE_URL": app.state.config.COMFYUI_BASE_URL,
@@ -102,6 +111,9 @@ class OpenAIConfigForm(BaseModel):
 class Automatic1111ConfigForm(BaseModel):
     AUTOMATIC1111_BASE_URL: str
     AUTOMATIC1111_API_AUTH: str
+    AUTOMATIC1111_CFG_SCALE: Optional[str]
+    AUTOMATIC1111_SAMPLER: Optional[str]
+    AUTOMATIC1111_SCHEDULER: Optional[str]
 
 
 class ComfyUIConfigForm(BaseModel):
@@ -133,6 +145,22 @@ async def update_config(form_data: ConfigForm, user=Depends(get_admin_user)):
         form_data.automatic1111.AUTOMATIC1111_API_AUTH
     )
 
+    app.state.config.AUTOMATIC1111_CFG_SCALE = (
+        float(form_data.automatic1111.AUTOMATIC1111_CFG_SCALE)
+        if form_data.automatic1111.AUTOMATIC1111_CFG_SCALE != ""
+        else None
+    )
+    app.state.config.AUTOMATIC1111_SAMPLER = (
+        form_data.automatic1111.AUTOMATIC1111_SAMPLER
+        if form_data.automatic1111.AUTOMATIC1111_SAMPLER != ""
+        else None
+    )
+    app.state.config.AUTOMATIC1111_SCHEDULER = (
+        form_data.automatic1111.AUTOMATIC1111_SCHEDULER
+        if form_data.automatic1111.AUTOMATIC1111_SCHEDULER != ""
+        else None
+    )
+
     app.state.config.COMFYUI_BASE_URL = form_data.comfyui.COMFYUI_BASE_URL.strip("/")
     app.state.config.COMFYUI_WORKFLOW = form_data.comfyui.COMFYUI_WORKFLOW
     app.state.config.COMFYUI_WORKFLOW_NODES = form_data.comfyui.COMFYUI_WORKFLOW_NODES
@@ -147,6 +175,9 @@ async def update_config(form_data: ConfigForm, user=Depends(get_admin_user)):
         "automatic1111": {
             "AUTOMATIC1111_BASE_URL": app.state.config.AUTOMATIC1111_BASE_URL,
             "AUTOMATIC1111_API_AUTH": app.state.config.AUTOMATIC1111_API_AUTH,
+            "AUTOMATIC1111_CFG_SCALE": app.state.config.AUTOMATIC1111_CFG_SCALE,
+            "AUTOMATIC1111_SAMPLER": app.state.config.AUTOMATIC1111_SAMPLER,
+            "AUTOMATIC1111_SCHEDULER": app.state.config.AUTOMATIC1111_SCHEDULER,
         },
         "comfyui": {
             "COMFYUI_BASE_URL": app.state.config.COMFYUI_BASE_URL,
@@ -524,6 +555,15 @@ async def image_generations(
             if form_data.negative_prompt is not None:
                 data["negative_prompt"] = form_data.negative_prompt
 
+            if app.state.config.AUTOMATIC1111_CFG_SCALE:
+                data["cfg_scale"] = app.state.config.AUTOMATIC1111_CFG_SCALE
+
+            if app.state.config.AUTOMATIC1111_SAMPLER:
+                data["sampler_name"] = app.state.config.AUTOMATIC1111_SAMPLER
+
+            if app.state.config.AUTOMATIC1111_SCHEDULER:
+                data["scheduler"] = app.state.config.AUTOMATIC1111_SCHEDULER
+
             # Use asyncio.to_thread for the requests.post call
             r = await asyncio.to_thread(
                 requests.post,
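
With the three new admin settings in place, the txt2img payload sent to AUTOMATIC1111 gains optional cfg_scale, sampler_name and scheduler keys, each added only when configured. A rough sketch of such a payload against a local instance; the base URL, prompt and parameter values are illustrative, and /sdapi/v1/txt2img is the standard AUTOMATIC1111 API route rather than something introduced by this diff.

    import requests

    # Illustrative values; Open WebUI fills these from AUTOMATIC1111_BASE_URL,
    # AUTOMATIC1111_CFG_SCALE, AUTOMATIC1111_SAMPLER and AUTOMATIC1111_SCHEDULER.
    base_url = "http://localhost:7860"

    data = {
        "prompt": "a watercolor painting of a lighthouse",
        "batch_size": 1,
        "width": 512,
        "height": 512,
    }

    # The new parameters are only attached when set to a non-empty value.
    data["cfg_scale"] = 7.0            # AUTOMATIC1111_CFG_SCALE
    data["sampler_name"] = "Euler a"   # AUTOMATIC1111_SAMPLER
    data["scheduler"] = "Karras"       # AUTOMATIC1111_SCHEDULER

    r = requests.post(f"{base_url}/sdapi/v1/txt2img", json=data, timeout=120)
    r.raise_for_status()
    images_b64 = r.json()["images"]    # list of base64-encoded images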

+ 54 - 2
backend/open_webui/apps/ollama/main.py

@@ -545,6 +545,55 @@ class GenerateEmbeddingsForm(BaseModel):
 
 @app.post("/api/embed")
 @app.post("/api/embed/{url_idx}")
+async def generate_embeddings(
+    form_data: GenerateEmbeddingsForm,
+    url_idx: Optional[int] = None,
+    user=Depends(get_verified_user),
+):
+    if url_idx is None:
+        model = form_data.model
+
+        if ":" not in model:
+            model = f"{model}:latest"
+
+        if model in app.state.MODELS:
+            url_idx = random.choice(app.state.MODELS[model]["urls"])
+        else:
+            raise HTTPException(
+                status_code=400,
+                detail=ERROR_MESSAGES.MODEL_NOT_FOUND(form_data.model),
+            )
+
+    url = app.state.config.OLLAMA_BASE_URLS[url_idx]
+    log.info(f"url: {url}")
+
+    r = requests.request(
+        method="POST",
+        url=f"{url}/api/embed",
+        headers={"Content-Type": "application/json"},
+        data=form_data.model_dump_json(exclude_none=True).encode(),
+    )
+    try:
+        r.raise_for_status()
+
+        return r.json()
+    except Exception as e:
+        log.exception(e)
+        error_detail = "Open WebUI: Server Connection Error"
+        if r is not None:
+            try:
+                res = r.json()
+                if "error" in res:
+                    error_detail = f"Ollama: {res['error']}"
+            except Exception:
+                error_detail = f"Ollama: {e}"
+
+        raise HTTPException(
+            status_code=r.status_code if r else 500,
+            detail=error_detail,
+        )
+
+
 @app.post("/api/embeddings")
 @app.post("/api/embeddings/{url_idx}")
 async def generate_embeddings(
@@ -571,7 +620,7 @@ async def generate_embeddings(
 
     r = requests.request(
         method="POST",
-        url=f"{url}/api/embed",
+        url=f"{url}/api/embeddings",
         headers={"Content-Type": "application/json"},
         data=form_data.model_dump_json(exclude_none=True).encode(),
     )
@@ -767,7 +816,10 @@ async def generate_chat_completion(
     log.debug(payload)
 
     return await post_streaming_url(
-        f"{url}/api/chat", json.dumps(payload), content_type="application/x-ndjson"
+        f"{url}/api/chat",
+        json.dumps(payload),
+        stream=form_data.stream,
+        content_type="application/x-ndjson",
     )
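
The new route proxies Ollama's newer /api/embed endpoint, while the pre-existing handler below it now correctly targets /api/embeddings (it previously posted to /api/embed as well). A quick sketch of what the two upstream Ollama endpoints expect, assuming a local Ollama on the default port; the field names follow Ollama's public API rather than anything shown in this diff.

    import requests

    OLLAMA = "http://localhost:11434"  # assumed local Ollama instance

    # Newer endpoint: "input" may be a single string or a list of strings.
    r = requests.post(
        f"{OLLAMA}/api/embed",
        json={"model": "nomic-embed-text", "input": ["first text", "second text"]},
    )
    print(len(r.json()["embeddings"]))  # one vector per input

    # Legacy endpoint: one "prompt" per request, returning a single "embedding".
    r = requests.post(
        f"{OLLAMA}/api/embeddings",
        json={"model": "nomic-embed-text", "prompt": "first text"},
    )
    print(len(r.json()["embedding"]))   # vector dimensionality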
 
 

+ 15 - 12
backend/open_webui/apps/openai/main.py

@@ -423,6 +423,7 @@ async def generate_chat_completion(
     r = None
     session = None
     streaming = False
+    response = None
 
     try:
         session = aiohttp.ClientSession(
@@ -435,8 +436,6 @@ async def generate_chat_completion(
             headers=headers,
         )
 
-        r.raise_for_status()
-
         # Check if response is SSE
         if "text/event-stream" in r.headers.get("Content-Type", ""):
             streaming = True
@@ -449,19 +448,23 @@ async def generate_chat_completion(
                 ),
             )
         else:
-            response_data = await r.json()
-            return response_data
+            try:
+                response = await r.json()
+            except Exception as e:
+                log.error(e)
+                response = await r.text()
+
+            r.raise_for_status()
+            return response
     except Exception as e:
         log.exception(e)
         error_detail = "Open WebUI: Server Connection Error"
-        if r is not None:
-            try:
-                res = await r.json()
-                print(res)
-                if "error" in res:
-                    error_detail = f"External: {res['error']['message'] if 'message' in res['error'] else res['error']}"
-            except Exception:
-                error_detail = f"External: {e}"
+        if isinstance(response, dict):
+            if "error" in response:
+                error_detail = f"{response['error']['message'] if 'message' in response['error'] else response['error']}"
+        elif isinstance(response, str):
+            error_detail = response
+
         raise HTTPException(status_code=r.status if r else 500, detail=error_detail)
     finally:
         if not streaming and session:

+ 143 - 48
backend/open_webui/apps/rag/main.py

@@ -10,13 +10,21 @@ from datetime import datetime
 from pathlib import Path
 from typing import Iterator, Optional, Sequence, Union
 
+
+import numpy as np
+import torch
 import requests
 import validators
+
+from fastapi import Depends, FastAPI, File, Form, HTTPException, UploadFile, status
+from fastapi.middleware.cors import CORSMiddleware
+from pydantic import BaseModel
+
+from open_webui.apps.rag.search.main import SearchResult
 from open_webui.apps.rag.search.brave import search_brave
 from open_webui.apps.rag.search.duckduckgo import search_duckduckgo
 from open_webui.apps.rag.search.google_pse import search_google_pse
 from open_webui.apps.rag.search.jina_search import search_jina
-from open_webui.apps.rag.search.main import SearchResult
 from open_webui.apps.rag.search.searchapi import search_searchapi
 from open_webui.apps.rag.search.searxng import search_searxng
 from open_webui.apps.rag.search.serper import search_serper
@@ -33,15 +41,12 @@ from open_webui.apps.rag.utils import (
 )
 from open_webui.apps.webui.models.documents import DocumentForm, Documents
 from open_webui.apps.webui.models.files import Files
-from chromadb.utils.batch_utils import create_batches
 from open_webui.config import (
     BRAVE_SEARCH_API_KEY,
-    CHROMA_CLIENT,
     CHUNK_OVERLAP,
     CHUNK_SIZE,
     CONTENT_EXTRACTION_ENGINE,
     CORS_ALLOW_ORIGIN,
-    DEVICE_TYPE,
     DOCS_DIR,
     ENABLE_RAG_HYBRID_SEARCH,
     ENABLE_RAG_LOCAL_WEB_FETCH,
@@ -64,6 +69,7 @@ from open_webui.config import (
     RAG_RERANKING_MODEL,
     RAG_RERANKING_MODEL_AUTO_UPDATE,
     RAG_RERANKING_MODEL_TRUST_REMOTE_CODE,
+    DEFAULT_RAG_TEMPLATE,
     RAG_TEMPLATE,
     RAG_TOP_K,
     RAG_WEB_SEARCH_CONCURRENT_REQUESTS,
@@ -84,9 +90,16 @@ from open_webui.config import (
     AppConfig,
 )
 from open_webui.constants import ERROR_MESSAGES
-from open_webui.env import SRC_LOG_LEVELS
-from fastapi import Depends, FastAPI, File, Form, HTTPException, UploadFile, status
-from fastapi.middleware.cors import CORSMiddleware
+from open_webui.env import SRC_LOG_LEVELS, DEVICE_TYPE, DOCKER
+from open_webui.utils.misc import (
+    calculate_sha256,
+    calculate_sha256_string,
+    extract_folders_after_data_docs,
+    sanitize_filename,
+)
+from open_webui.utils.utils import get_admin_user, get_verified_user
+from open_webui.apps.rag.vector.connector import VECTOR_DB_CLIENT
+
 from langchain.text_splitter import RecursiveCharacterTextSplitter
 from langchain_community.document_loaders import (
     BSHTMLLoader,
@@ -105,14 +118,8 @@ from langchain_community.document_loaders import (
     YoutubeLoader,
 )
 from langchain_core.documents import Document
-from pydantic import BaseModel
-from open_webui.utils.misc import (
-    calculate_sha256,
-    calculate_sha256_string,
-    extract_folders_after_data_docs,
-    sanitize_filename,
-)
-from open_webui.utils.utils import get_admin_user, get_verified_user
+from colbert.infra import ColBERTConfig
+from colbert.modeling.checkpoint import Checkpoint
 
 log = logging.getLogger(__name__)
 log.setLevel(SRC_LOG_LEVELS["RAG"])
@@ -143,13 +150,11 @@ app.state.config.RAG_EMBEDDING_OPENAI_BATCH_SIZE = RAG_EMBEDDING_OPENAI_BATCH_SI
 app.state.config.RAG_RERANKING_MODEL = RAG_RERANKING_MODEL
 app.state.config.RAG_TEMPLATE = RAG_TEMPLATE
 
-
 app.state.config.OPENAI_API_BASE_URL = RAG_OPENAI_API_BASE_URL
 app.state.config.OPENAI_API_KEY = RAG_OPENAI_API_KEY
 
 app.state.config.PDF_EXTRACT_IMAGES = PDF_EXTRACT_IMAGES
 
-
 app.state.config.YOUTUBE_LOADER_LANGUAGE = YOUTUBE_LOADER_LANGUAGE
 app.state.YOUTUBE_LOADER_TRANSLATION = None
 
@@ -175,13 +180,13 @@ app.state.config.RAG_WEB_SEARCH_CONCURRENT_REQUESTS = RAG_WEB_SEARCH_CONCURRENT_
 
 def update_embedding_model(
     embedding_model: str,
-    update_model: bool = False,
+    auto_update: bool = False,
 ):
     if embedding_model and app.state.config.RAG_EMBEDDING_ENGINE == "":
         import sentence_transformers
 
         app.state.sentence_transformer_ef = sentence_transformers.SentenceTransformer(
-            get_model_path(embedding_model, update_model),
+            get_model_path(embedding_model, auto_update),
             device=DEVICE_TYPE,
             trust_remote_code=RAG_EMBEDDING_MODEL_TRUST_REMOTE_CODE,
         )
@@ -191,16 +196,108 @@ def update_embedding_model(
 
 def update_reranking_model(
     reranking_model: str,
-    update_model: bool = False,
+    auto_update: bool = False,
 ):
     if reranking_model:
-        import sentence_transformers
+        if any(model in reranking_model for model in ["jinaai/jina-colbert-v2"]):
+
+            class ColBERT:
+                def __init__(self, name) -> None:
+                    print("ColBERT: Loading model", name)
+                    self.device = "cuda" if torch.cuda.is_available() else "cpu"
+
+                    if DOCKER:
+                        # This is a workaround for the issue with the docker container
+                        # where the torch extension is not loaded properly
+                        # and the following error is thrown:
+                        # /root/.cache/torch_extensions/py311_cpu/segmented_maxsim_cpp/segmented_maxsim_cpp.so: cannot open shared object file: No such file or directory
+
+                        lock_file = "/root/.cache/torch_extensions/py311_cpu/segmented_maxsim_cpp/lock"
+                        if os.path.exists(lock_file):
+                            os.remove(lock_file)
+
+                    self.ckpt = Checkpoint(
+                        name,
+                        colbert_config=ColBERTConfig(model_name=name),
+                    ).to(self.device)
+                    pass
 
-        app.state.sentence_transformer_rf = sentence_transformers.CrossEncoder(
-            get_model_path(reranking_model, update_model),
-            device=DEVICE_TYPE,
-            trust_remote_code=RAG_RERANKING_MODEL_TRUST_REMOTE_CODE,
-        )
+                def calculate_similarity_scores(
+                    self, query_embeddings, document_embeddings
+                ):
+
+                    query_embeddings = query_embeddings.to(self.device)
+                    document_embeddings = document_embeddings.to(self.device)
+
+                    # Validate dimensions to ensure compatibility
+                    if query_embeddings.dim() != 3:
+                        raise ValueError(
+                            f"Expected query embeddings to have 3 dimensions, but got {query_embeddings.dim()}."
+                        )
+                    if document_embeddings.dim() != 3:
+                        raise ValueError(
+                            f"Expected document embeddings to have 3 dimensions, but got {document_embeddings.dim()}."
+                        )
+                    if query_embeddings.size(0) not in [1, document_embeddings.size(0)]:
+                        raise ValueError(
+                            "There should be either one query or queries equal to the number of documents."
+                        )
+
+                    # Transpose the query embeddings to align for matrix multiplication
+                    transposed_query_embeddings = query_embeddings.permute(0, 2, 1)
+                    # Compute similarity scores using batch matrix multiplication
+                    computed_scores = torch.matmul(
+                        document_embeddings, transposed_query_embeddings
+                    )
+                    # Apply max pooling to extract the highest semantic similarity across each document's sequence
+                    maximum_scores = torch.max(computed_scores, dim=1).values
+
+                    # Sum up the maximum scores across features to get the overall document relevance scores
+                    final_scores = maximum_scores.sum(dim=1)
+
+                    normalized_scores = torch.softmax(final_scores, dim=0)
+
+                    return normalized_scores.detach().cpu().numpy().astype(np.float32)
+
+                def predict(self, sentences):
+
+                    query = sentences[0][0]
+                    docs = [i[1] for i in sentences]
+
+                    # Embedding the documents
+                    embedded_docs = self.ckpt.docFromText(docs, bsize=32)[0]
+                    # Embedding the queries
+                    embedded_queries = self.ckpt.queryFromText([query], bsize=32)
+                    embedded_query = embedded_queries[0]
+
+                    # Calculate retrieval scores for the query against all documents
+                    scores = self.calculate_similarity_scores(
+                        embedded_query.unsqueeze(0), embedded_docs
+                    )
+
+                    return scores
+
+            try:
+                app.state.sentence_transformer_rf = ColBERT(
+                    get_model_path(reranking_model, auto_update)
+                )
+            except Exception as e:
+                log.error(f"ColBERT: {e}")
+                app.state.sentence_transformer_rf = None
+                app.state.config.ENABLE_RAG_HYBRID_SEARCH = False
+        else:
+            import sentence_transformers
+
+            try:
+                app.state.sentence_transformer_rf = sentence_transformers.CrossEncoder(
+                    get_model_path(reranking_model, auto_update),
+                    device=DEVICE_TYPE,
+                    trust_remote_code=RAG_RERANKING_MODEL_TRUST_REMOTE_CODE,
+                )
+            except:
+                log.error("CrossEncoder error")
+                app.state.sentence_transformer_rf = None
+                app.state.config.ENABLE_RAG_HYBRID_SEARCH = False
     else:
         app.state.sentence_transformer_rf = None
 
@@ -593,7 +690,7 @@ async def update_query_settings(
     form_data: QuerySettingsForm, user=Depends(get_admin_user)
 ):
     app.state.config.RAG_TEMPLATE = (
-        form_data.template if form_data.template else RAG_TEMPLATE
+        form_data.template if form_data.template != "" else DEFAULT_RAG_TEMPLATE
     )
     app.state.config.TOP_K = form_data.k if form_data.k else 4
     app.state.config.RELEVANCE_THRESHOLD = form_data.r if form_data.r else 0.0
@@ -998,14 +1095,11 @@ def store_docs_in_vector_db(
 
     try:
         if overwrite:
-            for collection in CHROMA_CLIENT.list_collections():
-                if collection_name == collection.name:
-                    log.info(f"deleting existing collection {collection_name}")
-                    CHROMA_CLIENT.delete_collection(name=collection_name)
-
-        collection = CHROMA_CLIENT.create_collection(name=collection_name)
+            if VECTOR_DB_CLIENT.has_collection(collection_name=collection_name):
+                log.info(f"deleting existing collection {collection_name}")
+                VECTOR_DB_CLIENT.delete_collection(collection_name=collection_name)
 
-        embedding_func = get_embedding_function(
+        embedding_function = get_embedding_function(
             app.state.config.RAG_EMBEDDING_ENGINE,
             app.state.config.RAG_EMBEDDING_MODEL,
             app.state.sentence_transformer_ef,
@@ -1014,17 +1108,18 @@ def store_docs_in_vector_db(
             app.state.config.RAG_EMBEDDING_OPENAI_BATCH_SIZE,
         )
 
-        embedding_texts = list(map(lambda x: x.replace("\n", " "), texts))
-        embeddings = embedding_func(embedding_texts)
-
-        for batch in create_batches(
-            api=CHROMA_CLIENT,
-            ids=[str(uuid.uuid4()) for _ in texts],
-            metadatas=metadatas,
-            embeddings=embeddings,
-            documents=texts,
-        ):
-            collection.add(*batch)
+        VECTOR_DB_CLIENT.insert(
+            collection_name=collection_name,
+            items=[
+                {
+                    "id": str(uuid.uuid4()),
+                    "text": text,
+                    "vector": embedding_function(text.replace("\n", " ")),
+                    "metadata": metadatas[idx],
+                }
+                for idx, text in enumerate(texts)
+            ],
+        )
 
         return True
     except Exception as e:
@@ -1158,7 +1253,7 @@ def get_loader(filename: str, file_content_type: str, file_path: str):
         elif (
             file_content_type
             == "application/vnd.openxmlformats-officedocument.wordprocessingml.document"
-            or file_ext in ["doc", "docx"]
+            or file_ext == "docx"
         ):
             loader = Docx2txtLoader(file_path)
         elif file_content_type in [
@@ -1396,7 +1491,7 @@ def scan_docs_dir(user=Depends(get_admin_user)):
 
 @app.post("/reset/db")
 def reset_vector_db(user=Depends(get_admin_user)):
-    CHROMA_CLIENT.reset()
+    VECTOR_DB_CLIENT.reset()
 
 
 @app.post("/reset/uploads")
@@ -1437,7 +1532,7 @@ def reset(user=Depends(get_admin_user)) -> bool:
             log.error("Failed to delete %s. Reason: %s" % (file_path, e))
 
     try:
-        CHROMA_CLIENT.reset()
+        VECTOR_DB_CLIENT.reset()
     except Exception as e:
         log.exception(e)
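
store_docs_in_vector_db now goes through the backend-agnostic VECTOR_DB_CLIENT instead of talking to Chroma directly, so each chunk is handed over as a plain dict with id, text, vector and metadata keys. A minimal sketch of that call shape; the collection name, text and metadata are placeholders, and the embedding function is a stand-in for whatever get_embedding_function() returns.

    import uuid
    from open_webui.apps.rag.vector.connector import VECTOR_DB_CLIENT

    texts = ["Open WebUI supports multiple vector databases."]
    metadatas = [{"source": "example.txt"}]

    # Stand-in embedding function mapping a string to a list[float].
    def embedding_function(text: str) -> list[float]:
        return [0.0] * 384  # placeholder vector

    VECTOR_DB_CLIENT.insert(
        collection_name="example-collection",
        items=[
            {
                "id": str(uuid.uuid4()),
                "text": text,
                "vector": embedding_function(text.replace("\n", " ")),
                "metadata": metadatas[idx],
            }
            for idx, text in enumerate(texts)
        ],
    )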
 

+ 121 - 83
backend/open_webui/apps/rag/utils.py

@@ -1,24 +1,68 @@
 import logging
 import os
+import uuid
 from typing import Optional, Union
 
 import requests
-from open_webui.apps.ollama.main import (
-    GenerateEmbeddingsForm,
-    generate_ollama_embeddings,
-)
-from open_webui.config import CHROMA_CLIENT
-from open_webui.env import SRC_LOG_LEVELS
+
 from huggingface_hub import snapshot_download
 from langchain.retrievers import ContextualCompressionRetriever, EnsembleRetriever
 from langchain_community.retrievers import BM25Retriever
 from langchain_core.documents import Document
+
+
+from open_webui.apps.ollama.main import (
+    GenerateEmbeddingsForm,
+    generate_ollama_embeddings,
+)
+from open_webui.apps.rag.vector.connector import VECTOR_DB_CLIENT
 from open_webui.utils.misc import get_last_user_message
 
+from open_webui.env import SRC_LOG_LEVELS
+
+
 log = logging.getLogger(__name__)
 log.setLevel(SRC_LOG_LEVELS["RAG"])
 
 
+from typing import Any
+
+from langchain_core.callbacks import CallbackManagerForRetrieverRun
+from langchain_core.retrievers import BaseRetriever
+
+
+class VectorSearchRetriever(BaseRetriever):
+    collection_name: Any
+    embedding_function: Any
+    top_k: int
+
+    def _get_relevant_documents(
+        self,
+        query: str,
+        *,
+        run_manager: CallbackManagerForRetrieverRun,
+    ) -> list[Document]:
+        result = VECTOR_DB_CLIENT.search(
+            collection_name=self.collection_name,
+            vectors=[self.embedding_function(query)],
+            limit=self.top_k,
+        )
+
+        ids = result.ids[0]
+        metadatas = result.metadatas[0]
+        documents = result.documents[0]
+
+        results = []
+        for idx in range(len(ids)):
+            results.append(
+                Document(
+                    metadata=metadatas[idx],
+                    page_content=documents[idx],
+                )
+            )
+        return results
+
+
 def query_doc(
     collection_name: str,
     query: str,
@@ -26,17 +70,18 @@ def query_doc(
     k: int,
 ):
     try:
-        collection = CHROMA_CLIENT.get_collection(name=collection_name)
-        query_embeddings = embedding_function(query)
-
-        result = collection.query(
-            query_embeddings=[query_embeddings],
-            n_results=k,
+        result = VECTOR_DB_CLIENT.search(
+            collection_name=collection_name,
+            vectors=[embedding_function(query)],
+            limit=k,
         )
 
+        print("result", result)
+
         log.info(f"query_doc:result {result}")
         return result
     except Exception as e:
+        print(e)
         raise e
 
 
@@ -47,27 +92,25 @@ def query_doc_with_hybrid_search(
     k: int,
     reranking_function,
     r: float,
-):
+) -> dict:
     try:
-        collection = CHROMA_CLIENT.get_collection(name=collection_name)
-        documents = collection.get()  # get all documents
+        result = VECTOR_DB_CLIENT.get(collection_name=collection_name)
 
         bm25_retriever = BM25Retriever.from_texts(
-            texts=documents.get("documents"),
-            metadatas=documents.get("metadatas"),
+            texts=result.documents[0],
+            metadatas=result.metadatas[0],
         )
         bm25_retriever.k = k
 
-        chroma_retriever = ChromaRetriever(
-            collection=collection,
+        vector_search_retriever = VectorSearchRetriever(
+            collection_name=collection_name,
             embedding_function=embedding_function,
-            top_n=k,
+            top_k=k,
         )
 
         ensemble_retriever = EnsembleRetriever(
-            retrievers=[bm25_retriever, chroma_retriever], weights=[0.5, 0.5]
+            retrievers=[bm25_retriever, vector_search_retriever], weights=[0.5, 0.5]
         )
-
         compressor = RerankCompressor(
             embedding_function=embedding_function,
             top_n=k,
@@ -92,7 +135,9 @@ def query_doc_with_hybrid_search(
         raise e
 
 
-def merge_and_sort_query_results(query_results, k, reverse=False):
+def merge_and_sort_query_results(
+    query_results: list[dict], k: int, reverse: bool = False
+) -> list[dict]:
     # Initialize lists to store combined data
     combined_distances = []
     combined_documents = []
@@ -138,7 +183,7 @@ def query_collection(
     query: str,
     embedding_function,
     k: int,
-):
+) -> dict:
     results = []
     for collection_name in collection_names:
         if collection_name:
@@ -149,9 +194,9 @@ def query_collection(
                     k=k,
                     embedding_function=embedding_function,
                 )
-                results.append(result)
-            except Exception:
-                pass
+                results.append(result.model_dump())
+            except Exception as e:
+                log.exception(f"Error when querying the collection: {e}")
         else:
             pass
 
@@ -165,8 +210,9 @@ def query_collection_with_hybrid_search(
     k: int,
     reranking_function,
     r: float,
-):
+) -> dict:
     results = []
+    error = False
     for collection_name in collection_names:
         try:
             result = query_doc_with_hybrid_search(
@@ -178,14 +224,39 @@ def query_collection_with_hybrid_search(
                 r=r,
             )
             results.append(result)
-        except Exception:
-            pass
+        except Exception as e:
+            log.exception(
+                "Error when querying the collection with " f"hybrid_search: {e}"
+            )
+            error = True
+
+    if error:
+        raise Exception(
+            "Hybrid search failed for all collections. Using Non hybrid search as fallback."
+        )
+
     return merge_and_sort_query_results(results, k=k, reverse=True)
 
 
 def rag_template(template: str, context: str, query: str):
-    template = template.replace("[context]", context)
-    template = template.replace("[query]", query)
+    count = template.count("[context]")
+    assert "[context]" in template, "RAG template does not contain '[context]'"
+
+    if "<context>" in context and "</context>" in context:
+        log.debug(
+            "WARNING: Potential prompt injection attack: the RAG "
+            "context contains '<context>' and '</context>'. This might be "
+            "nothing, or the user might be trying to hack something."
+        )
+
+    if "[query]" in context:
+        query_placeholder = f"[query-{str(uuid.uuid4())}]"
+        template = template.replace("[query]", query_placeholder)
+        template = template.replace("[context]", context)
+        template = template.replace(query_placeholder, query)
+    else:
+        template = template.replace("[context]", context)
+        template = template.replace("[query]", query)
     return template
 
 
@@ -262,19 +333,27 @@ def get_rag_context(
             continue
 
         try:
+            context = None
             if file["type"] == "text":
                 context = file["content"]
             else:
                 if hybrid_search:
-                    context = query_collection_with_hybrid_search(
-                        collection_names=collection_names,
-                        query=query,
-                        embedding_function=embedding_function,
-                        k=k,
-                        reranking_function=reranking_function,
-                        r=r,
-                    )
-                else:
+                    try:
+                        context = query_collection_with_hybrid_search(
+                            collection_names=collection_names,
+                            query=query,
+                            embedding_function=embedding_function,
+                            k=k,
+                            reranking_function=reranking_function,
+                            r=r,
+                        )
+                    except Exception as e:
+                        log.debug(
+                            "Error when using hybrid search, using"
+                            " non hybrid search as fallback."
+                        )
+
+                if (not hybrid_search) or (context is None):
                     context = query_collection(
                         collection_names=collection_names,
                         query=query,
@@ -283,7 +362,6 @@ def get_rag_context(
                     )
         except Exception as e:
             log.exception(e)
-            context = None
 
         if context:
             relevant_contexts.append({**context, "source": file})
@@ -391,51 +469,11 @@ def generate_openai_batch_embeddings(
         return None
 
 
-from typing import Any
-
-from langchain_core.callbacks import CallbackManagerForRetrieverRun
-from langchain_core.retrievers import BaseRetriever
-
-
-class ChromaRetriever(BaseRetriever):
-    collection: Any
-    embedding_function: Any
-    top_n: int
-
-    def _get_relevant_documents(
-        self,
-        query: str,
-        *,
-        run_manager: CallbackManagerForRetrieverRun,
-    ) -> list[Document]:
-        query_embeddings = self.embedding_function(query)
-
-        results = self.collection.query(
-            query_embeddings=[query_embeddings],
-            n_results=self.top_n,
-        )
-
-        ids = results["ids"][0]
-        metadatas = results["metadatas"][0]
-        documents = results["documents"][0]
-
-        results = []
-        for idx in range(len(ids)):
-            results.append(
-                Document(
-                    metadata=metadatas[idx],
-                    page_content=documents[idx],
-                )
-            )
-        return results
-
-
 import operator
 from typing import Optional, Sequence
 
 from langchain_core.callbacks import Callbacks
 from langchain_core.documents import BaseDocumentCompressor, Document
-from langchain_core.pydantic_v1 import Extra
 
 
 class RerankCompressor(BaseDocumentCompressor):
@@ -445,7 +483,7 @@ class RerankCompressor(BaseDocumentCompressor):
     r_score: float
 
     class Config:
-        extra = Extra.forbid
+        extra = "forbid"
         arbitrary_types_allowed = True
 
     def compress_documents(
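
The hardened rag_template no longer performs a blind two-pass replace: if the retrieved context happens to contain the literal string "[query]", the template's own marker is first swapped for a one-off UUID placeholder so that only that marker receives the user's question. A condensed standalone version of the substitution logic (the assert and injection warning are omitted), with made-up inputs:

    import uuid

    def rag_template(template: str, context: str, query: str) -> str:
        # Protect the template's own [query] marker before the context,
        # which may itself contain "[query]", is substituted in.
        if "[query]" in context:
            placeholder = f"[query-{uuid.uuid4()}]"
            template = template.replace("[query]", placeholder)
            template = template.replace("[context]", context)
            return template.replace(placeholder, query)
        template = template.replace("[context]", context)
        return template.replace("[query]", query)

    template = "Use the following context:\n[context]\nQuestion: [query]"
    context = 'The document literally contains the token "[query]" in its text.'
    print(rag_template(template, context, "What does the document say?"))
    # Only the template's own [query] marker is replaced with the question;
    # the "[query]" inside the retrieved context is left untouched.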

+ 10 - 0
backend/open_webui/apps/rag/vector/connector.py

@@ -0,0 +1,10 @@
+from open_webui.apps.rag.vector.dbs.chroma import ChromaClient
+from open_webui.apps.rag.vector.dbs.milvus import MilvusClient
+
+
+from open_webui.config import VECTOR_DB
+
+if VECTOR_DB == "milvus":
+    VECTOR_DB_CLIENT = MilvusClient()
+else:
+    VECTOR_DB_CLIENT = ChromaClient()

+ 122 - 0
backend/open_webui/apps/rag/vector/dbs/chroma.py

@@ -0,0 +1,122 @@
+import chromadb
+from chromadb import Settings
+from chromadb.utils.batch_utils import create_batches
+
+from typing import Optional
+
+from open_webui.apps.rag.vector.main import VectorItem, SearchResult, GetResult
+from open_webui.config import (
+    CHROMA_DATA_PATH,
+    CHROMA_HTTP_HOST,
+    CHROMA_HTTP_PORT,
+    CHROMA_HTTP_HEADERS,
+    CHROMA_HTTP_SSL,
+    CHROMA_TENANT,
+    CHROMA_DATABASE,
+)
+
+
+class ChromaClient:
+    def __init__(self):
+        if CHROMA_HTTP_HOST != "":
+            self.client = chromadb.HttpClient(
+                host=CHROMA_HTTP_HOST,
+                port=CHROMA_HTTP_PORT,
+                headers=CHROMA_HTTP_HEADERS,
+                ssl=CHROMA_HTTP_SSL,
+                tenant=CHROMA_TENANT,
+                database=CHROMA_DATABASE,
+                settings=Settings(allow_reset=True, anonymized_telemetry=False),
+            )
+        else:
+            self.client = chromadb.PersistentClient(
+                path=CHROMA_DATA_PATH,
+                settings=Settings(allow_reset=True, anonymized_telemetry=False),
+                tenant=CHROMA_TENANT,
+                database=CHROMA_DATABASE,
+            )
+
+    def has_collection(self, collection_name: str) -> bool:
+        # Check if the collection exists based on the collection name.
+        collections = self.client.list_collections()
+        return collection_name in [collection.name for collection in collections]
+
+    def delete_collection(self, collection_name: str):
+        # Delete the collection based on the collection name.
+        return self.client.delete_collection(name=collection_name)
+
+    def search(
+        self, collection_name: str, vectors: list[list[float | int]], limit: int
+    ) -> Optional[SearchResult]:
+        # Search for the nearest neighbor items based on the vectors and return 'limit' number of results.
+        collection = self.client.get_collection(name=collection_name)
+        if collection:
+            result = collection.query(
+                query_embeddings=vectors,
+                n_results=limit,
+            )
+
+            return SearchResult(
+                **{
+                    "ids": result["ids"],
+                    "distances": result["distances"],
+                    "documents": result["documents"],
+                    "metadatas": result["metadatas"],
+                }
+            )
+        return None
+
+    def get(self, collection_name: str) -> Optional[GetResult]:
+        # Get all the items in the collection.
+        collection = self.client.get_collection(name=collection_name)
+        if collection:
+            result = collection.get()
+            return GetResult(
+                **{
+                    "ids": [result["ids"]],
+                    "documents": [result["documents"]],
+                    "metadatas": [result["metadatas"]],
+                }
+            )
+        return None
+
+    def insert(self, collection_name: str, items: list[VectorItem]):
+        # Insert the items into the collection, if the collection does not exist, it will be created.
+        collection = self.client.get_or_create_collection(name=collection_name)
+
+        ids = [item["id"] for item in items]
+        documents = [item["text"] for item in items]
+        embeddings = [item["vector"] for item in items]
+        metadatas = [item["metadata"] for item in items]
+
+        for batch in create_batches(
+            api=self.client,
+            documents=documents,
+            embeddings=embeddings,
+            ids=ids,
+            metadatas=metadatas,
+        ):
+            collection.add(*batch)
+
+    def upsert(self, collection_name: str, items: list[VectorItem]):
+        # Update the items in the collection, if the items are not present, insert them. If the collection does not exist, it will be created.
+        collection = self.client.get_or_create_collection(name=collection_name)
+
+        ids = [item["id"] for item in items]
+        documents = [item["text"] for item in items]
+        embeddings = [item["vector"] for item in items]
+        metadatas = [item["metadata"] for item in items]
+
+        collection.upsert(
+            ids=ids, documents=documents, embeddings=embeddings, metadatas=metadatas
+        )
+
+    def delete(self, collection_name: str, ids: list[str]):
+        # Delete the items from the collection based on the ids.
+        collection = self.client.get_collection(name=collection_name)
+        if collection:
+            collection.delete(ids=ids)
+
+    def reset(self):
+        # Resets the database. This will delete all collections and item entries.
+        return self.client.reset()
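
ChromaClient normalises Chroma's query output into the shared SearchResult shape, where ids, documents and metadatas are lists of per-query lists, which is why VectorSearchRetriever indexes them with [0]. A short usage sketch against the new wrapper; the collection name is a placeholder and the query vector's length simply has to match whatever embedding size was inserted.

    from open_webui.apps.rag.vector.dbs.chroma import ChromaClient

    client = ChromaClient()

    query_vector = [0.0] * 384  # placeholder; must match the stored embeddings
    result = client.search(
        collection_name="example-collection",
        vectors=[query_vector],
        limit=3,
    )

    if result is not None:
        # One inner list per query vector, mirroring Chroma's own output shape.
        for doc_id, document, metadata in zip(
            result.ids[0], result.documents[0], result.metadatas[0]
        ):
            print(doc_id, metadata, document[:80])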

+ 205 - 0
backend/open_webui/apps/rag/vector/dbs/milvus.py

@@ -0,0 +1,205 @@
+from pymilvus import MilvusClient as Client
+from pymilvus import FieldSchema, DataType
+import json
+
+from typing import Optional
+
+from open_webui.apps.rag.vector.main import VectorItem, SearchResult, GetResult
+from open_webui.config import (
+    MILVUS_URI,
+)
+
+
+class MilvusClient:
+    def __init__(self):
+        self.collection_prefix = "open_webui"
+        self.client = Client(uri=MILVUS_URI)
+
+    def _result_to_get_result(self, result) -> GetResult:
+        print(result)
+
+        ids = []
+        documents = []
+        metadatas = []
+
+        for match in result:
+            _ids = []
+            _documents = []
+            _metadatas = []
+
+            for item in match:
+                _ids.append(item.get("id"))
+                _documents.append(item.get("data", {}).get("text"))
+                _metadatas.append(item.get("metadata"))
+
+            ids.append(_ids)
+            documents.append(_documents)
+            metadatas.append(_metadatas)
+
+        return GetResult(
+            **{
+                "ids": ids,
+                "documents": documents,
+                "metadatas": metadatas,
+            }
+        )
+
+    def _result_to_search_result(self, result) -> SearchResult:
+        print(result)
+
+        ids = []
+        distances = []
+        documents = []
+        metadatas = []
+
+        for match in result:
+            _ids = []
+            _distances = []
+            _documents = []
+            _metadatas = []
+
+            for item in match:
+                _ids.append(item.get("id"))
+                _distances.append(item.get("distance"))
+                _documents.append(item.get("entity", {}).get("data", {}).get("text"))
+                _metadatas.append(item.get("entity", {}).get("metadata"))
+
+            ids.append(_ids)
+            distances.append(_distances)
+            documents.append(_documents)
+            metadatas.append(_metadatas)
+
+        return SearchResult(
+            **{
+                "ids": ids,
+                "distances": distances,
+                "documents": documents,
+                "metadatas": metadatas,
+            }
+        )
+
+    def _create_collection(self, collection_name: str, dimension: int):
+        schema = self.client.create_schema(
+            auto_id=False,
+            enable_dynamic_field=True,
+        )
+        schema.add_field(
+            field_name="id",
+            datatype=DataType.VARCHAR,
+            is_primary=True,
+            max_length=65535,
+        )
+        schema.add_field(
+            field_name="vector",
+            datatype=DataType.FLOAT_VECTOR,
+            dim=dimension,
+            description="vector",
+        )
+        schema.add_field(field_name="data", datatype=DataType.JSON, description="data")
+        schema.add_field(
+            field_name="metadata", datatype=DataType.JSON, description="metadata"
+        )
+
+        index_params = self.client.prepare_index_params()
+        index_params.add_index(
+            field_name="vector", index_type="HNSW", metric_type="COSINE", params={}
+        )
+
+        self.client.create_collection(
+            collection_name=f"{self.collection_prefix}_{collection_name}",
+            schema=schema,
+            index_params=index_params,
+        )
+
+    def has_collection(self, collection_name: str) -> bool:
+        # Check if the collection exists based on the collection name.
+        return self.client.has_collection(
+            collection_name=f"{self.collection_prefix}_{collection_name}"
+        )
+
+    def delete_collection(self, collection_name: str):
+        # Delete the collection based on the collection name.
+        return self.client.drop_collection(
+            collection_name=f"{self.collection_prefix}_{collection_name}"
+        )
+
+    def search(
+        self, collection_name: str, vectors: list[list[float | int]], limit: int
+    ) -> Optional[SearchResult]:
+        # Search for the nearest neighbor items based on the vectors and return 'limit' number of results.
+        result = self.client.search(
+            collection_name=f"{self.collection_prefix}_{collection_name}",
+            data=vectors,
+            limit=limit,
+            output_fields=["data", "metadata"],
+        )
+
+        return self._result_to_search_result(result)
+
+    def get(self, collection_name: str) -> Optional[GetResult]:
+        # Get all the items in the collection.
+        result = self.client.query(
+            collection_name=f"{self.collection_prefix}_{collection_name}",
+            filter='id != ""',
+        )
+        return self._result_to_get_result([result])
+
+    def insert(self, collection_name: str, items: list[VectorItem]):
+        # Insert the items into the collection; if the collection does not exist, it will be created.
+        if not self.client.has_collection(
+            collection_name=f"{self.collection_prefix}_{collection_name}"
+        ):
+            self._create_collection(
+                collection_name=collection_name, dimension=len(items[0]["vector"])
+            )
+
+        return self.client.insert(
+            collection_name=f"{self.collection_prefix}_{collection_name}",
+            data=[
+                {
+                    "id": item["id"],
+                    "vector": item["vector"],
+                    "data": {"text": item["text"]},
+                    "metadata": item["metadata"],
+                }
+                for item in items
+            ],
+        )
+
+    def upsert(self, collection_name: str, items: list[VectorItem]):
+        # Update the items in the collection; insert any that are missing. If the collection does not exist, it will be created.
+        if not self.client.has_collection(
+            collection_name=f"{self.collection_prefix}_{collection_name}"
+        ):
+            self._create_collection(
+                collection_name=collection_name, dimension=len(items[0]["vector"])
+            )
+
+        return self.client.upsert(
+            collection_name=f"{self.collection_prefix}_{collection_name}",
+            data=[
+                {
+                    "id": item["id"],
+                    "vector": item["vector"],
+                    "data": {"text": item["text"]},
+                    "metadata": item["metadata"],
+                }
+                for item in items
+            ],
+        )
+
+    def delete(self, collection_name: str, ids: list[str]):
+        # Delete the items from the collection based on the ids.
+
+        return self.client.delete(
+            collection_name=f"{self.collection_prefix}_{collection_name}",
+            ids=ids,
+        )
+
+    def reset(self):
+        # Resets the database. This will delete all collections and item entries.
+
+        collection_names = self.client.list_collections()
+        for collection_name in collection_names:
+            if collection_name.startswith(self.collection_prefix):
+                self.client.drop_collection(collection_name=collection_name)

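The Milvus backend mirrors the same interface. A rough standalone sketch, assuming a reachable Milvus server and that MILVUS_URI is set before open_webui.config is imported (both assumptions, not part of this diff):

import os

os.environ["MILVUS_URI"] = "http://localhost:19530"  # assumed local Milvus server

from open_webui.apps.rag.vector.dbs.milvus import MilvusClient

client = MilvusClient()

if not client.has_collection("example"):
    client.insert(
        collection_name="example",
        items=[
            {"id": "a", "text": "hello", "vector": [0.1, 0.2, 0.3], "metadata": {}}
        ],
    )

print(client.search(collection_name="example", vectors=[[0.1, 0.2, 0.3]], limit=1))
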
+ 19 - 0
backend/open_webui/apps/rag/vector/main.py

@@ -0,0 +1,19 @@
+from pydantic import BaseModel
+from typing import Optional, List, Any
+
+
+class VectorItem(BaseModel):
+    id: str
+    text: str
+    vector: List[float | int]
+    metadata: Any
+
+
+class GetResult(BaseModel):
+    ids: Optional[List[List[str]]]
+    documents: Optional[List[List[str]]]
+    metadatas: Optional[List[List[Any]]]
+
+
+class SearchResult(GetResult):
+    distances: Optional[List[List[float | int]]]

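These models define the contract shared by both database clients; a quick validation example with illustrative values:

from open_webui.apps.rag.vector.main import VectorItem, SearchResult

item = VectorItem(
    id="chunk-42",
    text="Chunk text goes here.",
    vector=[0.12, -0.03, 0.88],
    metadata={"file_id": "abc", "page": 1},
)

result = SearchResult(
    ids=[["chunk-42"]],
    documents=[["Chunk text goes here."]],
    metadatas=[[{"file_id": "abc", "page": 1}]],
    distances=[[0.07]],
)
# The outer lists have one entry per query vector; the inner lists hold
# that query's matches, mirroring what the clients above return.
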
+ 14 - 6
backend/open_webui/apps/socket/main.py

@@ -2,9 +2,16 @@ import asyncio
 
 import socketio
 from open_webui.apps.webui.models.users import Users
+from open_webui.env import ENABLE_WEBSOCKET_SUPPORT
 from open_webui.utils.utils import decode_token
 
-sio = socketio.AsyncServer(cors_allowed_origins=[], async_mode="asgi")
+sio = socketio.AsyncServer(
+    cors_allowed_origins=[],
+    async_mode="asgi",
+    transports=(["polling", "websocket"] if ENABLE_WEBSOCKET_SUPPORT else ["polling"]),
+    allow_upgrades=ENABLE_WEBSOCKET_SUPPORT,
+    always_connect=True,
+)
 app = socketio.ASGIApp(sio, socketio_path="/ws/socket.io")
 
 # Dictionary to maintain the user pool
@@ -32,7 +39,7 @@ async def connect(sid, environ, auth):
             else:
                 USER_POOL[user.id] = [sid]
 
-            print(f"user {user.name}({user.id}) connected with session ID {sid}")
+            # print(f"user {user.name}({user.id}) connected with session ID {sid}")
 
             await sio.emit("user-count", {"count": len(set(USER_POOL))})
             await sio.emit("usage", {"models": get_models_in_use()})
@@ -40,7 +47,7 @@ async def connect(sid, environ, auth):
 
 @sio.on("user-join")
 async def user_join(sid, data):
-    print("user-join", sid, data)
+    # print("user-join", sid, data)
 
     auth = data["auth"] if "auth" in data else None
     if not auth or "token" not in auth:
@@ -60,7 +67,7 @@ async def user_join(sid, data):
     else:
         USER_POOL[user.id] = [sid]
 
-    print(f"user {user.name}({user.id}) connected with session ID {sid}")
+    # print(f"user {user.name}({user.id}) connected with session ID {sid}")
 
     await sio.emit("user-count", {"count": len(set(USER_POOL))})
 
@@ -109,7 +116,7 @@ async def remove_after_timeout(sid, model_id):
     try:
         await asyncio.sleep(TIMEOUT_DURATION)
         if model_id in USAGE_POOL:
-            print(USAGE_POOL[model_id]["sids"])
+            # print(USAGE_POOL[model_id]["sids"])
             USAGE_POOL[model_id]["sids"].remove(sid)
             USAGE_POOL[model_id]["sids"] = list(set(USAGE_POOL[model_id]["sids"]))
 
@@ -136,7 +143,8 @@ async def disconnect(sid):
 
         await sio.emit("user-count", {"count": len(USER_POOL)})
     else:
-        print(f"Unknown session ID {sid} disconnected")
+        pass
+        # print(f"Unknown session ID {sid} disconnected")
 
 
 def get_event_emitter(request_info):

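The new ENABLE_WEBSOCKET_SUPPORT flag (defined in env.py later in this diff) controls which Engine.IO transports are offered; a small sketch of the selection logic:

import os

enable_ws = os.environ.get("ENABLE_WEBSOCKET_SUPPORT", "True").lower() == "true"

transports = ["polling", "websocket"] if enable_ws else ["polling"]
allow_upgrades = enable_ws
# With ENABLE_WEBSOCKET_SUPPORT=false the server sticks to HTTP long-polling
# and never attempts a connection upgrade, which helps behind proxies that
# cannot forward WebSocket upgrades.
print(transports, allow_upgrades)
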
+ 2 - 2
backend/open_webui/apps/webui/routers/auths.py

@@ -190,8 +190,8 @@ async def signin(request: Request, response: Response, form_data: SigninForm):
 async def signup(request: Request, response: Response, form_data: SignupForm):
     if (
         not request.app.state.config.ENABLE_SIGNUP
-        and request.app.state.config.ENABLE_LOGIN_FORM
-        and WEBUI_AUTH
+        or not request.app.state.config.ENABLE_LOGIN_FORM
+        or not WEBUI_AUTH
     ):
         raise HTTPException(
             status.HTTP_403_FORBIDDEN, detail=ERROR_MESSAGES.ACCESS_PROHIBITED

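The corrected guard rejects signup as soon as any of the three switches is off; the old "and" chain only raised when signup was disabled while the login form and auth were both still enabled. Equivalently, by De Morgan's law:

def signup_allowed(enable_signup: bool, enable_login_form: bool, webui_auth: bool) -> bool:
    # not a or not b or not c  ==  not (a and b and c)
    return enable_signup and enable_login_form and webui_auth


assert signup_allowed(True, True, True)
assert not signup_allowed(True, False, True)  # login form disabled -> signup rejected
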
+ 52 - 41
backend/open_webui/apps/webui/routers/memories.py

@@ -1,12 +1,13 @@
+from fastapi import APIRouter, Depends, HTTPException, Request
+from pydantic import BaseModel
 import logging
 from typing import Optional
 
 from open_webui.apps.webui.models.memories import Memories, MemoryModel
-from open_webui.config import CHROMA_CLIENT
-from open_webui.env import SRC_LOG_LEVELS
-from fastapi import APIRouter, Depends, HTTPException, Request
-from pydantic import BaseModel
+from open_webui.apps.rag.vector.connector import VECTOR_DB_CLIENT
 from open_webui.utils.utils import get_verified_user
+from open_webui.env import SRC_LOG_LEVELS
+
 
 log = logging.getLogger(__name__)
 log.setLevel(SRC_LOG_LEVELS["MODELS"])
@@ -49,14 +50,17 @@ async def add_memory(
     user=Depends(get_verified_user),
 ):
     memory = Memories.insert_new_memory(user.id, form_data.content)
-    memory_embedding = request.app.state.EMBEDDING_FUNCTION(memory.content)
-
-    collection = CHROMA_CLIENT.get_or_create_collection(name=f"user-memory-{user.id}")
-    collection.upsert(
-        documents=[memory.content],
-        ids=[memory.id],
-        embeddings=[memory_embedding],
-        metadatas=[{"created_at": memory.created_at}],
+
+    VECTOR_DB_CLIENT.upsert(
+        collection_name=f"user-memory-{user.id}",
+        items=[
+            {
+                "id": memory.id,
+                "text": memory.content,
+                "vector": request.app.state.EMBEDDING_FUNCTION(memory.content),
+                "metadata": {"created_at": memory.created_at},
+            }
+        ],
     )
 
     return memory
@@ -76,12 +80,10 @@ class QueryMemoryForm(BaseModel):
 async def query_memory(
     request: Request, form_data: QueryMemoryForm, user=Depends(get_verified_user)
 ):
-    query_embedding = request.app.state.EMBEDDING_FUNCTION(form_data.content)
-    collection = CHROMA_CLIENT.get_or_create_collection(name=f"user-memory-{user.id}")
-
-    results = collection.query(
-        query_embeddings=[query_embedding],
-        n_results=form_data.k,  # how many results to return
+    results = VECTOR_DB_CLIENT.search(
+        collection_name=f"user-memory-{user.id}",
+        vectors=[request.app.state.EMBEDDING_FUNCTION(form_data.content)],
+        limit=form_data.k,
     )
 
     return results
@@ -94,17 +96,25 @@ async def query_memory(
 async def reset_memory_from_vector_db(
     request: Request, user=Depends(get_verified_user)
 ):
-    CHROMA_CLIENT.delete_collection(f"user-memory-{user.id}")
-    collection = CHROMA_CLIENT.get_or_create_collection(name=f"user-memory-{user.id}")
+    VECTOR_DB_CLIENT.delete_collection(f"user-memory-{user.id}")
 
     memories = Memories.get_memories_by_user_id(user.id)
-    for memory in memories:
-        memory_embedding = request.app.state.EMBEDDING_FUNCTION(memory.content)
-        collection.upsert(
-            documents=[memory.content],
-            ids=[memory.id],
-            embeddings=[memory_embedding],
-        )
+    VECTOR_DB_CLIENT.upsert(
+        collection_name=f"user-memory-{user.id}",
+        items=[
+            {
+                "id": memory.id,
+                "text": memory.content,
+                "vector": request.app.state.EMBEDDING_FUNCTION(memory.content),
+                "metadata": {
+                    "created_at": memory.created_at,
+                    "updated_at": memory.updated_at,
+                },
+            }
+            for memory in memories
+        ],
+    )
+
     return True
 
 
@@ -119,7 +129,7 @@ async def delete_memory_by_user_id(user=Depends(get_verified_user)):
 
     if result:
         try:
-            CHROMA_CLIENT.delete_collection(f"user-memory-{user.id}")
+            VECTOR_DB_CLIENT.delete_collection(f"user-memory-{user.id}")
         except Exception as e:
             log.error(e)
         return True
@@ -144,16 +154,18 @@ async def update_memory_by_id(
         raise HTTPException(status_code=404, detail="Memory not found")
 
     if form_data.content is not None:
-        memory_embedding = request.app.state.EMBEDDING_FUNCTION(form_data.content)
-        collection = CHROMA_CLIENT.get_or_create_collection(
-            name=f"user-memory-{user.id}"
-        )
-        collection.upsert(
-            documents=[form_data.content],
-            ids=[memory.id],
-            embeddings=[memory_embedding],
-            metadatas=[
-                {"created_at": memory.created_at, "updated_at": memory.updated_at}
+        VECTOR_DB_CLIENT.upsert(
+            collection_name=f"user-memory-{user.id}",
+            items=[
+                {
+                    "id": memory.id,
+                    "text": memory.content,
+                    "vector": request.app.state.EMBEDDING_FUNCTION(memory.content),
+                    "metadata": {
+                        "created_at": memory.created_at,
+                        "updated_at": memory.updated_at,
+                    },
+                }
             ],
         )
 
@@ -170,10 +182,9 @@ async def delete_memory_by_id(memory_id: str, user=Depends(get_verified_user)):
     result = Memories.delete_memory_by_id_and_user_id(memory_id, user.id)
 
     if result:
-        collection = CHROMA_CLIENT.get_or_create_collection(
-            name=f"user-memory-{user.id}"
+        VECTOR_DB_CLIENT.delete(
+            collection_name=f"user-memory-{user.id}", ids=[memory_id]
         )
-        collection.delete(ids=[memory_id])
         return True
 
     return False

+ 12 - 20
backend/open_webui/apps/webui/routers/models.py

@@ -18,8 +18,18 @@ router = APIRouter()
 
 
 @router.get("/", response_model=list[ModelResponse])
-async def get_models(user=Depends(get_verified_user)):
-    return Models.get_all_models()
+async def get_models(id: Optional[str] = None, user=Depends(get_verified_user)):
+    if id:
+        model = Models.get_model_by_id(id)
+        if model:
+            return [model]
+        else:
+            raise HTTPException(
+                status_code=status.HTTP_401_UNAUTHORIZED,
+                detail=ERROR_MESSAGES.NOT_FOUND,
+            )
+    else:
+        return Models.get_all_models()
 
 
 ############################
@@ -50,24 +60,6 @@ async def add_new_model(
             )
 
 
-############################
-# GetModelById
-############################
-
-
-@router.get("/", response_model=Optional[ModelModel])
-async def get_model_by_id(id: str, user=Depends(get_verified_user)):
-    model = Models.get_model_by_id(id)
-
-    if model:
-        return model
-    else:
-        raise HTTPException(
-            status_code=status.HTTP_401_UNAUTHORIZED,
-            detail=ERROR_MESSAGES.NOT_FOUND,
-        )
-
-
 ############################
 # UpdateModelById
 ############################

+ 22 - 3
backend/open_webui/apps/webui/utils.py

@@ -4,7 +4,7 @@ import subprocess
 import sys
 from importlib import util
 import types
-
+import tempfile
 
 from open_webui.apps.webui.models.functions import Functions
 from open_webui.apps.webui.models.tools import Tools
@@ -84,7 +84,15 @@ def load_toolkit_module_by_id(toolkit_id, content=None):
     module = types.ModuleType(module_name)
     sys.modules[module_name] = module
 
+    # Create a temporary file and use it to define `__file__` so
+    # that it works as expected from the module's perspective.
+    temp_file = tempfile.NamedTemporaryFile(delete=False)
+
     try:
+        with open(temp_file.name, "w", encoding="utf-8") as f:
+            f.write(content)
+        module.__dict__["__file__"] = temp_file.name
+
         # Executing the modified content in the created module's namespace
         exec(content, module.__dict__)
         frontmatter = extract_frontmatter(content)
@@ -96,9 +104,11 @@ def load_toolkit_module_by_id(toolkit_id, content=None):
         else:
             raise Exception("No Tools class found in the module")
     except Exception as e:
-        print(f"Error loading module: {toolkit_id}")
+        print(f"Error loading module: {toolkit_id}: {e}")
         del sys.modules[module_name]  # Clean up
         raise e
+    finally:
+        os.unlink(temp_file.name)
 
 
 def load_function_module_by_id(function_id, content=None):
@@ -118,7 +128,14 @@ def load_function_module_by_id(function_id, content=None):
     module = types.ModuleType(module_name)
     sys.modules[module_name] = module
 
+    # Create a temporary file and use it to define `__file__` so
+    # that it works as expected from the module's perspective.
+    temp_file = tempfile.NamedTemporaryFile(delete=False)
     try:
+        with open(temp_file.name, "w", encoding="utf-8") as f:
+            f.write(content)
+        module.__dict__["__file__"] = temp_file.name
+
         # Execute the modified content in the created module's namespace
         exec(content, module.__dict__)
         frontmatter = extract_frontmatter(content)
@@ -134,11 +151,13 @@ def load_function_module_by_id(function_id, content=None):
         else:
             raise Exception("No Function class found in the module")
     except Exception as e:
-        print(f"Error loading module: {function_id}")
+        print(f"Error loading module: {function_id}: {e}")
         del sys.modules[module_name]  # Cleanup by removing the module in case of error
 
         Functions.update_function_by_id(function_id, {"is_active": False})
         raise e
+    finally:
+        os.unlink(temp_file.name)
 
 
 def install_frontmatter_requirements(requirements):

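The temp-file pattern above gives dynamically exec'd tool and function code a real __file__, so modules that resolve paths relative to themselves keep working; a stripped-down sketch of the same idea:

import os
import sys
import tempfile
import types


def load_module_from_string(module_name: str, content: str) -> types.ModuleType:
    """Minimal sketch: exec `content` as a module that has a usable __file__."""
    module = types.ModuleType(module_name)
    sys.modules[module_name] = module

    temp_file = tempfile.NamedTemporaryFile(delete=False)
    try:
        with open(temp_file.name, "w", encoding="utf-8") as f:
            f.write(content)
        module.__dict__["__file__"] = temp_file.name
        exec(content, module.__dict__)
        return module
    except Exception:
        del sys.modules[module_name]  # clean up the half-loaded module
        raise
    finally:
        os.unlink(temp_file.name)
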
+ 87 - 91
backend/open_webui/config.py

@@ -11,7 +11,6 @@ import chromadb
 import requests
 import yaml
 from open_webui.apps.webui.internal.db import Base, get_db
-from chromadb import Settings
 from open_webui.env import (
     OPEN_WEBUI_DIR,
     DATA_DIR,
@@ -540,40 +539,6 @@ Path(TOOLS_DIR).mkdir(parents=True, exist_ok=True)
 FUNCTIONS_DIR = os.getenv("FUNCTIONS_DIR", f"{DATA_DIR}/functions")
 Path(FUNCTIONS_DIR).mkdir(parents=True, exist_ok=True)
 
-
-####################################
-# LITELLM_CONFIG
-####################################
-
-
-def create_config_file(file_path):
-    directory = os.path.dirname(file_path)
-
-    # Check if directory exists, if not, create it
-    if not os.path.exists(directory):
-        os.makedirs(directory)
-
-    # Data to write into the YAML file
-    config_data = {
-        "general_settings": {},
-        "litellm_settings": {},
-        "model_list": [],
-        "router_settings": {},
-    }
-
-    # Write data to YAML file
-    with open(file_path, "w") as file:
-        yaml.dump(config_data, file)
-
-
-LITELLM_CONFIG_PATH = f"{DATA_DIR}/litellm/config.yaml"
-
-# if not os.path.exists(LITELLM_CONFIG_PATH):
-#     log.info("Config file doesn't exist. Creating...")
-#     create_config_file(LITELLM_CONFIG_PATH)
-#     log.info("Config file created successfully.")
-
-
 ####################################
 # OLLAMA_BASE_URL
 ####################################
@@ -923,25 +888,12 @@ TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE = PersistentConfig(
 
 
 ####################################
-# RAG document content extraction
+# Vector Database
 ####################################
 
-CONTENT_EXTRACTION_ENGINE = PersistentConfig(
-    "CONTENT_EXTRACTION_ENGINE",
-    "rag.CONTENT_EXTRACTION_ENGINE",
-    os.environ.get("CONTENT_EXTRACTION_ENGINE", "").lower(),
-)
-
-TIKA_SERVER_URL = PersistentConfig(
-    "TIKA_SERVER_URL",
-    "rag.tika_server_url",
-    os.getenv("TIKA_SERVER_URL", "http://tika:9998"),  # Default for sidecar deployment
-)
-
-####################################
-# RAG
-####################################
+VECTOR_DB = os.environ.get("VECTOR_DB", "chroma")
 
+# Chroma
 CHROMA_DATA_PATH = f"{DATA_DIR}/vector_db"
 CHROMA_TENANT = os.environ.get("CHROMA_TENANT", chromadb.DEFAULT_TENANT)
 CHROMA_DATABASE = os.environ.get("CHROMA_DATABASE", chromadb.DEFAULT_DATABASE)
@@ -958,8 +910,29 @@ else:
 CHROMA_HTTP_SSL = os.environ.get("CHROMA_HTTP_SSL", "false").lower() == "true"
 # this uses the model defined in the Dockerfile ENV variable. If you don't use Docker or Docker-based deployments such as k8s, the default embedding model will be used (sentence-transformers/all-MiniLM-L6-v2)
 
+# Milvus
+
+MILVUS_URI = os.environ.get("MILVUS_URI", f"{DATA_DIR}/vector_db/milvus.db")
+
+####################################
+# RAG
+####################################
+
+# RAG Content Extraction
+CONTENT_EXTRACTION_ENGINE = PersistentConfig(
+    "CONTENT_EXTRACTION_ENGINE",
+    "rag.CONTENT_EXTRACTION_ENGINE",
+    os.environ.get("CONTENT_EXTRACTION_ENGINE", "").lower(),
+)
+
+TIKA_SERVER_URL = PersistentConfig(
+    "TIKA_SERVER_URL",
+    "rag.tika_server_url",
+    os.getenv("TIKA_SERVER_URL", "http://tika:9998"),  # Default for sidecar deployment
+)
+
 RAG_TOP_K = PersistentConfig(
-    "RAG_TOP_K", "rag.top_k", int(os.environ.get("RAG_TOP_K", "5"))
+    "RAG_TOP_K", "rag.top_k", int(os.environ.get("RAG_TOP_K", "3"))
 )
 RAG_RELEVANCE_THRESHOLD = PersistentConfig(
     "RAG_RELEVANCE_THRESHOLD",
@@ -1048,36 +1021,8 @@ RAG_RERANKING_MODEL_TRUST_REMOTE_CODE = (
     os.environ.get("RAG_RERANKING_MODEL_TRUST_REMOTE_CODE", "").lower() == "true"
 )
 
-
-if CHROMA_HTTP_HOST != "":
-    CHROMA_CLIENT = chromadb.HttpClient(
-        host=CHROMA_HTTP_HOST,
-        port=CHROMA_HTTP_PORT,
-        headers=CHROMA_HTTP_HEADERS,
-        ssl=CHROMA_HTTP_SSL,
-        tenant=CHROMA_TENANT,
-        database=CHROMA_DATABASE,
-        settings=Settings(allow_reset=True, anonymized_telemetry=False),
-    )
-else:
-    CHROMA_CLIENT = chromadb.PersistentClient(
-        path=CHROMA_DATA_PATH,
-        settings=Settings(allow_reset=True, anonymized_telemetry=False),
-        tenant=CHROMA_TENANT,
-        database=CHROMA_DATABASE,
-    )
-
-
-# device type embedding models - "cpu" (default), "cuda" (nvidia gpu required) or "mps" (apple silicon) - choosing this right can lead to better performance
-USE_CUDA = os.environ.get("USE_CUDA_DOCKER", "false")
-
-if USE_CUDA.lower() == "true":
-    DEVICE_TYPE = "cuda"
-else:
-    DEVICE_TYPE = "cpu"
-
 CHUNK_SIZE = PersistentConfig(
-    "CHUNK_SIZE", "rag.chunk_size", int(os.environ.get("CHUNK_SIZE", "1500"))
+    "CHUNK_SIZE", "rag.chunk_size", int(os.environ.get("CHUNK_SIZE", "1000"))
 )
 CHUNK_OVERLAP = PersistentConfig(
     "CHUNK_OVERLAP",
@@ -1085,19 +1030,25 @@ CHUNK_OVERLAP = PersistentConfig(
     int(os.environ.get("CHUNK_OVERLAP", "100")),
 )
 
-DEFAULT_RAG_TEMPLATE = """Use the following context as your learned knowledge, inside <context></context> XML tags.
+DEFAULT_RAG_TEMPLATE = """You are given a user query, some textual context and rules, all inside xml tags. You have to answer the query based on the context while respecting the rules.
+
 <context>
-    [context]
+[context]
 </context>
 
-When answer to user:
-- If you don't know, just say that you don't know.
-- If you don't know when you are not sure, ask for clarification.
-Avoid mentioning that you obtained the information from the context.
-And answer according to the language of the user's question.
-
-Given the context information, answer the query.
-Query: [query]"""
+<rules>
+- If you don't know, just say so.
+- If you are not sure, ask for clarification.
+- Answer in the same language as the user query.
+- If the context appears unreadable or of poor quality, tell the user, then answer as best as you can.
+- If the answer is not in the context but you think you know the answer, explain that to the user, then answer with your own knowledge.
+- Answer directly and without using xml tags.
+</rules>
+
+<user_query>
+[query]
+</user_query>
+"""
 
 RAG_TEMPLATE = PersistentConfig(
     "RAG_TEMPLATE",
@@ -1267,6 +1218,37 @@ AUTOMATIC1111_API_AUTH = PersistentConfig(
     os.getenv("AUTOMATIC1111_API_AUTH", ""),
 )
 
+AUTOMATIC1111_CFG_SCALE = PersistentConfig(
+    "AUTOMATIC1111_CFG_SCALE",
+    "image_generation.automatic1111.cfg_scale",
+    (
+        float(os.environ.get("AUTOMATIC1111_CFG_SCALE"))
+        if os.environ.get("AUTOMATIC1111_CFG_SCALE")
+        else None
+    ),
+)
+
+
+AUTOMATIC1111_SAMPLER = PersistentConfig(
+    "AUTOMATIC1111_SAMPLERE",
+    "image_generation.automatic1111.sampler",
+    (
+        os.environ.get("AUTOMATIC1111_SAMPLER")
+        if os.environ.get("AUTOMATIC1111_SAMPLER")
+        else None
+    ),
+)
+
+AUTOMATIC1111_SCHEDULER = PersistentConfig(
+    "AUTOMATIC1111_SCHEDULER",
+    "image_generation.automatic1111.scheduler",
+    (
+        os.environ.get("AUTOMATIC1111_SCHEDULER")
+        if os.environ.get("AUTOMATIC1111_SCHEDULER")
+        else None
+    ),
+)
+
 COMFYUI_BASE_URL = PersistentConfig(
     "COMFYUI_BASE_URL",
     "image_generation.comfyui.base_url",
@@ -1490,3 +1472,17 @@ AUDIO_TTS_SPLIT_ON = PersistentConfig(
     "audio.tts.split_on",
     os.getenv("AUDIO_TTS_SPLIT_ON", "punctuation"),
 )
+
+AUDIO_TTS_AZURE_SPEECH_REGION = PersistentConfig(
+    "AUDIO_TTS_AZURE_SPEECH_REGION",
+    "audio.tts.azure.speech_region",
+    os.getenv("AUDIO_TTS_AZURE_SPEECH_REGION", "eastus"),
+)
+
+AUDIO_TTS_AZURE_SPEECH_OUTPUT_FORMAT = PersistentConfig(
+    "AUDIO_TTS_AZURE_SPEECH_OUTPUT_FORMAT",
+    "audio.tts.azure.speech_output_format",
+    os.getenv(
+        "AUDIO_TTS_AZURE_SPEECH_OUTPUT_FORMAT", "audio-24khz-160kbitrate-mono-mp3"
+    ),
+)

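The new AUTOMATIC1111_* options intentionally default to None rather than a fixed value, so unset environment variables leave the Automatic1111 payload untouched. The parse-or-None pattern in isolation (the helper name is illustrative only):

import os


def env_float_or_none(name: str):
    # Mirrors AUTOMATIC1111_CFG_SCALE above: empty or unset -> None.
    raw = os.environ.get(name)
    return float(raw) if raw else None


cfg_scale = env_float_or_none("AUTOMATIC1111_CFG_SCALE")
sampler = os.environ.get("AUTOMATIC1111_SAMPLER") or None
scheduler = os.environ.get("AUTOMATIC1111_SCHEDULER") or None
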
+ 29 - 0
backend/open_webui/env.py

@@ -31,6 +31,28 @@ try:
 except ImportError:
     print("dotenv not installed, skipping...")
 
+DOCKER = os.environ.get("DOCKER", "False").lower() == "true"
+
+# device type embedding models - "cpu" (default), "cuda" (nvidia gpu required) or "mps" (apple silicon) - choosing this right can lead to better performance
+USE_CUDA = os.environ.get("USE_CUDA_DOCKER", "false")
+
+if USE_CUDA.lower() == "true":
+    try:
+        import torch
+
+        assert torch.cuda.is_available(), "CUDA not available"
+        DEVICE_TYPE = "cuda"
+    except Exception as e:
+        cuda_error = (
+            "Error when testing CUDA but USE_CUDA_DOCKER is true. "
+            f"Resetting USE_CUDA_DOCKER to false: {e}"
+        )
+        os.environ["USE_CUDA_DOCKER"] = "false"
+        USE_CUDA = "false"
+        DEVICE_TYPE = "cpu"
+else:
+    DEVICE_TYPE = "cpu"
+
 
 ####################################
 # LOGGING
@@ -47,6 +69,9 @@ else:
 log = logging.getLogger(__name__)
 log.info(f"GLOBAL_LOG_LEVEL: {GLOBAL_LOG_LEVEL}")
 
+if "cuda_error" in locals():
+    log.exception(cuda_error)
+
 log_sources = [
     "AUDIO",
     "COMFYUI",
@@ -273,3 +298,7 @@ WEBUI_SESSION_COOKIE_SECURE = os.environ.get(
 
 if WEBUI_AUTH and WEBUI_SECRET_KEY == "":
     raise ValueError(ERROR_MESSAGES.ENV_VAR_NOT_FOUND)
+
+ENABLE_WEBSOCKET_SUPPORT = (
+    os.environ.get("ENABLE_WEBSOCKET_SUPPORT", "True").lower() == "true"
+)

+ 60 - 14
backend/open_webui/main.py

@@ -109,6 +109,7 @@ from starlette.middleware.base import BaseHTTPMiddleware
 from starlette.middleware.sessions import SessionMiddleware
 from starlette.responses import RedirectResponse, Response, StreamingResponse
 
+from open_webui.utils.security_headers import SecurityHeadersMiddleware
 
 from open_webui.utils.misc import (
     add_or_update_system_message,
@@ -586,8 +587,17 @@ class ChatCompletionMiddleware(BaseHTTPMiddleware):
         if len(contexts) > 0:
             context_string = "/n".join(contexts).strip()
             prompt = get_last_user_message(body["messages"])
+
             if prompt is None:
                 raise Exception("No user message found")
+            if (
+                rag_app.state.config.RELEVANCE_THRESHOLD == 0
+                and context_string.strip() == ""
+            ):
+                log.debug(
+                    "With a relevance threshold of 0, the RAG context should not be empty"
+                )
+
             # Workaround for Ollama 2.0+ system prompt issue
             # TODO: replace with add_or_update_system_message
             if model["owned_by"] == "ollama":
@@ -780,6 +790,8 @@ app.add_middleware(
     allow_headers=["*"],
 )
 
+app.add_middleware(SecurityHeadersMiddleware)
+
 
 @app.middleware("http")
 async def commit_session_after_request(request: Request, call_next):
@@ -812,6 +824,24 @@ async def update_embedding_function(request: Request, call_next):
     return response
 
 
+@app.middleware("http")
+async def inspect_websocket(request: Request, call_next):
+    if (
+        "/ws/socket.io" in request.url.path
+        and request.query_params.get("transport") == "websocket"
+    ):
+        upgrade = (request.headers.get("Upgrade") or "").lower()
+        connection = (request.headers.get("Connection") or "").lower().split(",")
+        # Check that the correct headers are present for an upgrade; otherwise reject the connection.
+        # This works around an upstream issue: https://github.com/miguelgrinberg/python-engineio/issues/367
+        if upgrade != "websocket" or "upgrade" not in connection:
+            return JSONResponse(
+                status_code=status.HTTP_400_BAD_REQUEST,
+                content={"detail": "Invalid WebSocket upgrade request"},
+            )
+    return await call_next(request)
+
+
 app.mount("/ws", socket_app)
 
 app.mount("/ollama", ollama_app)
@@ -1368,9 +1398,9 @@ async def generate_title(form_data: dict, user=Depends(get_verified_user)):
 
     # Check if the user has a custom task model
     # If the user has a custom task model, use that model
-    model_id = get_task_model_id(model_id)
+    task_model_id = get_task_model_id(model_id)
 
-    print(model_id)
+    print(task_model_id)
 
     if app.state.config.TITLE_GENERATION_PROMPT_TEMPLATE != "":
         template = app.state.config.TITLE_GENERATION_PROMPT_TEMPLATE
@@ -1397,10 +1427,16 @@ Prompt: {{prompt:middletruncate:8000}}"""
     )
 
     payload = {
-        "model": model_id,
+        "model": task_model_id,
         "messages": [{"role": "user", "content": content}],
         "stream": False,
-        "max_tokens": 50,
+        **(
+            {"max_tokens": 50}
+            if app.state.MODELS[task_model_id]["owned_by"] == "ollama"
+            else {
+                "max_completion_tokens": 50,
+            }
+        ),
         "chat_id": form_data.get("chat_id", None),
         "metadata": {"task": str(TASKS.TITLE_GENERATION)},
     }
@@ -1445,9 +1481,8 @@ async def generate_search_query(form_data: dict, user=Depends(get_verified_user)
 
     # Check if the user has a custom task model
     # If the user has a custom task model, use that model
-    model_id = get_task_model_id(model_id)
-
-    print(model_id)
+    task_model_id = get_task_model_id(model_id)
+    print(task_model_id)
 
     if app.state.config.SEARCH_QUERY_GENERATION_PROMPT_TEMPLATE != "":
         template = app.state.config.SEARCH_QUERY_GENERATION_PROMPT_TEMPLATE
@@ -1469,10 +1504,16 @@ Search Query:"""
     print("content", content)
 
     payload = {
-        "model": model_id,
+        "model": task_model_id,
         "messages": [{"role": "user", "content": content}],
         "stream": False,
-        "max_tokens": 30,
+        **(
+            {"max_tokens": 30}
+            if app.state.MODELS[task_model_id]["owned_by"] == "ollama"
+            else {
+                "max_completion_tokens": 30,
+            }
+        ),
         "metadata": {"task": str(TASKS.QUERY_GENERATION)},
     }
 
@@ -1511,9 +1552,8 @@ async def generate_emoji(form_data: dict, user=Depends(get_verified_user)):
 
     # Check if the user has a custom task model
     # If the user has a custom task model, use that model
-    model_id = get_task_model_id(model_id)
-
-    print(model_id)
+    task_model_id = get_task_model_id(model_id)
+    print(task_model_id)
 
     template = '''
 Your task is to reflect the speaker's likely facial expression through a fitting emoji. Interpret emotions from the message and reflect their facial expression using fitting, diverse emojis (e.g., 😊, 😢, 😡, 😱).
@@ -1531,10 +1571,16 @@ Message: """{{prompt}}"""
     )
 
     payload = {
-        "model": model_id,
+        "model": task_model_id,
         "messages": [{"role": "user", "content": content}],
         "stream": False,
-        "max_tokens": 4,
+        **(
+            {"max_tokens": 4}
+            if app.state.MODELS[task_model_id]["owned_by"] == "ollama"
+            else {
+                "max_completion_tokens": 4,
+            }
+        ),
         "chat_id": form_data.get("chat_id", None),
         "metadata": {"task": str(TASKS.EMOJI_GENERATION)},
     }

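The task endpoints above now choose the token-limit key per backend: Ollama-owned models keep max_tokens, while OpenAI-compatible models receive max_completion_tokens. A condensed sketch of the selection (the helper name is illustrative only):

def token_limit_param(owned_by: str, limit: int) -> dict:
    # Ollama still expects "max_tokens"; OpenAI-compatible endpoints use
    # "max_completion_tokens" for newer models.
    if owned_by == "ollama":
        return {"max_tokens": limit}
    return {"max_completion_tokens": limit}


payload = {
    "model": "task-model-id",  # placeholder
    "messages": [{"role": "user", "content": "Generate a short title."}],
    "stream": False,
    **token_limit_param("openai", 50),
}
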
+ 2 - 2
backend/open_webui/utils/payload.py

@@ -44,9 +44,9 @@ def apply_model_params_to_body(
 def apply_model_params_to_body_openai(params: dict, form_data: dict) -> dict:
     mappings = {
         "temperature": float,
-        "top_p": int,
+        "top_p": float,
         "max_tokens": int,
-        "frequency_penalty": int,
+        "frequency_penalty": float,
         "seed": lambda x: x,
         "stop": lambda x: [bytes(s, "utf-8").decode("unicode_escape") for s in x],
     }

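With the corrected casts, fractional top_p and frequency_penalty values are preserved instead of being truncated to integers; a condensed illustration of the mapping step:

mappings = {
    "temperature": float,
    "top_p": float,
    "max_tokens": int,
    "frequency_penalty": float,
}

params = {"top_p": 0.9, "frequency_penalty": 0.5, "max_tokens": "256"}
converted = {k: mappings[k](v) for k, v in params.items() if k in mappings}
print(converted)  # {'top_p': 0.9, 'frequency_penalty': 0.5, 'max_tokens': 256}
# Under the previous int() casts, 0.9 and 0.5 would have collapsed to 0.
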
+ 115 - 0
backend/open_webui/utils/security_headers.py

@@ -0,0 +1,115 @@
+import re
+import os
+
+from fastapi import Request
+from starlette.middleware.base import BaseHTTPMiddleware
+from typing import Dict
+
+
+class SecurityHeadersMiddleware(BaseHTTPMiddleware):
+    async def dispatch(self, request: Request, call_next):
+        response = await call_next(request)
+        response.headers.update(set_security_headers())
+        return response
+
+
+def set_security_headers() -> Dict[str, str]:
+    """
+    Sets security headers based on environment variables.
+
+    This function reads specific environment variables and uses their values
+    to set corresponding security headers. The headers that can be set are:
+    - cache-control
+    - strict-transport-security
+    - referrer-policy
+    - x-content-type-options
+    - x-download-options
+    - x-frame-options
+    - x-permitted-cross-domain-policies
+
+    Each environment variable is associated with a specific setter function
+    that constructs the header. If the environment variable is set, the
+    corresponding header is added to the options dictionary.
+
+    Returns:
+        dict: A dictionary containing the security headers and their values.
+    """
+    options = {}
+    header_setters = {
+        "CACHE_CONTROL": set_cache_control,
+        "HSTS": set_hsts,
+        "REFERRER_POLICY": set_referrer,
+        "XCONTENT_TYPE": set_xcontent_type,
+        "XDOWNLOAD_OPTIONS": set_xdownload_options,
+        "XFRAME_OPTIONS": set_xframe,
+        "XPERMITTED_CROSS_DOMAIN_POLICIES": set_xpermitted_cross_domain_policies,
+    }
+
+    for env_var, setter in header_setters.items():
+        value = os.environ.get(env_var, None)
+        if value:
+            header = setter(value)
+            if header:
+                options.update(header)
+
+    return options
+
+
+# Set HTTP Strict Transport Security(HSTS) response header
+def set_hsts(value: str):
+    pattern = r"^max-age=(\d+)(;includeSubDomains)?(;preload)?$"
+    match = re.match(pattern, value, re.IGNORECASE)
+    if not match:
+        return "max-age=31536000;includeSubDomains"
+    return {"Strict-Transport-Security": value}
+
+
+# Set X-Frame-Options response header
+def set_xframe(value: str):
+    pattern = r"^(DENY|SAMEORIGIN)$"
+    match = re.match(pattern, value, re.IGNORECASE)
+    if not match:
+        value = "DENY"
+    return {"X-Frame-Options": value}
+
+
+# Set Referrer-Policy response header
+def set_referrer(value: str):
+    pattern = r"^(no-referrer|no-referrer-when-downgrade|origin|origin-when-cross-origin|same-origin|strict-origin|strict-origin-when-cross-origin|unsafe-url)$"
+    match = re.match(pattern, value, re.IGNORECASE)
+    if not match:
+        value = "no-referrer"
+    return {"Referrer-Policy": value}
+
+
+# Set Cache-Control response header
+def set_cache_control(value: str):
+    pattern = r"^(public|private|no-cache|no-store|must-revalidate|proxy-revalidate|max-age=\d+|s-maxage=\d+|no-transform|immutable)(,\s*(public|private|no-cache|no-store|must-revalidate|proxy-revalidate|max-age=\d+|s-maxage=\d+|no-transform|immutable))*$"
+    match = re.match(pattern, value, re.IGNORECASE)
+    if not match:
+        value = "no-store, max-age=0"
+
+    return {"Cache-Control": value}
+
+
+# Set X-Download-Options response header
+def set_xdownload_options(value: str):
+    if value != "noopen":
+        value = "noopen"
+    return {"X-Download-Options": value}
+
+
+# Set X-Content-Type-Options response header
+def set_xcontent_type(value: str):
+    if value != "nosniff":
+        value = "nosniff"
+    return {"X-Content-Type-Options": value}
+
+
+# Set X-Permitted-Cross-Domain-Policies response header
+def set_xpermitted_cross_domain_policies(value: str):
+    pattern = r"^(none|master-only|by-content-type|by-ftp-filename)$"
+    match = re.match(pattern, value, re.IGNORECASE)
+    if not match:
+        value = "none"
+    return {"X-Permitted-Cross-Domain-Policies": value}

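A quick usage sketch: with the middleware registered in main.py above, the headers are driven entirely by environment variables, and invalid values fall back to safe defaults.

import os

from open_webui.utils.security_headers import set_security_headers

os.environ["HSTS"] = "max-age=63072000;includeSubDomains"
os.environ["XFRAME_OPTIONS"] = "ALLOW-FROM example.com"  # invalid -> falls back to DENY
os.environ["REFERRER_POLICY"] = "same-origin"

print(set_security_headers())
# {'Strict-Transport-Security': 'max-age=63072000;includeSubDomains',
#  'Referrer-Policy': 'same-origin',
#  'X-Frame-Options': 'DENY'}
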
+ 5 - 0
backend/requirements.txt

@@ -40,7 +40,12 @@ langchain-chroma==0.1.2
 
 fake-useragent==1.5.1
 chromadb==0.5.5
+pymilvus==2.4.6
+
 sentence-transformers==3.0.1
+colbert-ai==0.2.21
+einops==0.8.0
+
 pypdf==4.3.1
 docx2txt==0.8
 python-pptx==1.0.0

+ 8 - 0
kubernetes/manifest/base/kustomization.yaml

@@ -0,0 +1,8 @@
+resources:
+  - open-webui.yaml
+  - ollama-service.yaml
+  - ollama-statefulset.yaml
+  - webui-deployment.yaml
+  - webui-service.yaml
+  - webui-ingress.yaml
+  - webui-pvc.yaml

+ 8 - 0
kubernetes/manifest/gpu/kustomization.yaml

@@ -0,0 +1,8 @@
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+
+resources:
+  - ../base
+
+patches:
+- path: ollama-statefulset-gpu.yaml

+ 0 - 0
kubernetes/manifest/patches/ollama-statefulset-gpu.yaml → kubernetes/manifest/gpu/ollama-statefulset-gpu.yaml


+ 0 - 13
kubernetes/manifest/kustomization.yaml

@@ -1,13 +0,0 @@
-resources:
-- base/open-webui.yaml
-- base/ollama-service.yaml
-- base/ollama-statefulset.yaml
-- base/webui-deployment.yaml
-- base/webui-service.yaml
-- base/webui-ingress.yaml
-- base/webui-pvc.yaml
-
-apiVersion: kustomize.config.k8s.io/v1beta1
-kind: Kustomization
-patches:
-- path: patches/ollama-statefulset-gpu.yaml

+ 104 - 49
package-lock.json

@@ -1,18 +1,19 @@
 {
 	"name": "open-webui",
-	"version": "0.3.21",
+	"version": "0.3.22",
 	"lockfileVersion": 3,
 	"requires": true,
 	"packages": {
 		"": {
 			"name": "open-webui",
-			"version": "0.3.21",
+			"version": "0.3.22",
 			"dependencies": {
 				"@codemirror/lang-javascript": "^6.2.2",
 				"@codemirror/lang-python": "^6.1.6",
 				"@codemirror/theme-one-dark": "^6.1.2",
 				"@pyscript/core": "^0.4.32",
 				"@sveltejs/adapter-node": "^2.0.0",
+				"@xyflow/svelte": "^0.1.19",
 				"async": "^3.2.5",
 				"bits-ui": "^0.19.7",
 				"codemirror": "^6.0.1",
@@ -45,7 +46,6 @@
 				"@sveltejs/kit": "^2.5.20",
 				"@sveltejs/vite-plugin-svelte": "^3.1.1",
 				"@tailwindcss/typography": "^0.5.13",
-				"@types/bun": "latest",
 				"@typescript-eslint/eslint-plugin": "^6.17.0",
 				"@typescript-eslint/parser": "^6.17.0",
 				"autoprefixer": "^10.4.16",
@@ -1368,6 +1368,14 @@
 			"resolved": "https://registry.npmjs.org/@socket.io/component-emitter/-/component-emitter-3.1.2.tgz",
 			"integrity": "sha512-9BCxFwvbGg/RsZK9tjXd8s4UcwR0MWeFQ1XEKIQVVvAGJyINdrqKMcTRyLoK8Rse1GjzLV9cwjWV1olXRWEXVA=="
 		},
+		"node_modules/@svelte-put/shortcut": {
+			"version": "3.1.1",
+			"resolved": "https://registry.npmjs.org/@svelte-put/shortcut/-/shortcut-3.1.1.tgz",
+			"integrity": "sha512-2L5EYTZXiaKvbEelVkg5znxqvfZGZai3m97+cAiUBhLZwXnGtviTDpHxOoZBsqz41szlfRMcamW/8o0+fbW3ZQ==",
+			"peerDependencies": {
+				"svelte": "^3.55.0 || ^4.0.0 || ^5.0.0"
+			}
+		},
 		"node_modules/@sveltejs/adapter-auto": {
 			"version": "3.2.2",
 			"resolved": "https://registry.npmjs.org/@sveltejs/adapter-auto/-/adapter-auto-3.2.2.tgz",
@@ -1494,20 +1502,32 @@
 				"tailwindcss": ">=3.0.0 || insiders"
 			}
 		},
-		"node_modules/@types/bun": {
-			"version": "1.0.10",
-			"resolved": "https://registry.npmjs.org/@types/bun/-/bun-1.0.10.tgz",
-			"integrity": "sha512-Jaz6YYAdm1u3NVlgSyEK+qGmrlLQ20sbWeEoXD64b9w6z/YKYNWlfaphu+xF2Kiy5Tpykm5Q9jIquLegwXx4ng==",
-			"dev": true,
-			"dependencies": {
-				"bun-types": "1.0.33"
-			}
-		},
 		"node_modules/@types/cookie": {
 			"version": "0.6.0",
 			"resolved": "https://registry.npmjs.org/@types/cookie/-/cookie-0.6.0.tgz",
 			"integrity": "sha512-4Kh9a6B2bQciAhf7FSuMRRkUWecJgJu9nPnx3yzpsfXX/c50REIqpHY4C82bXP90qrLtXtkDxTZosYO3UpOwlA=="
 		},
+		"node_modules/@types/d3-color": {
+			"version": "3.1.3",
+			"resolved": "https://registry.npmjs.org/@types/d3-color/-/d3-color-3.1.3.tgz",
+			"integrity": "sha512-iO90scth9WAbmgv7ogoq57O9YpKmFBbmoEoCHDB2xMBY0+/KVrqAaCDyCE16dUspeOvIxFFRI+0sEtqDqy2b4A=="
+		},
+		"node_modules/@types/d3-drag": {
+			"version": "3.0.7",
+			"resolved": "https://registry.npmjs.org/@types/d3-drag/-/d3-drag-3.0.7.tgz",
+			"integrity": "sha512-HE3jVKlzU9AaMazNufooRJ5ZpWmLIoc90A37WU2JMmeq28w1FQqCZswHZ3xR+SuxYftzHq6WU6KJHvqxKzTxxQ==",
+			"dependencies": {
+				"@types/d3-selection": "*"
+			}
+		},
+		"node_modules/@types/d3-interpolate": {
+			"version": "3.0.4",
+			"resolved": "https://registry.npmjs.org/@types/d3-interpolate/-/d3-interpolate-3.0.4.tgz",
+			"integrity": "sha512-mgLPETlrpVV1YRJIglr4Ez47g7Yxjl1lj7YKsiMCb27VJH9W8NVM6Bb9d8kkpG/uAQS5AmbA48q2IAolKKo1MA==",
+			"dependencies": {
+				"@types/d3-color": "*"
+			}
+		},
 		"node_modules/@types/d3-scale": {
 			"version": "4.0.8",
 			"resolved": "https://registry.npmjs.org/@types/d3-scale/-/d3-scale-4.0.8.tgz",
@@ -1521,11 +1541,33 @@
 			"resolved": "https://registry.npmjs.org/@types/d3-scale-chromatic/-/d3-scale-chromatic-3.0.3.tgz",
 			"integrity": "sha512-laXM4+1o5ImZv3RpFAsTRn3TEkzqkytiOY0Dz0sq5cnd1dtNlk6sHLon4OvqaiJb28T0S/TdsBI3Sjsy+keJrw=="
 		},
+		"node_modules/@types/d3-selection": {
+			"version": "3.0.10",
+			"resolved": "https://registry.npmjs.org/@types/d3-selection/-/d3-selection-3.0.10.tgz",
+			"integrity": "sha512-cuHoUgS/V3hLdjJOLTT691+G2QoqAjCVLmr4kJXR4ha56w1Zdu8UUQ5TxLRqudgNjwXeQxKMq4j+lyf9sWuslg=="
+		},
 		"node_modules/@types/d3-time": {
 			"version": "3.0.3",
 			"resolved": "https://registry.npmjs.org/@types/d3-time/-/d3-time-3.0.3.tgz",
 			"integrity": "sha512-2p6olUZ4w3s+07q3Tm2dbiMZy5pCDfYwtLXXHUnVzXgQlZ/OyPtUz6OL382BkOuGlLXqfT+wqv8Fw2v8/0geBw=="
 		},
+		"node_modules/@types/d3-transition": {
+			"version": "3.0.8",
+			"resolved": "https://registry.npmjs.org/@types/d3-transition/-/d3-transition-3.0.8.tgz",
+			"integrity": "sha512-ew63aJfQ/ms7QQ4X7pk5NxQ9fZH/z+i24ZfJ6tJSfqxJMrYLiK01EAs2/Rtw/JreGUsS3pLPNV644qXFGnoZNQ==",
+			"dependencies": {
+				"@types/d3-selection": "*"
+			}
+		},
+		"node_modules/@types/d3-zoom": {
+			"version": "3.0.8",
+			"resolved": "https://registry.npmjs.org/@types/d3-zoom/-/d3-zoom-3.0.8.tgz",
+			"integrity": "sha512-iqMC4/YlFCSlO8+2Ii1GGGliCAY4XdeG748w5vQUbevlbDu0zSjH/+jojorQVBK/se0j6DUFNPBGSqD3YWYnDw==",
+			"dependencies": {
+				"@types/d3-interpolate": "*",
+				"@types/d3-selection": "*"
+			}
+		},
 		"node_modules/@types/debug": {
 			"version": "4.1.12",
 			"resolved": "https://registry.npmjs.org/@types/debug/-/debug-4.1.12.tgz",
@@ -1568,7 +1610,7 @@
 			"version": "20.11.30",
 			"resolved": "https://registry.npmjs.org/@types/node/-/node-20.11.30.tgz",
 			"integrity": "sha512-dHM6ZxwlmuZaRmUPfv1p+KrdD1Dci04FbdEm/9wEMouFqxYoFl5aMkt0VMAUtYRQDyYvD41WJLukhq/ha3YuTw==",
-			"devOptional": true,
+			"optional": true,
 			"dependencies": {
 				"undici-types": "~5.26.4"
 			}
@@ -1613,15 +1655,6 @@
 			"resolved": "https://registry.npmjs.org/@types/unist/-/unist-2.0.10.tgz",
 			"integrity": "sha512-IfYcSBWE3hLpBg8+X2SEa8LVkJdJEkT2Ese2aaLs3ptGdVtABxndrMaxuFlQ1qdFf9Q5rDvDpxI3WwgvKFAsQA=="
 		},
-		"node_modules/@types/ws": {
-			"version": "8.5.10",
-			"resolved": "https://registry.npmjs.org/@types/ws/-/ws-8.5.10.tgz",
-			"integrity": "sha512-vmQSUcfalpIq0R9q7uTo2lXs6eGIpt9wtnLdMv9LVpIjCA/+ufZRozlVoVelIYixx1ugCBKDhn89vnsEGOCx9A==",
-			"dev": true,
-			"dependencies": {
-				"@types/node": "*"
-			}
-		},
 		"node_modules/@types/yauzl": {
 			"version": "2.10.3",
 			"resolved": "https://registry.npmjs.org/@types/yauzl/-/yauzl-2.10.3.tgz",
@@ -1942,6 +1975,33 @@
 			"resolved": "https://registry.npmjs.org/@webreflection/fetch/-/fetch-0.1.5.tgz",
 			"integrity": "sha512-zCcqCJoNLvdeF41asAK71XPlwSPieeRDsE09albBunJEksuYPYNillKNQjf8p5BqSoTKTuKrW3lUm3MNodUC4g=="
 		},
+		"node_modules/@xyflow/svelte": {
+			"version": "0.1.19",
+			"resolved": "https://registry.npmjs.org/@xyflow/svelte/-/svelte-0.1.19.tgz",
+			"integrity": "sha512-yW5w5aI+Yqkob4kLQpVDo/ZmX+E9Pw7459kqwLfv4YG4N1NYXrsDRh9cyph/rapbuDnPi6zqK5E8LKrgaCQC0w==",
+			"dependencies": {
+				"@svelte-put/shortcut": "^3.1.0",
+				"@xyflow/system": "0.0.42",
+				"classcat": "^5.0.4"
+			},
+			"peerDependencies": {
+				"svelte": "^3.0.0 || ^4.0.0"
+			}
+		},
+		"node_modules/@xyflow/system": {
+			"version": "0.0.42",
+			"resolved": "https://registry.npmjs.org/@xyflow/system/-/system-0.0.42.tgz",
+			"integrity": "sha512-kWYj+Y0GOct0jKYTdyRMNOLPxGNbb2TYvPg2gTmJnZ31DOOMkL5uRBLX825DR2gOACDu+i5FHLxPJUPf/eGOJw==",
+			"dependencies": {
+				"@types/d3-drag": "^3.0.7",
+				"@types/d3-selection": "^3.0.10",
+				"@types/d3-transition": "^3.0.8",
+				"@types/d3-zoom": "^3.0.8",
+				"d3-drag": "^3.0.0",
+				"d3-selection": "^3.0.0",
+				"d3-zoom": "^3.0.0"
+			}
+		},
 		"node_modules/acorn": {
 			"version": "8.11.3",
 			"resolved": "https://registry.npmjs.org/acorn/-/acorn-8.11.3.tgz",
@@ -2533,16 +2593,6 @@
 				"url": "https://github.com/sponsors/sindresorhus"
 			}
 		},
-		"node_modules/bun-types": {
-			"version": "1.0.33",
-			"resolved": "https://registry.npmjs.org/bun-types/-/bun-types-1.0.33.tgz",
-			"integrity": "sha512-L5tBIf9g6rBBkvshqysi5NoLQ9NnhSPU1pfJ9FzqoSfofYdyac3WLUnOIuQ+M5za/sooVUOP2ko+E6Tco0OLIA==",
-			"dev": true,
-			"dependencies": {
-				"@types/node": "~20.11.3",
-				"@types/ws": "~8.5.10"
-			}
-		},
 		"node_modules/cac": {
 			"version": "6.7.14",
 			"resolved": "https://registry.npmjs.org/cac/-/cac-6.7.14.tgz",
@@ -2777,6 +2827,11 @@
 				"node": ">=8"
 			}
 		},
+		"node_modules/classcat": {
+			"version": "5.0.5",
+			"resolved": "https://registry.npmjs.org/classcat/-/classcat-5.0.5.tgz",
+			"integrity": "sha512-JhZUT7JFcQy/EzW605k/ktHtncoo9vnyW/2GspNYwFlN1C/WmjuV/xtS04e9SOkL2sTdw0VAZ2UGCcQ9lR6p6w=="
+		},
 		"node_modules/clean-stack": {
 			"version": "2.2.0",
 			"resolved": "https://registry.npmjs.org/clean-stack/-/clean-stack-2.2.0.tgz",
@@ -7094,9 +7149,9 @@
 			}
 		},
 		"node_modules/picocolors": {
-			"version": "1.0.1",
-			"resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.0.1.tgz",
-			"integrity": "sha512-anP1Z8qwhkbmu7MFP5iTt+wQKXgwzf7zTyGlcdzabySa9vd0Xt392U0rVmz9poOaBj0uHJKyyo9/upk0HrEQew=="
+			"version": "1.1.0",
+			"resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.1.0.tgz",
+			"integrity": "sha512-TQ92mBOW0l3LeMeyLV6mzy/kWr8lkd/hp3mTg7wYK7zJhuBStmGMBG0BdeDZS/dZx1IukaX6Bk11zcln25o1Aw=="
 		},
 		"node_modules/picomatch": {
 			"version": "2.3.1",
@@ -7162,9 +7217,9 @@
 			}
 		},
 		"node_modules/postcss": {
-			"version": "8.4.41",
-			"resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.41.tgz",
-			"integrity": "sha512-TesUflQ0WKZqAvg52PWL6kHgLKP6xB6heTOdoYM0Wt2UHyxNa4K25EZZMgKns3BH1RLVbZCREPpLY0rhnNoHVQ==",
+			"version": "8.4.47",
+			"resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.47.tgz",
+			"integrity": "sha512-56rxCq7G/XfB4EkXq9Egn5GCqugWvDFjafDOThIdMBsI15iqPqR5r15TfSr1YPYeEI19YeaXMCbY6u88Y76GLQ==",
 			"funding": [
 				{
 					"type": "opencollective",
@@ -7181,8 +7236,8 @@
 			],
 			"dependencies": {
 				"nanoid": "^3.3.7",
-				"picocolors": "^1.0.1",
-				"source-map-js": "^1.2.0"
+				"picocolors": "^1.1.0",
+				"source-map-js": "^1.2.1"
 			},
 			"engines": {
 				"node": "^10 || ^12 || >=14"
@@ -8195,9 +8250,9 @@
 			"integrity": "sha512-FJF5jgdfvoKn1MAKSdGs33bIqLi3LmsgVTliuX6iITj834F+JRQZN90Z93yql8h0K2t0RwDPBmxwlbZfDcxNZA=="
 		},
 		"node_modules/source-map-js": {
-			"version": "1.2.0",
-			"resolved": "https://registry.npmjs.org/source-map-js/-/source-map-js-1.2.0.tgz",
-			"integrity": "sha512-itJW8lvSA0TXEphiRoawsCksnlf8SyvmFzIhltqAHluXd88pkCd+cXJVHTDwdCr0IzwptSm035IHQktUu1QUMg==",
+			"version": "1.2.1",
+			"resolved": "https://registry.npmjs.org/source-map-js/-/source-map-js-1.2.1.tgz",
+			"integrity": "sha512-UXWMKhLOwVKb728IUtQPXxfYU+usdybtUrK/8uGE8CQMvrhOpwvzDBwj0QhSL7MQc7vIsISBG8VQ8+IDQxpfQA==",
 			"engines": {
 				"node": ">=0.10.0"
 			}
@@ -9112,7 +9167,7 @@
 			"version": "5.26.5",
 			"resolved": "https://registry.npmjs.org/undici-types/-/undici-types-5.26.5.tgz",
 			"integrity": "sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA==",
-			"devOptional": true
+			"optional": true
 		},
 		"node_modules/unist-util-stringify-position": {
 			"version": "3.0.3",
@@ -9329,13 +9384,13 @@
 			}
 		},
 		"node_modules/vite": {
-			"version": "5.4.0",
-			"resolved": "https://registry.npmjs.org/vite/-/vite-5.4.0.tgz",
-			"integrity": "sha512-5xokfMX0PIiwCMCMb9ZJcMyh5wbBun0zUzKib+L65vAZ8GY9ePZMXxFrHbr/Kyll2+LSCY7xtERPpxkBDKngwg==",
+			"version": "5.4.6",
+			"resolved": "https://registry.npmjs.org/vite/-/vite-5.4.6.tgz",
+			"integrity": "sha512-IeL5f8OO5nylsgzd9tq4qD2QqI0k2CQLGrWD0rCN0EQJZpBK5vJAx0I+GDkMOXxQX/OfFHMuLIx6ddAxGX/k+Q==",
 			"dependencies": {
 				"esbuild": "^0.21.3",
-				"postcss": "^8.4.40",
-				"rollup": "^4.13.0"
+				"postcss": "^8.4.43",
+				"rollup": "^4.20.0"
 			},
 			"bin": {
 				"vite": "bin/vite.js"

+ 2 - 2
package.json

@@ -1,6 +1,6 @@
 {
 	"name": "open-webui",
-	"version": "0.3.21",
+	"version": "0.3.22",
 	"private": true,
 	"scripts": {
 		"dev": "npm run pyodide:fetch && vite dev --host",
@@ -25,7 +25,6 @@
 		"@sveltejs/kit": "^2.5.20",
 		"@sveltejs/vite-plugin-svelte": "^3.1.1",
 		"@tailwindcss/typography": "^0.5.13",
-		"@types/bun": "latest",
 		"@typescript-eslint/eslint-plugin": "^6.17.0",
 		"@typescript-eslint/parser": "^6.17.0",
 		"autoprefixer": "^10.4.16",
@@ -54,6 +53,7 @@
 		"@codemirror/theme-one-dark": "^6.1.2",
 		"@pyscript/core": "^0.4.32",
 		"@sveltejs/adapter-node": "^2.0.0",
+		"@xyflow/svelte": "^0.1.19",
 		"async": "^3.2.5",
 		"bits-ui": "^0.19.7",
 		"codemirror": "^6.0.1",

+ 5 - 0
pyproject.toml

@@ -47,7 +47,12 @@ dependencies = [
 
     "fake-useragent==1.5.1",
     "chromadb==0.5.5",
+    "pymilvus==2.4.6",
+
     "sentence-transformers==3.0.1",
+    "colbert-ai==0.2.21",
+    "einops==0.8.0",
+    
     "pypdf==4.3.1",
     "docx2txt==0.8",
     "python-pptx==1.0.0",

+ 17 - 15
src/app.css

@@ -50,21 +50,6 @@ iframe {
 	@apply rounded-lg;
 }
 
-ol > li {
-	counter-increment: list-number;
-	display: block;
-	margin-bottom: 0;
-	margin-top: 0;
-	min-height: 28px;
-}
-
-.prose ol > li::before {
-	content: counters(list-number, '.') '.';
-	padding-right: 0.5rem;
-	color: var(--tw-prose-counters);
-	font-weight: 400;
-}
-
 li p {
 	display: inline;
 }
@@ -171,3 +156,20 @@ input[type='number'] {
 	font-weight: 600;
 	@apply rounded-md dark:bg-gray-800 bg-gray-100 mx-0.5;
 }
+
+.svelte-flow {
+	background-color: transparent !important;
+}
+
+.svelte-flow__edge > path {
+	stroke-width: 0.5;
+}
+
+.svelte-flow__edge.animated > path {
+	stroke-width: 2;
+	@apply stroke-gray-600 dark:stroke-gray-500;
+}
+
+.bg-gray-950-90 {
+	background-color: rgba(var(--color-gray-950, #0d0d0d), 0.9);
+}

+ 20 - 14
src/app.html

@@ -4,7 +4,11 @@
 		<meta charset="utf-8" />
 		<link rel="icon" href="%sveltekit.assets%/favicon.png" />
 		<link rel="manifest" href="%sveltekit.assets%/manifest.json" crossorigin="use-credentials" />
-		<meta name="viewport" content="width=device-width, initial-scale=1, maximum-scale=1" />
+		<meta
+			name="viewport"
+			content="width=device-width, initial-scale=1, maximum-scale=1, viewport-fit=cover"
+		/>
+		<meta name="theme-color" content="#171717" />
 		<meta name="robots" content="noindex,nofollow" />
 		<meta name="description" content="Open WebUI" />
 		<link
@@ -23,33 +27,33 @@
 		<script>
 			// On page load or when changing themes, best to add inline in `head` to avoid FOUC
 			(() => {
+				const metaThemeColorTag = document.querySelector('meta[name="theme-color"]');
+				const prefersDarkTheme = window.matchMedia('(prefers-color-scheme: dark)').matches;
+
 				if (!localStorage?.theme) {
 					localStorage.theme = 'system';
 				}
 
-				if (localStorage?.theme && localStorage?.theme.includes('oled')) {
+				if (localStorage.theme === 'system') {
+					document.documentElement.classList.add(prefersDarkTheme ? 'dark' : 'light');
+					metaThemeColorTag.setAttribute('content', prefersDarkTheme ? '#171717' : '#ffffff');
+				} else if (localStorage.theme === 'oled-dark') {
 					document.documentElement.style.setProperty('--color-gray-800', '#101010');
 					document.documentElement.style.setProperty('--color-gray-850', '#050505');
 					document.documentElement.style.setProperty('--color-gray-900', '#000000');
 					document.documentElement.style.setProperty('--color-gray-950', '#000000');
 					document.documentElement.classList.add('dark');
-				} else if (
-					localStorage.theme === 'light' ||
-					(!('theme' in localStorage) && window.matchMedia('(prefers-color-scheme: light)').matches)
-				) {
+					metaThemeColorTag.setAttribute('content', '#000000');
+				} else if (localStorage.theme === 'light') {
 					document.documentElement.classList.add('light');
-				} else if (localStorage.theme && localStorage.theme !== 'system') {
-					localStorage.theme.split(' ').forEach((e) => {
-						document.documentElement.classList.add(e);
-					});
-				} else if (localStorage.theme && localStorage.theme === 'system') {
-					systemTheme = window.matchMedia('(prefers-color-scheme: dark)').matches;
-					document.documentElement.classList.add(systemTheme ? 'dark' : 'light');
-				} else if (localStorage.theme && localStorage.theme === 'her') {
+					metaThemeColorTag.setAttribute('content', '#ffffff');
+				} else if (localStorage.theme === 'her') {
 					document.documentElement.classList.add('dark');
 					document.documentElement.classList.add('her');
+					metaThemeColorTag.setAttribute('content', '#983724');
 				} else {
 					document.documentElement.classList.add('dark');
+					metaThemeColorTag.setAttribute('content', '#171717');
 				}
 
 				window.matchMedia('(prefers-color-scheme: dark)').addListener((e) => {
@@ -57,9 +61,11 @@
 						if (e.matches) {
 							document.documentElement.classList.add('dark');
 							document.documentElement.classList.remove('light');
+							metaThemeColorTag.setAttribute('content', '#171717');
 						} else {
 							document.documentElement.classList.add('light');
 							document.documentElement.classList.remove('dark');
+							metaThemeColorTag.setAttribute('content', '#ffffff');
 						}
 					}
 				});

+ 69 - 1
src/lib/components/admin/Settings/Audio.svelte

@@ -31,6 +31,8 @@
 	let TTS_MODEL = '';
 	let TTS_VOICE = '';
 	let TTS_SPLIT_ON: TTS_RESPONSE_SPLIT = TTS_RESPONSE_SPLIT.PUNCTUATION;
+	let TTS_AZURE_SPEECH_REGION = '';
+	let TTS_AZURE_SPEECH_OUTPUT_FORMAT = '';
 
 	let STT_OPENAI_API_BASE_URL = '';
 	let STT_OPENAI_API_KEY = '';
@@ -87,7 +89,9 @@
 				ENGINE: TTS_ENGINE,
 				MODEL: TTS_MODEL,
 				VOICE: TTS_VOICE,
-				SPLIT_ON: TTS_SPLIT_ON
+				SPLIT_ON: TTS_SPLIT_ON,
+				AZURE_SPEECH_REGION: TTS_AZURE_SPEECH_REGION,
+				AZURE_SPEECH_OUTPUT_FORMAT: TTS_AZURE_SPEECH_OUTPUT_FORMAT
 			},
 			stt: {
 				OPENAI_API_BASE_URL: STT_OPENAI_API_BASE_URL,
@@ -120,6 +124,9 @@
 
 			TTS_SPLIT_ON = res.tts.SPLIT_ON || TTS_RESPONSE_SPLIT.PUNCTUATION;
 
+			TTS_AZURE_SPEECH_OUTPUT_FORMAT = res.tts.AZURE_SPEECH_OUTPUT_FORMAT;
+			TTS_AZURE_SPEECH_REGION = res.tts.AZURE_SPEECH_REGION;
+
 			STT_OPENAI_API_BASE_URL = res.stt.OPENAI_API_BASE_URL;
 			STT_OPENAI_API_KEY = res.stt.OPENAI_API_KEY;
 
@@ -224,6 +231,7 @@
 							<option value="">{$i18n.t('Web API')}</option>
 							<option value="openai">{$i18n.t('OpenAI')}</option>
 							<option value="elevenlabs">{$i18n.t('ElevenLabs')}</option>
+							<option value="azure">{$i18n.t('Azure AI Speech')}</option>
 						</select>
 					</div>
 				</div>
@@ -252,6 +260,23 @@
 							/>
 						</div>
 					</div>
+				{:else if TTS_ENGINE === 'azure'}
+					<div>
+						<div class="mt-1 flex gap-2 mb-1">
+							<input
+								class="flex-1 w-full rounded-lg py-2 pl-4 text-sm bg-gray-50 dark:text-gray-300 dark:bg-gray-850 outline-none"
+								placeholder={$i18n.t('API Key')}
+								bind:value={TTS_API_KEY}
+								required
+							/>
+							<input
+								class="flex-1 w-full rounded-lg py-2 pl-4 text-sm bg-gray-50 dark:text-gray-300 dark:bg-gray-850 outline-none"
+								placeholder={$i18n.t('Azure Region')}
+								bind:value={TTS_AZURE_SPEECH_REGION}
+								required
+							/>
+						</div>
+					</div>
 				{/if}
 
 				<hr class=" dark:border-gray-850 my-2" />
@@ -359,6 +384,49 @@
 							</div>
 						</div>
 					</div>
+				{:else if TTS_ENGINE === 'azure'}
+					<div class=" flex gap-2">
+						<div class="w-full">
+							<div class=" mb-1.5 text-sm font-medium">{$i18n.t('TTS Voice')}</div>
+							<div class="flex w-full">
+								<div class="flex-1">
+									<input
+										list="voice-list"
+										class="w-full rounded-lg py-2 px-4 text-sm bg-gray-50 dark:text-gray-300 dark:bg-gray-850 outline-none"
+										bind:value={TTS_VOICE}
+										placeholder="Select a voice"
+									/>
+
+									<datalist id="voice-list">
+										{#each voices as voice}
+											<option value={voice.id}>{voice.name}</option>
+										{/each}
+									</datalist>
+								</div>
+							</div>
+						</div>
+						<div class="w-full">
+							<div class=" mb-1.5 text-sm font-medium">
+								{$i18n.t('Output format')}
+								<a
+									href="https://learn.microsoft.com/en-us/azure/ai-services/speech-service/rest-text-to-speech?tabs=streaming#audio-outputs"
+									target="_blank"
+								>
+									<small>{$i18n.t('Available list')}</small>
+								</a>
+							</div>
+							<div class="flex w-full">
+								<div class="flex-1">
+									<input
+										list="tts-model-list"
+										class="w-full rounded-lg py-2 px-4 text-sm bg-gray-50 dark:text-gray-300 dark:bg-gray-850 outline-none"
+										bind:value={TTS_AZURE_SPEECH_OUTPUT_FORMAT}
+										placeholder="Select an output format"
+									/>
+								</div>
+							</div>
+						</div>
+					</div>
 				{/if}
 
 				<hr class="dark:border-gray-850 my-2" />
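
For reference, the tts block this form now submits, limited to the fields visible in the hunk above (engine-specific keys not shown here are unchanged); the example values in the comments are illustrative only:

	// Sketch of the saved TTS config shape; the Azure fields are the additions.
	interface TTSConfigPayload {
		ENGINE: string;                     // '', 'openai', 'elevenlabs' or 'azure'
		MODEL: string;
		VOICE: string;
		SPLIT_ON: string;                   // TTS_RESPONSE_SPLIT value, e.g. 'punctuation'
		AZURE_SPEECH_REGION: string;        // e.g. 'eastus' (illustrative)
		AZURE_SPEECH_OUTPUT_FORMAT: string; // e.g. 'audio-24khz-160kbitrate-mono-mp3' (illustrative)
	}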

+ 9 - 7
src/lib/components/admin/Settings/Connections.svelte

@@ -150,18 +150,20 @@
 				})()
 			]);
 
-			OPENAI_API_BASE_URLS.forEach(async (url, idx) => {
-				const res = await getOpenAIModels(localStorage.token, idx);
-				if (res.pipelines) {
-					pipelineUrls[url] = true;
-				}
-			});
-
 			const ollamaConfig = await getOllamaConfig(localStorage.token);
 			const openaiConfig = await getOpenAIConfig(localStorage.token);
 
 			ENABLE_OPENAI_API = openaiConfig.ENABLE_OPENAI_API;
 			ENABLE_OLLAMA_API = ollamaConfig.ENABLE_OLLAMA_API;
+
+			if (ENABLE_OPENAI_API) {
+				OPENAI_API_BASE_URLS.forEach(async (url, idx) => {
+					const res = await getOpenAIModels(localStorage.token, idx);
+					if (res.pipelines) {
+						pipelineUrls[url] = true;
+					}
+				});
+			}
 		}
 	});
 </script>

+ 11 - 5
src/lib/components/admin/Settings/Documents.svelte

@@ -732,11 +732,17 @@
 
 			<div>
 				<div class=" mb-2.5 text-sm font-medium">{$i18n.t('RAG Template')}</div>
-				<textarea
-					bind:value={querySettings.template}
-					class="w-full rounded-lg px-4 py-3 text-sm bg-gray-50 dark:text-gray-300 dark:bg-gray-850 outline-none resize-none"
-					rows="4"
-				/>
+				<Tooltip
+					content={$i18n.t('Leave empty to use the default prompt, or enter a custom prompt')}
+					placement="top-start"
+				>
+					<textarea
+						bind:value={querySettings.template}
+						placeholder={$i18n.t('Leave empty to use the default prompt, or enter a custom prompt')}
+						class="w-full rounded-lg px-4 py-3 text-sm bg-gray-50 dark:text-gray-300 dark:bg-gray-850 outline-none resize-none"
+						rows="4"
+					/>
+				</Tooltip>
 			</div>
 		</div>
 

+ 97 - 0
src/lib/components/admin/Settings/Images.svelte

@@ -27,6 +27,43 @@
 
 	let models = null;
 
+	let samplers = [
+		'DPM++ 2M',
+		'DPM++ SDE',
+		'DPM++ 2M SDE',
+		'DPM++ 2M SDE Heun',
+		'DPM++ 2S a',
+		'DPM++ 3M SDE',
+		'Euler a',
+		'Euler',
+		'LMS',
+		'Heun',
+		'DPM2',
+		'DPM2 a',
+		'DPM fast',
+		'DPM adaptive',
+		'Restart',
+		'DDIM',
+		'DDIM CFG++',
+		'PLMS',
+		'UniPC'
+	];
+
+	let schedulers = [
+		'Automatic',
+		'Uniform',
+		'Karras',
+		'Exponential',
+		'Polyexponential',
+		'SGM Uniform',
+		'KL Optimal',
+		'Align Your Steps',
+		'Simple',
+		'Normal',
+		'DDIM',
+		'Beta'
+	];
+
 	let requiredWorkflowNodes = [
 		{
 			type: 'prompt',
@@ -326,6 +363,66 @@
 							</a>
 						</div>
 					</div>
+
+					<!---Sampler-->
+					<div>
+						<div class=" mb-2.5 text-sm font-medium">{$i18n.t('Set Sampler')}</div>
+						<div class="flex w-full">
+							<div class="flex-1 mr-2">
+								<Tooltip content={$i18n.t('Enter Sampler (e.g. Euler a)')} placement="top-start">
+									<input
+										list="sampler-list"
+										class="w-full rounded-lg py-2 px-4 text-sm bg-gray-50 dark:text-gray-300 dark:bg-gray-850 outline-none"
+										placeholder={$i18n.t('Enter Sampler (e.g. Euler a)')}
+										bind:value={config.automatic1111.AUTOMATIC1111_SAMPLER}
+									/>
+
+									<datalist id="sampler-list">
+										{#each samplers ?? [] as sampler}
+											<option value={sampler}>{sampler}</option>
+										{/each}
+									</datalist>
+								</Tooltip>
+							</div>
+						</div>
+					</div>
+					<!---Scheduler-->
+					<div>
+						<div class=" mb-2.5 text-sm font-medium">{$i18n.t('Set Scheduler')}</div>
+						<div class="flex w-full">
+							<div class="flex-1 mr-2">
+								<Tooltip content={$i18n.t('Enter Scheduler (e.g. Karras)')} placement="top-start">
+									<input
+										list="scheduler-list"
+										class="w-full rounded-lg py-2 px-4 text-sm bg-gray-50 dark:text-gray-300 dark:bg-gray-850 outline-none"
+										placeholder={$i18n.t('Enter Scheduler (e.g. Karras)')}
+										bind:value={config.automatic1111.AUTOMATIC1111_SCHEDULER}
+									/>
+
+									<datalist id="scheduler-list">
+										{#each schedulers ?? [] as scheduler}
+											<option value={scheduler}>{scheduler}</option>
+										{/each}
+									</datalist>
+								</Tooltip>
+							</div>
+						</div>
+					</div>
+					<!---CFG scale-->
+					<div>
+						<div class=" mb-2.5 text-sm font-medium">{$i18n.t('Set CFG Scale')}</div>
+						<div class="flex w-full">
+							<div class="flex-1 mr-2">
+								<Tooltip content={$i18n.t('Enter CFG Scale (e.g. 7.0)')} placement="top-start">
+									<input
+										class="w-full rounded-lg py-2 px-4 text-sm bg-gray-50 dark:text-gray-300 dark:bg-gray-850 outline-none"
+										placeholder={$i18n.t('Enter CFG Scale (e.g. 7.0)')}
+										bind:value={config.automatic1111.AUTOMATIC1111_CFG_SCALE}
+									/>
+								</Tooltip>
+							</div>
+						</div>
+					</div>
 				{:else if config?.engine === 'comfyui'}
 					<div class="">
 						<div class=" mb-2 text-sm font-medium">{$i18n.t('ComfyUI Base URL')}</div>
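
The three inputs above bind into config.automatic1111; a hedged sketch of how such options are commonly forwarded to AUTOMATIC1111's txt2img endpoint (sampler_name and cfg_scale are standard /sdapi/v1/txt2img parameters, scheduler needs a newer AUTOMATIC1111 build; the exact mapping done by the Open WebUI backend is not shown in this diff):

	// Sketch only: forwarding sampler, scheduler and CFG scale to AUTOMATIC1111.
	async function txt2img(baseUrl: string, prompt: string, cfg: {
		AUTOMATIC1111_SAMPLER?: string;
		AUTOMATIC1111_SCHEDULER?: string;
		AUTOMATIC1111_CFG_SCALE?: number;
	}) {
		const res = await fetch(`${baseUrl}/sdapi/v1/txt2img`, {
			method: 'POST',
			headers: { 'Content-Type': 'application/json' },
			body: JSON.stringify({
				prompt,
				sampler_name: cfg.AUTOMATIC1111_SAMPLER ?? 'Euler a',
				scheduler: cfg.AUTOMATIC1111_SCHEDULER ?? 'Automatic',
				cfg_scale: cfg.AUTOMATIC1111_CFG_SCALE ?? 7.0
			})
		});
		return res.json(); // { images: [...base64], parameters, info }
	}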

+ 3 - 2
src/lib/components/admin/Settings/Interface.svelte

@@ -307,9 +307,10 @@
 									/>
 								</div>
 
-								<input
-									class="px-3 py-1.5 text-xs w-full bg-transparent outline-none border-r border-gray-100 dark:border-gray-800"
+								<textarea
+									class="px-3 py-1.5 text-xs w-full bg-transparent outline-none border-r border-gray-100 dark:border-gray-800 resize-none"
 									placeholder={$i18n.t('Prompt (e.g. Tell me a fun fact about the Roman Empire)')}
+									rows="3"
 									bind:value={prompt.content}
 								/>
 							</div>

+ 246 - 189
src/lib/components/chat/Chat.svelte

@@ -23,6 +23,7 @@
 		banners,
 		user,
 		socket,
+		showControls,
 		showCallOverlay,
 		currentChatPage,
 		temporaryChatEnabled
@@ -70,7 +71,6 @@
 	let loaded = false;
 	const eventTarget = new EventTarget();
 
-	let showControls = false;
 	let stopResponseFlag = false;
 	let autoScroll = true;
 	let processing = '';
@@ -115,13 +115,14 @@
 
 	$: if (history.currentId !== null) {
 		let _messages = [];
-
 		let currentMessage = history.messages[history.currentId];
-		while (currentMessage !== null) {
+		while (currentMessage) {
 			_messages.unshift({ ...currentMessage });
 			currentMessage =
 				currentMessage.parentId !== null ? history.messages[currentMessage.parentId] : null;
 		}
+
+		// Rebuilding the full messages array from history on every update; this is most likely causing the performance issue
 		messages = _messages;
 	} else {
 		messages = [];
@@ -143,6 +144,28 @@
 		})();
 	}
 
+	const showMessage = async (message) => {
+		let _messageId = JSON.parse(JSON.stringify(message.id));
+
+		let messageChildrenIds = history.messages[_messageId].childrenIds;
+
+		while (messageChildrenIds.length !== 0) {
+			_messageId = messageChildrenIds.at(-1);
+			messageChildrenIds = history.messages[_messageId].childrenIds;
+		}
+
+		history.currentId = _messageId;
+
+		await tick();
+		await tick();
+		await tick();
+
+		const messageElement = document.getElementById(`message-${message.id}`);
+		if (messageElement) {
+			messageElement.scrollIntoView({ behavior: 'smooth' });
+		}
+	};
+
 	const chatEventHandler = async (event, cb) => {
 		if (event.chat_id === $chatId) {
 			await tick();
@@ -860,8 +883,9 @@
 
 		await tick();
 
+		const stream = $settings?.streamResponse ?? true;
 		const [res, controller] = await generateChatCompletion(localStorage.token, {
-			stream: true,
+			stream: stream,
 			model: model.id,
 			messages: messagesBody,
 			options: {
@@ -886,142 +910,162 @@
 		});
 
 		if (res && res.ok) {
-			console.log('controller', controller);
+			if (!stream) {
+				const response = await res.json();
+				console.log(response);
+
+				responseMessage.content = response.message.content;
+				responseMessage.info = {
+					eval_count: response.eval_count,
+					eval_duration: response.eval_duration,
+					load_duration: response.load_duration,
+					prompt_eval_count: response.prompt_eval_count,
+					prompt_eval_duration: response.prompt_eval_duration,
+					total_duration: response.total_duration
+				};
+				responseMessage.done = true;
+			} else {
+				console.log('controller', controller);
 
-			const reader = res.body
-				.pipeThrough(new TextDecoderStream())
-				.pipeThrough(splitStream('\n'))
-				.getReader();
+				const reader = res.body
+					.pipeThrough(new TextDecoderStream())
+					.pipeThrough(splitStream('\n'))
+					.getReader();
 
-			while (true) {
-				const { value, done } = await reader.read();
-				if (done || stopResponseFlag || _chatId !== $chatId) {
-					responseMessage.done = true;
-					messages = messages;
+				while (true) {
+					const { value, done } = await reader.read();
+					if (done || stopResponseFlag || _chatId !== $chatId) {
+						responseMessage.done = true;
+						messages = messages;
 
-					if (stopResponseFlag) {
-						controller.abort('User: Stop Response');
-					} else {
-						const messages = createMessagesList(responseMessageId);
-						await chatCompletedHandler(_chatId, model.id, responseMessageId, messages);
+						if (stopResponseFlag) {
+							controller.abort('User: Stop Response');
+						}
+
+						_response = responseMessage.content;
+						break;
 					}
 
-					_response = responseMessage.content;
-					break;
-				}
+					try {
+						let lines = value.split('\n');
 
-				try {
-					let lines = value.split('\n');
-
-					for (const line of lines) {
-						if (line !== '') {
-							console.log(line);
-							let data = JSON.parse(line);
-
-							if ('citations' in data) {
-								responseMessage.citations = data.citations;
-								// Only remove status if it was initially set
-								if (model?.info?.meta?.knowledge ?? false) {
-									responseMessage.statusHistory = responseMessage.statusHistory.filter(
-										(status) => status.action !== 'knowledge_search'
-									);
+						for (const line of lines) {
+							if (line !== '') {
+								console.log(line);
+								let data = JSON.parse(line);
+
+								if ('citations' in data) {
+									responseMessage.citations = data.citations;
+									// Only remove status if it was initially set
+									if (model?.info?.meta?.knowledge ?? false) {
+										responseMessage.statusHistory = responseMessage.statusHistory.filter(
+											(status) => status.action !== 'knowledge_search'
+										);
+									}
+									continue;
 								}
-								continue;
-							}
 
-							if ('detail' in data) {
-								throw data;
-							}
+								if ('detail' in data) {
+									throw data;
+								}
 
-							if (data.done == false) {
-								if (responseMessage.content == '' && data.message.content == '\n') {
-									continue;
-								} else {
-									responseMessage.content += data.message.content;
+								if (data.done == false) {
+									if (responseMessage.content == '' && data.message.content == '\n') {
+										continue;
+									} else {
+										responseMessage.content += data.message.content;
 
-									if (navigator.vibrate && ($settings?.hapticFeedback ?? false)) {
-										navigator.vibrate(5);
-									}
+										if (navigator.vibrate && ($settings?.hapticFeedback ?? false)) {
+											navigator.vibrate(5);
+										}
 
-									const messageContentParts = getMessageContentParts(
-										responseMessage.content,
-										$config?.audio?.tts?.split_on ?? 'punctuation'
-									);
-									messageContentParts.pop();
-
-									// dispatch only last sentence and make sure it hasn't been dispatched before
-									if (
-										messageContentParts.length > 0 &&
-										messageContentParts[messageContentParts.length - 1] !==
-											responseMessage.lastSentence
-									) {
-										responseMessage.lastSentence =
-											messageContentParts[messageContentParts.length - 1];
-										eventTarget.dispatchEvent(
-											new CustomEvent('chat', {
-												detail: {
-													id: responseMessageId,
-													content: messageContentParts[messageContentParts.length - 1]
-												}
-											})
+										const messageContentParts = getMessageContentParts(
+											responseMessage.content,
+											$config?.audio?.tts?.split_on ?? 'punctuation'
 										);
+										messageContentParts.pop();
+
+										// dispatch only last sentence and make sure it hasn't been dispatched before
+										if (
+											messageContentParts.length > 0 &&
+											messageContentParts[messageContentParts.length - 1] !==
+												responseMessage.lastSentence
+										) {
+											responseMessage.lastSentence =
+												messageContentParts[messageContentParts.length - 1];
+											eventTarget.dispatchEvent(
+												new CustomEvent('chat', {
+													detail: {
+														id: responseMessageId,
+														content: messageContentParts[messageContentParts.length - 1]
+													}
+												})
+											);
+										}
+
+										messages = messages;
 									}
+								} else {
+									responseMessage.done = true;
 
-									messages = messages;
-								}
-							} else {
-								responseMessage.done = true;
+									if (responseMessage.content == '') {
+										responseMessage.error = {
+											code: 400,
+											content: `Oops! No text generated from Ollama, Please try again.`
+										};
+									}
 
-								if (responseMessage.content == '') {
-									responseMessage.error = {
-										code: 400,
-										content: `Oops! No text generated from Ollama, Please try again.`
+									responseMessage.context = data.context ?? null;
+									responseMessage.info = {
+										total_duration: data.total_duration,
+										load_duration: data.load_duration,
+										sample_count: data.sample_count,
+										sample_duration: data.sample_duration,
+										prompt_eval_count: data.prompt_eval_count,
+										prompt_eval_duration: data.prompt_eval_duration,
+										eval_count: data.eval_count,
+										eval_duration: data.eval_duration
 									};
-								}
+									messages = messages;
 
-								responseMessage.context = data.context ?? null;
-								responseMessage.info = {
-									total_duration: data.total_duration,
-									load_duration: data.load_duration,
-									sample_count: data.sample_count,
-									sample_duration: data.sample_duration,
-									prompt_eval_count: data.prompt_eval_count,
-									prompt_eval_duration: data.prompt_eval_duration,
-									eval_count: data.eval_count,
-									eval_duration: data.eval_duration
-								};
-								messages = messages;
-
-								if ($settings.notificationEnabled && !document.hasFocus()) {
-									const notification = new Notification(`${model.id}`, {
-										body: responseMessage.content,
-										icon: `${WEBUI_BASE_URL}/static/favicon.png`
-									});
-								}
+									if ($settings.notificationEnabled && !document.hasFocus()) {
+										const notification = new Notification(`${model.id}`, {
+											body: responseMessage.content,
+											icon: `${WEBUI_BASE_URL}/static/favicon.png`
+										});
+									}
 
-								if ($settings?.responseAutoCopy ?? false) {
-									copyToClipboard(responseMessage.content);
-								}
+									if ($settings?.responseAutoCopy ?? false) {
+										copyToClipboard(responseMessage.content);
+									}
 
-								if ($settings.responseAutoPlayback && !$showCallOverlay) {
-									await tick();
-									document.getElementById(`speak-button-${responseMessage.id}`)?.click();
+									if ($settings.responseAutoPlayback && !$showCallOverlay) {
+										await tick();
+										document.getElementById(`speak-button-${responseMessage.id}`)?.click();
+									}
 								}
 							}
 						}
+					} catch (error) {
+						console.log(error);
+						if ('detail' in error) {
+							toast.error(error.detail);
+						}
+						break;
 					}
-				} catch (error) {
-					console.log(error);
-					if ('detail' in error) {
-						toast.error(error.detail);
-					}
-					break;
-				}
 
-				if (autoScroll) {
-					scrollToBottom();
+					if (autoScroll) {
+						scrollToBottom();
+					}
 				}
 			}
+
+			await chatCompletedHandler(
+				_chatId,
+				model.id,
+				responseMessageId,
+				createMessagesList(responseMessageId)
+			);
 		} else {
 			if (res !== null) {
 				const error = await res.json();
@@ -1133,17 +1177,19 @@
 		await tick();
 
 		try {
+			const stream = $settings?.streamResponse ?? true;
 			const [res, controller] = await generateOpenAIChatCompletion(
 				localStorage.token,
 				{
-					stream: true,
+					stream: stream,
 					model: model.id,
-					stream_options:
-						(model.info?.meta?.capabilities?.usage ?? false)
-							? {
+					...(stream && (model.info?.meta?.capabilities?.usage ?? false)
+						? {
+								stream_options: {
 									include_usage: true
 								}
-							: undefined,
+							}
+						: {}),
 					messages: [
 						params?.system || $settings.system || (responseMessage?.userContext ?? null)
 							? {
@@ -1221,85 +1267,95 @@
 			scrollToBottom();
 
 			if (res && res.ok && res.body) {
-				const textStream = await createOpenAITextStream(res.body, $settings.splitLargeChunks);
-
-				for await (const update of textStream) {
-					const { value, done, citations, error, usage } = update;
-					if (error) {
-						await handleOpenAIError(error, null, model, responseMessage);
-						break;
-					}
-					if (done || stopResponseFlag || _chatId !== $chatId) {
-						responseMessage.done = true;
-						messages = messages;
+				if (!stream) {
+					const response = await res.json();
+					console.log(response);
 
-						if (stopResponseFlag) {
-							controller.abort('User: Stop Response');
-						} else {
-							const messages = createMessagesList(responseMessageId);
+					responseMessage.content = response.choices[0].message.content;
+					responseMessage.info = { ...response.usage, openai: true };
+					responseMessage.done = true;
+				} else {
+					const textStream = await createOpenAITextStream(res.body, $settings.splitLargeChunks);
 
-							await chatCompletedHandler(_chatId, model.id, responseMessageId, messages);
+					for await (const update of textStream) {
+						const { value, done, citations, error, usage } = update;
+						if (error) {
+							await handleOpenAIError(error, null, model, responseMessage);
+							break;
 						}
+						if (done || stopResponseFlag || _chatId !== $chatId) {
+							responseMessage.done = true;
+							messages = messages;
 
-						_response = responseMessage.content;
-
-						break;
-					}
+							if (stopResponseFlag) {
+								controller.abort('User: Stop Response');
+							}
+							_response = responseMessage.content;
+							break;
+						}
 
-					if (usage) {
-						responseMessage.info = { ...usage, openai: true };
-					}
+						if (usage) {
+							responseMessage.info = { ...usage, openai: true };
+						}
 
-					if (citations) {
-						responseMessage.citations = citations;
-						// Only remove status if it was initially set
-						if (model?.info?.meta?.knowledge ?? false) {
-							responseMessage.statusHistory = responseMessage.statusHistory.filter(
-								(status) => status.action !== 'knowledge_search'
-							);
+						if (citations) {
+							responseMessage.citations = citations;
+							// Only remove status if it was initially set
+							if (model?.info?.meta?.knowledge ?? false) {
+								responseMessage.statusHistory = responseMessage.statusHistory.filter(
+									(status) => status.action !== 'knowledge_search'
+								);
+							}
+							continue;
 						}
-						continue;
-					}
 
-					if (responseMessage.content == '' && value == '\n') {
-						continue;
-					} else {
-						responseMessage.content += value;
+						if (responseMessage.content == '' && value == '\n') {
+							continue;
+						} else {
+							responseMessage.content += value;
 
-						if (navigator.vibrate && ($settings?.hapticFeedback ?? false)) {
-							navigator.vibrate(5);
-						}
+							if (navigator.vibrate && ($settings?.hapticFeedback ?? false)) {
+								navigator.vibrate(5);
+							}
 
-						const messageContentParts = getMessageContentParts(
-							responseMessage.content,
-							$config?.audio?.tts?.split_on ?? 'punctuation'
-						);
-						messageContentParts.pop();
-
-						// dispatch only last sentence and make sure it hasn't been dispatched before
-						if (
-							messageContentParts.length > 0 &&
-							messageContentParts[messageContentParts.length - 1] !== responseMessage.lastSentence
-						) {
-							responseMessage.lastSentence = messageContentParts[messageContentParts.length - 1];
-							eventTarget.dispatchEvent(
-								new CustomEvent('chat', {
-									detail: {
-										id: responseMessageId,
-										content: messageContentParts[messageContentParts.length - 1]
-									}
-								})
+							const messageContentParts = getMessageContentParts(
+								responseMessage.content,
+								$config?.audio?.tts?.split_on ?? 'punctuation'
 							);
-						}
+							messageContentParts.pop();
+
+							// dispatch only last sentence and make sure it hasn't been dispatched before
+							if (
+								messageContentParts.length > 0 &&
+								messageContentParts[messageContentParts.length - 1] !== responseMessage.lastSentence
+							) {
+								responseMessage.lastSentence = messageContentParts[messageContentParts.length - 1];
+								eventTarget.dispatchEvent(
+									new CustomEvent('chat', {
+										detail: {
+											id: responseMessageId,
+											content: messageContentParts[messageContentParts.length - 1]
+										}
+									})
+								);
+							}
 
-						messages = messages;
-					}
+							messages = messages;
+						}
 
-					if (autoScroll) {
-						scrollToBottom();
+						if (autoScroll) {
+							scrollToBottom();
+						}
 					}
 				}
 
+				await chatCompletedHandler(
+					_chatId,
+					model.id,
+					responseMessageId,
+					createMessagesList(responseMessageId)
+				);
+
 				if ($settings.notificationEnabled && !document.hasFocus()) {
 					const notification = new Notification(`${model.id}`, {
 						body: responseMessage.content,
@@ -1703,7 +1759,6 @@
 			{title}
 			bind:selectedModels
 			bind:showModelSelector
-			bind:showControls
 			shareEnabled={messages.length > 0}
 			{chat}
 			{initNewChat}
@@ -1713,7 +1768,7 @@
 			<div
 				class="absolute top-[4.25rem] w-full {$showSidebar
 					? 'md:max-w-[calc(100%-260px)]'
-					: ''} {showControls ? 'lg:pr-[24rem]' : ''} z-20"
+					: ''} {$showControls ? 'lg:pr-[26rem]' : ''} z-20"
 			>
 				<div class=" flex flex-col gap-1 w-full">
 					{#each $banners.filter( (b) => (b.dismissible ? !JSON.parse(localStorage.getItem('dismissedBannerIds') ?? '[]').includes(b.id) : true) ) as banner}
@@ -1740,8 +1795,8 @@
 
 		<div class="flex flex-col flex-auto z-10">
 			<div
-				class=" pb-2.5 flex flex-col justify-between w-full flex-auto overflow-auto h-0 max-w-full z-10 scrollbar-hidden {showControls
-					? 'lg:pr-[24rem]'
+				class=" pb-2.5 flex flex-col justify-between w-full flex-auto overflow-auto h-0 max-w-full z-10 scrollbar-hidden {$showControls
+					? 'lg:pr-[26rem]'
 					: ''}"
 				id="messages-container"
 				bind:this={messagesContainerElement}
@@ -1766,11 +1821,12 @@
 						{regenerateResponse}
 						{mergeResponses}
 						{chatActionHandler}
+						{showMessage}
 					/>
 				</div>
 			</div>
 
-			<div class={showControls ? 'lg:pr-[24rem]' : ''}>
+			<div class={$showControls ? 'lg:pr-[26rem]' : ''}>
 				<MessageInput
 					bind:files
 					bind:prompt
@@ -1791,7 +1847,7 @@
 					{submitPrompt}
 					{stopResponse}
 					on:call={() => {
-						showControls = true;
+						showControls.set(true);
 					}}
 				/>
 			</div>
@@ -1807,12 +1863,13 @@
 		}
 		return a;
 	}, [])}
-	bind:show={showControls}
+	bind:history
 	bind:chatFiles
 	bind:params
 	bind:files
 	{submitPrompt}
 	{stopResponse}
+	{showMessage}
 	modelId={selectedModelIds?.at(0) ?? null}
 	chatId={$chatId}
 	{eventTarget}
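
Both providers now branch on stream = $settings?.streamResponse ?? true, and chatCompletedHandler runs once after either path instead of inside the stream loop. A self-contained sketch of the new non-streaming read for the OpenAI-style response (types illustrative):

	// Non-streaming path: the completion arrives as a single JSON body.
	type OpenAICompletion = {
		choices: { message: { content: string } }[];
		usage?: Record<string, number>;
	};

	async function readNonStreamingCompletion(res: Response) {
		const data = (await res.json()) as OpenAICompletion;
		return {
			content: data.choices[0].message.content,
			info: { ...(data.usage ?? {}), openai: true },
			done: true
		};
	}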

+ 100 - 57
src/lib/components/chat/ChatControls.svelte

@@ -1,14 +1,17 @@
 <script lang="ts">
+	import { SvelteFlowProvider } from '@xyflow/svelte';
 	import { slide } from 'svelte/transition';
+
+	import { onDestroy, onMount } from 'svelte';
+	import { mobile, showControls, showCallOverlay, showOverview } from '$lib/stores';
+
 	import Modal from '../common/Modal.svelte';
 	import Controls from './Controls/Controls.svelte';
-	import { onMount } from 'svelte';
-	import { mobile, showCallOverlay } from '$lib/stores';
 	import CallOverlay from './MessageInput/CallOverlay.svelte';
 	import Drawer from '../common/Drawer.svelte';
+	import Overview from './Overview.svelte';
 
-	export let show = false;
-
+	export let history;
 	export let models = [];
 
 	export let chatId = null;
@@ -18,6 +21,7 @@
 	export let eventTarget: EventTarget;
 	export let submitPrompt: Function;
 	export let stopResponse: Function;
+	export let showMessage: Function;
 	export let files;
 	export let modelId;
 
@@ -42,48 +46,23 @@
 			mediaQuery.removeEventListener('change', handleMediaQuery);
 		};
 	});
+
+	onDestroy(() => {
+		showControls.set(false);
+	});
+
+	$: if (!chatId) {
+		showOverview.set(false);
+	}
 </script>
 
-{#if !largeScreen}
-	{#if $showCallOverlay}
-		<div class=" absolute w-full h-screen max-h-[100dvh] flex z-[999] overflow-hidden">
-			<div
-				class="absolute w-full h-screen max-h-[100dvh] bg-white text-gray-700 dark:bg-black dark:text-gray-300 flex justify-center"
-			>
-				<CallOverlay
-					bind:files
-					{submitPrompt}
-					{stopResponse}
-					{modelId}
-					{chatId}
-					{eventTarget}
-					on:close={() => {
-						show = false;
-					}}
-				/>
-			</div>
-		</div>
-	{:else if show}
-		<Drawer bind:show>
-			<div class="  px-6 py-4 h-full">
-				<Controls
-					on:close={() => {
-						show = false;
-					}}
-					{models}
-					bind:chatFiles
-					bind:params
-				/>
-			</div>
-		</Drawer>
-	{/if}
-{:else if show}
-	<div class=" absolute bottom-0 right-0 z-20 h-full pointer-events-none">
-		<div class="pr-4 pt-14 pb-8 w-[24rem] h-full" in:slide={{ duration: 200, axis: 'x' }}>
-			<div
-				class="w-full h-full px-5 py-4 bg-white dark:shadow-lg dark:bg-gray-850 border border-gray-50 dark:border-gray-800 rounded-xl z-50 pointer-events-auto overflow-y-auto scrollbar-hidden"
-			>
-				{#if $showCallOverlay}
+<SvelteFlowProvider>
+	{#if !largeScreen}
+		{#if $showCallOverlay}
+			<div class=" absolute w-full h-screen max-h-[100dvh] flex z-[999] overflow-hidden">
+				<div
+					class="absolute w-full h-screen max-h-[100dvh] bg-white text-gray-700 dark:bg-black dark:text-gray-300 flex justify-center"
+				>
 					<CallOverlay
 						bind:files
 						{submitPrompt}
@@ -92,20 +71,84 @@
 						{chatId}
 						{eventTarget}
 						on:close={() => {
-							show = false;
+							showControls.set(false);
 						}}
 					/>
-				{:else}
-					<Controls
-						on:close={() => {
-							show = false;
-						}}
-						{models}
-						bind:chatFiles
-						bind:params
-					/>
-				{/if}
+				</div>
+			</div>
+		{:else if $showControls}
+			<Drawer
+				show={$showControls}
+				on:close={() => {
+					showControls.set(false);
+				}}
+			>
+				<div class=" {$showOverview ? ' h-screen  w-screen' : 'px-6 py-4'} h-full">
+					{#if $showOverview}
+						<Overview
+							{history}
+							on:nodeclick={(e) => {
+								showMessage(e.detail.node.data.message);
+							}}
+							on:close={() => {
+								showControls.set(false);
+							}}
+						/>
+					{:else}
+						<Controls
+							on:close={() => {
+								showControls.set(false);
+							}}
+							{models}
+							bind:chatFiles
+							bind:params
+						/>
+					{/if}
+				</div>
+			</Drawer>
+		{/if}
+	{:else if $showControls}
+		<div class=" absolute bottom-0 right-0 z-20 h-full pointer-events-none">
+			<div class="pr-4 pt-14 pb-8 w-[26rem] h-full" in:slide={{ duration: 200, axis: 'x' }}>
+				<div
+					class="w-full h-full {$showOverview && !$showCallOverlay
+						? ' '
+						: 'px-5 py-4 bg-white dark:shadow-lg dark:bg-gray-850  border border-gray-50 dark:border-gray-800'}  rounded-lg z-50 pointer-events-auto overflow-y-auto scrollbar-hidden"
+				>
+					{#if $showCallOverlay}
+						<CallOverlay
+							bind:files
+							{submitPrompt}
+							{stopResponse}
+							{modelId}
+							{chatId}
+							{eventTarget}
+							on:close={() => {
+								showControls.set(false);
+							}}
+						/>
+					{:else if $showOverview}
+						<Overview
+							{history}
+							on:nodeclick={(e) => {
+								showMessage(e.detail.node.data.message);
+							}}
+							on:close={() => {
+								showControls.set(false);
+							}}
+						/>
+					{:else}
+						<Controls
+							on:close={() => {
+								showControls.set(false);
+							}}
+							{models}
+							bind:chatFiles
+							bind:params
+						/>
+					{/if}
+				</div>
 			</div>
 		</div>
-	</div>
-{/if}
+	{/if}
+</SvelteFlowProvider>
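
showControls, showOverview and showCallOverlay are consumed here as Svelte stores; a minimal sketch of the declarations presumably living in $lib/stores, inferred from the .set() calls above (the writable<boolean> shape is an assumption):

	// Assumed store declarations in $lib/stores (not part of this diff).
	import { writable } from 'svelte/store';

	export const showControls = writable(false);    // right-hand controls panel
	export const showOverview = writable(false);    // chat overview graph
	export const showCallOverlay = writable(false); // full-screen call UI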

+ 18 - 15
src/lib/components/chat/MessageInput.svelte

@@ -93,20 +93,6 @@
 	const uploadFileHandler = async (file) => {
 		console.log(file);
 
-		// Check if the file is an audio file and transcribe/convert it to text file
-		if (['audio/mpeg', 'audio/wav'].includes(file['type'])) {
-			const res = await transcribeAudio(localStorage.token, file).catch((error) => {
-				toast.error(error);
-				return null;
-			});
-
-			if (res) {
-				console.log(res);
-				const blob = new Blob([res.text], { type: 'text/plain' });
-				file = blobToFile(blob, `${file.name}.txt`);
-			}
-		}
-
 		const fileItem = {
 			type: 'file',
 			file: '',
@@ -120,6 +106,23 @@
 		};
 		files = [...files, fileItem];
 
+		// Check if the file is an audio file and transcribe/convert it to text file
+		if (['audio/mpeg', 'audio/wav', 'audio/ogg'].includes(file['type'])) {
+			const res = await transcribeAudio(localStorage.token, file).catch((error) => {
+				toast.error(error);
+				return null;
+			});
+
+			if (res) {
+				console.log(res);
+				const blob = new Blob([res.text], { type: 'text/plain' });
+				file = blobToFile(blob, `${file.name}.txt`);
+
+				fileItem.name = file.name;
+				fileItem.size = file.size;
+			}
+		}
+
 		try {
 			const uploadedFile = await uploadFile(localStorage.token, file);
 
@@ -349,7 +352,7 @@
 	</div>
 
 	<div class="{transparentBackground ? 'bg-transparent' : 'bg-white dark:bg-gray-900'} ">
-		<div class="max-w-6xl px-2.5 md:px-6 mx-auto inset-x-0">
+		<div class="max-w-6xl px-2.5 md:px-6 mx-auto inset-x-0 pb-safe-bottom">
 			<div class=" pb-2">
 				<input
 					bind:this={filesInputElement}

+ 7 - 7
src/lib/components/chat/MessageInput/CallOverlay.svelte

@@ -1,15 +1,13 @@
 <script lang="ts">
 	import { config, models, settings, showCallOverlay } from '$lib/stores';
 	import { onMount, tick, getContext, onDestroy, createEventDispatcher } from 'svelte';
+	import { DropdownMenu } from 'bits-ui';
+	import Dropdown from '$lib/components/common/Dropdown.svelte';
+	import { flyAndScale } from '$lib/utils/transitions';
 
 	const dispatch = createEventDispatcher();
 
-	import {
-		blobToFile,
-		calculateSHA256,
-		extractSentencesForAudio,
-		findWordIndices
-	} from '$lib/utils';
+	import { blobToFile } from '$lib/utils';
 	import { generateEmoji } from '$lib/apis';
 	import { synthesizeOpenAISpeech, transcribeAudio } from '$lib/apis/audio';
 
@@ -360,6 +358,7 @@
 								?.at(0) ?? undefined;
 
 						currentUtterance = new SpeechSynthesisUtterance(content);
+						currentUtterance.rate = $settings.audio?.tts?.speedRate ?? 1;
 
 						if (voice) {
 							currentUtterance.voice = voice;
@@ -381,11 +380,12 @@
 	const playAudio = (audio) => {
 		if ($showCallOverlay) {
 			return new Promise((resolve) => {
-				const audioElement = document.getElementById('audioElement');
+				const audioElement = document.getElementById('audioElement') as HTMLAudioElement;
 
 				if (audioElement) {
 					audioElement.src = audio.src;
 					audioElement.muted = true;
+					audioElement.playbackRate = $settings.audio?.tts?.speedRate ?? 1;
 
 					audioElement
 						.play()

+ 1 - 1
src/lib/components/chat/MessageInput/Suggestions.svelte

@@ -9,7 +9,7 @@
 
 	let prompts = [];
 
-	$: prompts = suggestionPrompts
+	$: prompts = (suggestionPrompts ?? [])
 		.reduce((acc, current) => [...acc, ...[current]], [])
 		.sort(() => Math.random() - 0.5);
 	// suggestionPrompts.length <= 4

+ 50 - 31
src/lib/components/chat/Messages.svelte

@@ -21,6 +21,7 @@
 	export let regenerateResponse: Function;
 	export let mergeResponses: Function;
 	export let chatActionHandler: Function;
+	export let showMessage: Function = () => {};
 
 	export let user = $_user;
 	export let prompt;
@@ -108,6 +109,33 @@
 		await updateChatMessages();
 	};
 
+	const saveNewResponseMessage = async (message, content) => {
+		const responseMessageId = uuidv4();
+		const parentId = message.parentId;
+
+		const responseMessage = {
+			...message,
+			id: responseMessageId,
+			parentId: parentId,
+			childrenIds: [],
+			content: content,
+			timestamp: Math.floor(Date.now() / 1000) // Unix epoch
+		};
+
+		history.messages[responseMessageId] = responseMessage;
+		history.currentId = responseMessageId;
+
+		// Append messageId to childrenIds of parent message
+		if (parentId !== null) {
+			history.messages[parentId].childrenIds = [
+				...history.messages[parentId].childrenIds,
+				responseMessageId
+			];
+		}
+
+		await updateChatMessages();
+	};
+
 	const rateMessage = async (messageId, rating) => {
 		history.messages[messageId].annotation = {
 			...history.messages[messageId].annotation,
@@ -217,49 +245,39 @@
 
 	const deleteMessageHandler = async (messageId) => {
 		const messageToDelete = history.messages[messageId];
-
 		const parentMessageId = messageToDelete.parentId;
 		const childMessageIds = messageToDelete.childrenIds ?? [];
 
-		const hasDescendantMessages = childMessageIds.some(
-			(childId) => history.messages[childId]?.childrenIds?.length > 0
+		// Collect all grandchildren
+		const grandchildrenIds = childMessageIds.flatMap(
+			(childId) => history.messages[childId]?.childrenIds ?? []
 		);
 
-		history.currentId = parentMessageId;
-		await tick();
-
-		// Remove the message itself from the parent message's children array
-		history.messages[parentMessageId].childrenIds = history.messages[
-			parentMessageId
-		].childrenIds.filter((id) => id !== messageId);
-
-		await tick();
+		// Update parent's children
+		if (parentMessageId && history.messages[parentMessageId]) {
+			history.messages[parentMessageId].childrenIds = [
+				...history.messages[parentMessageId].childrenIds.filter((id) => id !== messageId),
+				...grandchildrenIds
+			];
+		}
 
-		childMessageIds.forEach((childId) => {
-			const childMessage = history.messages[childId];
-
-			if (childMessage && childMessage.childrenIds) {
-				if (childMessage.childrenIds.length === 0 && !hasDescendantMessages) {
-					// If there are no other responses/prompts
-					history.messages[parentMessageId].childrenIds = [];
-				} else {
-					childMessage.childrenIds.forEach((grandChildId) => {
-						if (history.messages[grandChildId]) {
-							history.messages[grandChildId].parentId = parentMessageId;
-							history.messages[parentMessageId].childrenIds.push(grandChildId);
-						}
-					});
-				}
+		// Update grandchildren's parent
+		grandchildrenIds.forEach((grandchildId) => {
+			if (history.messages[grandchildId]) {
+				history.messages[grandchildId].parentId = parentMessageId;
 			}
+		});
 
-			// Remove child message id from the parent message's children array
-			history.messages[parentMessageId].childrenIds = history.messages[
-				parentMessageId
-			].childrenIds.filter((id) => id !== childId);
+		// Delete the message and its children
+		[messageId, ...childMessageIds].forEach((id) => {
+			delete history.messages[id];
 		});
 
 		await tick();
 
+		showMessage({ id: parentMessageId });
+
+		// Update the chat
 		await updateChatById(localStorage.token, chatId, {
 			messages: messages,
 			history: history
@@ -342,6 +360,7 @@
 										{readOnly}
 										{updateChatMessages}
 										{confirmEditResponseMessage}
+										{saveNewResponseMessage}
 										{showPreviousMessage}
 										{showNextMessage}
 										{rateMessage}
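
The rewritten deleteMessageHandler above removes a message and its direct children, then re-attaches any grandchildren to the deleted message's parent; the same logic as a standalone helper (type is illustrative):

	// Delete-and-relink: drop messageId and its children, re-parent the grandchildren.
	type TreeMessage = { id: string; parentId: string | null; childrenIds: string[] };

	function deleteMessage(messages: Record<string, TreeMessage>, messageId: string) {
		const target = messages[messageId];
		const parentId = target.parentId;
		const childIds = target.childrenIds ?? [];
		const grandchildIds = childIds.flatMap((id) => messages[id]?.childrenIds ?? []);

		// Replace the deleted message in the parent's children with its grandchildren.
		if (parentId && messages[parentId]) {
			messages[parentId].childrenIds = [
				...messages[parentId].childrenIds.filter((id) => id !== messageId),
				...grandchildIds
			];
		}
		// Point the grandchildren at their new parent.
		grandchildIds.forEach((id) => {
			if (messages[id]) messages[id].parentId = parentId;
		});
		// Drop the message and its direct children from the map.
		[messageId, ...childIds].forEach((id) => {
			delete messages[id];
		});
	}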

+ 38 - 27
src/lib/components/chat/Messages/Citations.svelte

@@ -3,14 +3,9 @@
 
 	export let citations = [];
 
-	let showCitationModal = false;
-	let selectedCitation = null;
-</script>
+	let _citations = [];
 
-<CitationsModal bind:show={showCitationModal} citation={selectedCitation} />
-
-<div class="mt-1 mb-2 w-full flex gap-1 items-center flex-wrap">
-	{#each citations.reduce((acc, citation) => {
+	$: _citations = citations.reduce((acc, citation) => {
 		citation.document.forEach((document, index) => {
 			const metadata = citation.metadata?.[index];
 			const id = metadata?.source ?? 'N/A';
@@ -31,26 +26,42 @@
 				existingSource.document.push(document);
 				existingSource.metadata.push(metadata);
 			} else {
-				acc.push( { id: id, source: source, document: [document], metadata: metadata ? [metadata] : [] } );
+				acc.push({
+					id: id,
+					source: source,
+					document: [document],
+					metadata: metadata ? [metadata] : []
+				});
 			}
 		});
 		return acc;
-	}, []) as citation, idx}
-		<div class="flex gap-1 text-xs font-semibold">
-			<button
-				class="flex dark:text-gray-300 py-1 px-1 bg-gray-50 hover:bg-gray-100 dark:bg-gray-850 dark:hover:bg-gray-800 transition rounded-xl"
-				on:click={() => {
-					showCitationModal = true;
-					selectedCitation = citation;
-				}}
-			>
-				<div class="bg-white dark:bg-gray-700 rounded-full size-4">
-					{idx + 1}
-				</div>
-				<div class="flex-1 mx-2 line-clamp-1">
-					{citation.source.name}
-				</div>
-			</button>
-		</div>
-	{/each}
-</div>
+	}, []);
+
+	let showCitationModal = false;
+	let selectedCitation = null;
+</script>
+
+<CitationsModal bind:show={showCitationModal} citation={selectedCitation} />
+
+{#if _citations.length > 0}
+	<div class="mt-1 mb-2 w-full flex gap-1 items-center flex-wrap">
+		{#each _citations as citation, idx}
+			<div class="flex gap-1 text-xs font-semibold">
+				<button
+					class="flex dark:text-gray-300 py-1 px-1 bg-gray-50 hover:bg-gray-100 dark:bg-gray-850 dark:hover:bg-gray-800 transition rounded-xl"
+					on:click={() => {
+						showCitationModal = true;
+						selectedCitation = citation;
+					}}
+				>
+					<div class="bg-white dark:bg-gray-700 rounded-full size-4">
+						{idx + 1}
+					</div>
+					<div class="flex-1 mx-2 line-clamp-1">
+						{citation.source.name}
+					</div>
+				</button>
+			</div>
+		{/each}
+	</div>
+{/if}
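
The reactive reduce above groups per-chunk citations into one entry per source id, so repeated chunks from the same document render as a single pill; a small illustrative case (values are made up):

	// Two chunks citing the same source id collapse into one _citations entry.
	const citations = [
		{ source: { name: 'handbook.pdf' }, document: ['chunk A'], metadata: [{ source: 'doc-1' }] },
		{ source: { name: 'handbook.pdf' }, document: ['chunk B'], metadata: [{ source: 'doc-1' }] }
	];
	// After the reduce: one entry with id 'doc-1', document ['chunk A', 'chunk B'] and
	// both metadata objects, rendered as a single numbered citation button.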

+ 1 - 0
src/lib/components/chat/Messages/CitationsModal.svelte

@@ -66,6 +66,7 @@
 								>
 									{document?.metadata?.name ?? document.source.name}
 								</a>
+								{document?.metadata?.page ? `(page ${document.metadata.page + 1})` : ''}
 							</div>
 						{:else}
 							<div class="text-sm dark:text-gray-400">

+ 25 - 23
src/lib/components/chat/Messages/Markdown/MarkdownTokens.svelte

@@ -34,34 +34,36 @@
 			code={revertSanitizedResponseContent(token?.text ?? '')}
 		/>
 	{:else if token.type === 'table'}
-		<table>
-			<thead>
-				<tr>
-					{#each token.header as header, headerIdx}
-						<th style={token.align[headerIdx] ? '' : `text-align: ${token.align[headerIdx]}`}>
-							<MarkdownInlineTokens
-								id={`${id}-${tokenIdx}-header-${headerIdx}`}
-								tokens={header.tokens}
-							/>
-						</th>
-					{/each}
-				</tr>
-			</thead>
-			<tbody>
-				{#each token.rows as row, rowIdx}
+		<div class="scrollbar-hidden relative whitespace-nowrap overflow-x-auto max-w-full">
+			<table class="w-full">
+				<thead>
 					<tr>
-						{#each row ?? [] as cell, cellIdx}
-							<td style={token.align[cellIdx] ? '' : `text-align: ${token.align[cellIdx]}`}>
+						{#each token.header as header, headerIdx}
+							<th style={token.align[headerIdx] ? '' : `text-align: ${token.align[headerIdx]}`}>
 								<MarkdownInlineTokens
-									id={`${id}-${tokenIdx}-row-${rowIdx}-${cellIdx}`}
-									tokens={cell.tokens}
+									id={`${id}-${tokenIdx}-header-${headerIdx}`}
+									tokens={header.tokens}
 								/>
-							</td>
+							</th>
 						{/each}
 					</tr>
-				{/each}
-			</tbody>
-		</table>
+				</thead>
+				<tbody>
+					{#each token.rows as row, rowIdx}
+						<tr>
+							{#each row ?? [] as cell, cellIdx}
+								<td style={token.align[cellIdx] ? '' : `text-align: ${token.align[cellIdx]}`}>
+									<MarkdownInlineTokens
+										id={`${id}-${tokenIdx}-row-${rowIdx}-${cellIdx}`}
+										tokens={cell.tokens}
+									/>
+								</td>
+							{/each}
+						</tr>
+					{/each}
+				</tbody>
+			</table>
+		</div>
 	{:else if token.type === 'blockquote'}
 		<blockquote>
 			<svelte:self id={`${id}-${tokenIdx}`} tokens={token.tokens} />

+ 3 - 15
src/lib/components/chat/Messages/ProfileImage.svelte

@@ -1,23 +1,11 @@
 <script lang="ts">
 	import { settings } from '$lib/stores';
-	import { WEBUI_BASE_URL } from '$lib/constants';
+	import ProfileImageBase from './ProfileImageBase.svelte';
 
 	export let className = 'size-8';
-
-	export let src = '/user.png';
+	export let src = '';
 </script>
 
 <div class={`flex-shrink-0 ${($settings?.chatDirection ?? 'LTR') === 'LTR' ? 'mr-3' : 'ml-3'}`}>
-	<img
-		crossorigin="anonymous"
-		src={src.startsWith(WEBUI_BASE_URL) ||
-		src.startsWith('https://www.gravatar.com/avatar/') ||
-		src.startsWith('data:') ||
-		src.startsWith('/')
-			? src
-			: `/user.png`}
-		class=" {className} object-cover rounded-full -translate-y-[1px]"
-		alt="profile"
-		draggable="false"
-	/>
+	<ProfileImageBase {src} {className} />
 </div>

+ 21 - 0
src/lib/components/chat/Messages/ProfileImageBase.svelte

@@ -0,0 +1,21 @@
+<script lang="ts">
+	import { WEBUI_BASE_URL } from '$lib/constants';
+
+	export let className = 'size-8';
+	export let src = `${WEBUI_BASE_URL}/static/favicon.png`;
+</script>
+
+<img
+	crossorigin="anonymous"
+	src={src === ''
+		? `${WEBUI_BASE_URL}/static/favicon.png`
+		: src.startsWith(WEBUI_BASE_URL) ||
+			  src.startsWith('https://www.gravatar.com/avatar/') ||
+			  src.startsWith('data:') ||
+			  src.startsWith('/')
+			? src
+			: `/user.png`}
+	class=" {className} object-cover rounded-full -translate-y-[1px]"
+	alt="profile"
+	draggable="false"
+/>

+ 49 - 21
src/lib/components/chat/Messages/ResponseMessage.svelte

@@ -85,6 +85,8 @@
 
 	export let updateChatMessages: Function;
 	export let confirmEditResponseMessage: Function;
+	export let saveNewResponseMessage: Function = () => {};
+
 	export let showPreviousMessage: Function;
 	export let showNextMessage: Function;
 	export let rateMessage: Function;
@@ -202,6 +204,8 @@
 					const blob = await res.blob();
 					const blobUrl = URL.createObjectURL(blob);
 					const audio = new Audio(blobUrl);
+					audio.playbackRate = $settings.audio?.tts?.speedRate ?? 1;
+
 					audioParts[idx] = audio;
 					loadingSpeech = false;
 					lastPlayedAudioPromise = lastPlayedAudioPromise.then(() => playAudio(idx));
@@ -224,6 +228,7 @@
 					console.log(voice);
 
 					const speak = new SpeechSynthesisUtterance(message.content);
+					speak.rate = $settings.audio?.tts?.speedRate ?? 1;
 
 					console.log(speak);
 
@@ -267,6 +272,15 @@
 		await tick();
 	};
 
+	const saveNewMessageHandler = async () => {
+		saveNewResponseMessage(message, editedContent);
+
+		edit = false;
+		editedContent = '';
+
+		await tick();
+	};
+
 	const cancelEditMessage = async () => {
 		edit = false;
 		editedContent = '';
@@ -333,7 +347,7 @@
 						{#each message.files as file}
 							<div>
 								{#if file.type === 'image'}
-									<Image src={file.url} />
+									<Image src={file.url} alt={message.content} />
 								{/if}
 							</div>
 						{/each}
@@ -399,31 +413,45 @@
 										const isEnterPressed = e.key === 'Enter';
 
 										if (isCmdOrCtrlPressed && isEnterPressed) {
-											document.getElementById('save-edit-message-button')?.click();
+											document.getElementById('confirm-edit-message-button')?.click();
 										}
 									}}
 								/>
 
-								<div class=" mt-2 mb-1 flex justify-end space-x-1.5 text-sm font-medium">
-									<button
-										id="close-edit-message-button"
-										class="px-4 py-2 bg-white hover:bg-gray-100 text-gray-800 transition rounded-3xl"
-										on:click={() => {
-											cancelEditMessage();
-										}}
-									>
-										{$i18n.t('Cancel')}
-									</button>
+								<div class=" mt-2 mb-1 flex justify-between text-sm font-medium">
+									<div>
+										<button
+											id="save-new-message-button"
+											class=" px-4 py-2 bg-gray-50 hover:bg-gray-100 dark:bg-gray-800 dark:hover:bg-gray-700 border dark:border-gray-700 text-gray-700 dark:text-gray-200 transition rounded-3xl"
+											on:click={() => {
+												saveNewMessageHandler();
+											}}
+										>
+											{$i18n.t('Save As Copy')}
+										</button>
+									</div>
 
-									<button
-										id="save-edit-message-button"
-										class=" px-4 py-2 bg-gray-900 hover:bg-gray-850 text-gray-100 transition rounded-3xl"
-										on:click={() => {
-											editMessageConfirmHandler();
-										}}
-									>
-										{$i18n.t('Save')}
-									</button>
+									<div class="flex space-x-1.5">
+										<button
+											id="close-edit-message-button"
+											class="px-4 py-2 bg-white dark:bg-gray-900 hover:bg-gray-100 text-gray-800 dark:text-gray-100 transition rounded-3xl"
+											on:click={() => {
+												cancelEditMessage();
+											}}
+										>
+											{$i18n.t('Cancel')}
+										</button>
+
+										<button
+											id="confirm-edit-message-button"
+											class=" px-4 py-2 bg-gray-900 dark:bg-white hover:bg-gray-850 text-gray-100 dark:text-gray-800 transition rounded-3xl"
+											on:click={() => {
+												editMessageConfirmHandler();
+											}}
+										>
+											{$i18n.t('Save')}
+										</button>
+									</div>
 								</div>
 							</div>
 						{:else}

+ 1 - 1
src/lib/components/chat/Messages/Skeleton.svelte

@@ -1,4 +1,4 @@
-<div class="w-full mt-2 mb-4">
+<div class="w-full mt-2 mb-2">
 	<div class="animate-pulse flex w-full">
 		<div class="space-y-2 w-full">
 			<div class="h-2 bg-gray-200 dark:bg-gray-600 rounded mr-14" />

+ 1 - 1
src/lib/components/chat/Messages/UserMessage.svelte

@@ -62,7 +62,7 @@
 	};
 </script>
 
-<div class=" flex w-full user-message" dir={$settings.chatDirection}>
+<div class=" flex w-full user-message" dir={$settings.chatDirection} id="message-{message.id}">
 	{#if !($settings?.chatBubble ?? true)}
 		<ProfileImage
 			src={message.user

+ 174 - 0
src/lib/components/chat/Overview.svelte

@@ -0,0 +1,174 @@
+<script lang="ts">
+	import { getContext, createEventDispatcher, onDestroy } from 'svelte';
+	import { useSvelteFlow, useNodesInitialized, useStore } from '@xyflow/svelte';
+
+	const dispatch = createEventDispatcher();
+	const i18n = getContext('i18n');
+
+	import { onMount, tick } from 'svelte';
+
+	import { writable } from 'svelte/store';
+	import { models, showOverview, theme, user } from '$lib/stores';
+
+	import '@xyflow/svelte/dist/style.css';
+
+	import CustomNode from './Overview/Node.svelte';
+	import Flow from './Overview/Flow.svelte';
+	import XMark from '../icons/XMark.svelte';
+
+	const { width, height } = useStore();
+
+	const { fitView, getViewport } = useSvelteFlow();
+	const nodesInitialized = useNodesInitialized();
+
+	export let history;
+
+	const nodes = writable([]);
+	const edges = writable([]);
+
+	const nodeTypes = {
+		custom: CustomNode
+	};
+
+	$: if (history) {
+		drawFlow();
+	}
+
+	$: if (history?.currentId) {
+		fitView({ nodes: [{ id: history.currentId }] });
+	}
+
+	const drawFlow = async () => {
+		const nodeList = [];
+		const edgeList = [];
+		const levelOffset = 150; // Vertical spacing between layers
+		const siblingOffset = 250; // Horizontal spacing between nodes at the same layer
+
+		// Map to keep track of node positions at each level
+		let positionMap = new Map();
+
+		// Helper function to truncate labels
+		function createLabel(content) {
+			const maxLength = 100;
+			return content.length > maxLength ? content.substr(0, maxLength) + '...' : content;
+		}
+
+		// Create nodes and map children to ensure alignment in width
+		let layerWidths = {}; // Track widths of each layer
+
+		Object.keys(history.messages).forEach((id) => {
+			const message = history.messages[id];
+			const level = message.parentId ? (positionMap.get(message.parentId)?.level ?? -1) + 1 : 0;
+			if (!layerWidths[level]) layerWidths[level] = 0;
+
+			positionMap.set(id, {
+				id: message.id,
+				level,
+				position: layerWidths[level]++
+			});
+		});
+
+		// Adjust positions based on siblings count to centralize vertical spacing
+		Object.keys(history.messages).forEach((id) => {
+			const pos = positionMap.get(id);
+			const xOffset = pos.position * siblingOffset;
+			const y = pos.level * levelOffset;
+			const x = xOffset;
+
+			nodeList.push({
+				id: pos.id,
+				type: 'custom',
+				data: {
+					user: $user,
+					message: history.messages[id],
+					model: $models.find((model) => model.id === history.messages[id].model)
+				},
+				position: { x, y }
+			});
+
+			// Create edges
+			const parentId = history.messages[id].parentId;
+			if (parentId) {
+				edgeList.push({
+					id: parentId + '-' + pos.id,
+					source: parentId,
+					target: pos.id,
+					selectable: false,
+					class: ' dark:fill-gray-300 fill-gray-300',
+					type: 'smoothstep',
+					animated: history.currentId === id || recurseCheckChild(id, history.currentId)
+				});
+			}
+		});
+
+		await edges.set([...edgeList]);
+		await nodes.set([...nodeList]);
+	};
+
+	const recurseCheckChild = (nodeId, currentId) => {
+		const node = history.messages[nodeId];
+		return (
+			node.childrenIds &&
+			node.childrenIds.some((id) => id === currentId || recurseCheckChild(id, currentId))
+		);
+	};
+
+	onMount(() => {
+		drawFlow();
+
+		nodesInitialized.subscribe(async (initialized) => {
+			if (initialized) {
+				await tick();
+				const res = await fitView({ nodes: [{ id: history.currentId }] });
+			}
+		});
+
+		width.subscribe((value) => {
+			if (value) {
+				// fitView();
+				fitView({ nodes: [{ id: history.currentId }] });
+			}
+		});
+
+		height.subscribe((value) => {
+			if (value) {
+				// fitView();
+				fitView({ nodes: [{ id: history.currentId }] });
+			}
+		});
+	});
+
+	onDestroy(() => {
+		console.log('Overview destroyed');
+
+		nodes.set([]);
+		edges.set([]);
+	});
+</script>
+
+<div class="w-full h-full relative">
+	<div class=" absolute z-50 w-full flex justify-between dark:text-gray-100 px-5 py-4">
+		<div class=" text-lg font-medium self-center font-primary">{$i18n.t('Chat Overview')}</div>
+		<button
+			class="self-center"
+			on:click={() => {
+				dispatch('close');
+				showOverview.set(false);
+			}}
+		>
+			<XMark className="size-4" />
+		</button>
+	</div>
+
+	{#if $nodes.length > 0}
+		<Flow
+			{nodes}
+			{nodeTypes}
+			{edges}
+			on:nodeclick={(e) => {
+				console.log(e.detail.node.data);
+				dispatch('nodeclick', e.detail);
+			}}
+		/>
+	{/if}
+</div>
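
drawFlow positions nodes on a simple grid: y is the tree depth times levelOffset (150) and x is the node's index within that depth times siblingOffset (250). For example (hypothetical node):

	// A node that is the third sibling (position 2) at depth 2 lands at:
	const levelOffset = 150;
	const siblingOffset = 250;
	const position = { x: 2 * siblingOffset, y: 2 * levelOffset }; // { x: 500, y: 300 }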

+ 36 - 0
src/lib/components/chat/Overview/Flow.svelte

@@ -0,0 +1,36 @@
+<script>
+	import { createEventDispatcher } from 'svelte';
+
+	const dispatch = createEventDispatcher();
+
+	import { theme } from '$lib/stores';
+	import { Background, Controls, SvelteFlow, BackgroundVariant } from '@xyflow/svelte';
+
+	export let nodes;
+	export let nodeTypes;
+	export let edges;
+</script>
+
+<SvelteFlow
+	{nodes}
+	{nodeTypes}
+	{edges}
+	fitView
+	minZoom={0.001}
+	colorMode={$theme.includes('dark')
+		? 'dark'
+		: $theme === 'system'
+			? window.matchMedia('(prefers-color-scheme: dark)').matches
+				? 'dark'
+				: 'light'
+			: 'light'}
+	nodesConnectable={false}
+	nodesDraggable={false}
+	on:nodeclick={(e) => dispatch('nodeclick', e.detail)}
+	oninit={() => {
+		console.log('Flow initialized');
+	}}
+>
+	<Controls showLock={false} />
+	<Background variant={BackgroundVariant.Dots} />
+</SvelteFlow>

+ 62 - 0
src/lib/components/chat/Overview/Node.svelte

@@ -0,0 +1,62 @@
+<script lang="ts">
+	import { WEBUI_BASE_URL } from '$lib/constants';
+	import { Handle, Position, type NodeProps } from '@xyflow/svelte';
+
+	import ProfileImageBase from '../Messages/ProfileImageBase.svelte';
+	import Tooltip from '$lib/components/common/Tooltip.svelte';
+
+	type $$Props = NodeProps;
+	export let data: $$Props['data'];
+</script>
+
+<div
+	class="px-4 py-3 shadow-md rounded-xl dark:bg-black bg-white border dark:border-gray-900 w-60 h-20"
+>
+	<Tooltip
+		content={data?.message?.error ? data.message.error.content : data.message.content}
+		class="w-full"
+	>
+		{#if data.message.role === 'user'}
+			<div class="flex w-full">
+				<ProfileImageBase
+					src={data.user?.profile_image_url ?? '/user.png'}
+					className={'size-5 -translate-y-[1px]'}
+				/>
+				<div class="ml-2">
+					<div class="text-xs text-black dark:text-white font-medium">
+						{data?.user?.name ?? 'User'}
+					</div>
+
+					{#if data?.message?.error}
+						<div class="text-red-500 line-clamp-2 text-xs mt-0.5">{data.message.error.content}</div>
+					{:else}
+						<div class="text-gray-500 line-clamp-2 text-xs mt-0.5">{data.message.content}</div>
+					{/if}
+				</div>
+			</div>
+		{:else}
+			<div class="flex w-full">
+				<ProfileImageBase
+					src={data?.model?.info?.meta?.profile_image_url ?? ''}
+					className={'size-5 -translate-y-[1px]'}
+				/>
+
+				<div class="ml-2">
+					<div class="text-xs text-black dark:text-white font-medium">
+						{data?.model?.name ?? data?.message?.model ?? 'Assistant'}
+					</div>
+
+					{#if data?.message?.error}
+						<div class="text-red-500 line-clamp-2 text-xs mt-0.5">
+							{data.message.error.content}
+						</div>
+					{:else}
+						<div class="text-gray-500 line-clamp-2 text-xs mt-0.5">{data.message.content}</div>
+					{/if}
+				</div>
+			</div>
+		{/if}
+	</Tooltip>
+	<Handle type="target" position={Position.Top} class="w-2 rounded-full dark:bg-gray-900" />
+	<Handle type="source" position={Position.Bottom} class="w-2 rounded-full dark:bg-gray-900" />
+</div>

+ 1 - 1
src/lib/components/chat/Settings/Advanced/AdvancedParams.svelte

@@ -41,7 +41,7 @@
 	}
 </script>
 
-<div class=" space-y-1 text-xs">
+<div class=" space-y-1 text-xs pb-safe-bottom">
 	<div class=" py-0.5 w-full justify-between">
 		<div class="flex w-full justify-between">
 			<div class=" self-center text-xs font-medium">{$i18n.t('Seed')}</div>

+ 21 - 0
src/lib/components/chat/Settings/Audio.svelte

@@ -23,6 +23,10 @@
 	let voices = [];
 	let voice = '';
 
+	// Audio speed control
+	let speechRate = 1;
+	const speedOptions = [2, 1.75, 1.5, 1.25, 1, 0.75, 0.5];
+
 	const getVoices = async () => {
 		if ($config.audio.tts.engine === '') {
 			const getVoicesLoop = setInterval(async () => {
@@ -56,6 +60,7 @@
 	};
 
 	onMount(async () => {
+		speechRate = $settings.audio?.tts?.speedRate ?? 1;
 		conversationMode = $settings.conversationMode ?? false;
 		speechAutoSend = $settings.speechAutoSend ?? false;
 		responseAutoPlayback = $settings.responseAutoPlayback ?? false;
@@ -83,6 +88,7 @@
 					engine: STTEngine !== '' ? STTEngine : undefined
 				},
 				tts: {
+					speedRate: speechRate,
 					voice: voice !== '' ? voice : undefined,
 					defaultVoice: $config?.audio?.tts?.voice ?? '',
 					nonLocalVoices: $config.audio.tts.engine === '' ? nonLocalVoices : undefined
@@ -153,6 +159,21 @@
 					{/if}
 				</button>
 			</div>
+
+			<div class=" py-0.5 flex w-full justify-between">
+				<div class=" self-center text-xs font-medium">{$i18n.t('Speed Rate')}</div>
+
+				<div class="flex items-center relative">
+					<select
+						class="dark:bg-gray-900 w-fit pr-8 rounded px-2 p-1 text-xs bg-transparent outline-none text-right"
+						bind:value={speechRate}
+					>
+						{#each speedOptions as option}
+							<option value={option} selected={speechRate === option}>{option}x</option>
+						{/each}
+					</select>
+				</div>
+			</div>
 		</div>
 
 		<hr class=" dark:border-gray-850" />
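
Note: the new control only persists the value as `settings.audio.tts.speedRate` (default 1); the playback side is outside this hunk. An illustrative sketch of how a consumer could apply it:

```ts
// Illustrative only: apply the persisted speed rate to an audio element.
interface AudioSettings {
	tts?: { speedRate?: number };
}

function playTTS(audio: HTMLAudioElement, settings: AudioSettings): void {
	audio.playbackRate = settings.tts?.speedRate ?? 1; // 1x when unset, matching the default above
	void audio.play();
}
```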

+ 23 - 0
src/lib/components/chat/Settings/General.svelte

@@ -114,6 +114,29 @@
 			document.documentElement.classList.add(e);
 		});
 
+		const metaThemeColor = document.querySelector('meta[name="theme-color"]');
+		if (metaThemeColor) {
+			if (_theme.includes('system')) {
+				const systemTheme = window.matchMedia('(prefers-color-scheme: dark)').matches
+					? 'dark'
+					: 'light';
+				console.log('Setting system meta theme color: ' + systemTheme);
+				metaThemeColor.setAttribute('content', systemTheme === 'light' ? '#ffffff' : '#171717');
+			} else {
+				console.log('Setting meta theme color: ' + _theme);
+				metaThemeColor.setAttribute(
+					'content',
+					_theme === 'dark'
+						? '#171717'
+						: _theme === 'oled-dark'
+							? '#000000'
+							: _theme === 'her'
+								? '#983724'
+								: '#ffffff'
+				);
+			}
+		}
+
 		console.log(_theme);
 	};
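
Note: the branch above amounts to a small theme → hex mapping for the `theme-color` meta tag; the same logic extracted as a pure function, for clarity only:

```ts
// Same mapping as the inline branch above (sketch).
function metaThemeColor(theme: string, prefersDark: boolean): string {
	if (theme.includes('system')) return prefersDark ? '#171717' : '#ffffff';
	if (theme === 'dark') return '#171717';
	if (theme === 'oled-dark') return '#000000';
	if (theme === 'her') return '#983724';
	return '#ffffff';
}
```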
 

+ 30 - 0
src/lib/components/chat/Settings/Interface.svelte

@@ -36,11 +36,18 @@
 	let voiceInterruption = false;
 	let hapticFeedback = false;
 
+	let streamResponse = true;
+
 	const toggleSplitLargeChunks = async () => {
 		splitLargeChunks = !splitLargeChunks;
 		saveSettings({ splitLargeChunks: splitLargeChunks });
 	};
 
+	const toggleStreamResponse = async () => {
+		streamResponse = !streamResponse;
+		saveSettings({ streamResponse: streamResponse });
+	};
+
 	const togglesScrollOnBranchChange = async () => {
 		scrollOnBranchChange = !scrollOnBranchChange;
 		saveSettings({ scrollOnBranchChange: scrollOnBranchChange });
@@ -158,6 +165,7 @@
 		userLocation = $settings.userLocation ?? false;
 
 		hapticFeedback = $settings.hapticFeedback ?? false;
+		streamResponse = $settings?.streamResponse ?? true;
 
 		defaultModelId = $settings?.models?.at(0) ?? '';
 		if ($config?.default_models) {
@@ -311,6 +319,28 @@
 				</div>
 			</div>
 
+			<div>
+				<div class=" py-0.5 flex w-full justify-between">
+					<div class=" self-center text-xs">
+						{$i18n.t('Stream Chat Response')}
+					</div>
+
+					<button
+						class="p-1 px-3 text-xs flex rounded transition"
+						on:click={() => {
+							toggleStreamResponse();
+						}}
+						type="button"
+					>
+						{#if streamResponse === true}
+							<span class="ml-2 self-center">{$i18n.t('On')}</span>
+						{:else}
+							<span class="ml-2 self-center">{$i18n.t('Off')}</span>
+						{/if}
+					</button>
+				</div>
+			</div>
+
 			<div>
 				<div class=" py-0.5 flex w-full justify-between">
 					<div class=" self-center text-xs">

+ 4 - 4
src/lib/components/common/ConfirmDialog.svelte

@@ -49,20 +49,20 @@
 	<!-- svelte-ignore a11y-no-static-element-interactions -->
 	<div
 		bind:this={modalElement}
-		class=" fixed top-0 right-0 left-0 bottom-0 bg-black/60 w-full min-h-screen h-screen flex justify-center z-[9999] overflow-hidden overscroll-contain"
+		class=" fixed top-0 right-0 left-0 bottom-0 bg-black/60 w-full h-screen max-h-[100dvh] flex justify-center z-[9999] overflow-hidden overscroll-contain"
 		in:fade={{ duration: 10 }}
 		on:mousedown={() => {
 			show = false;
 		}}
 	>
 		<div
-			class=" m-auto rounded-2xl max-w-full w-[32rem] mx-2 bg-gray-50 dark:bg-gray-950 shadow-3xl border border-gray-850"
+			class=" m-auto rounded-2xl max-w-full w-[32rem] mx-2 bg-gray-50 dark:bg-gray-950 max-h-[100dvh] shadow-3xl border border-gray-850"
 			in:flyAndScale
 			on:mousedown={(e) => {
 				e.stopPropagation();
 			}}
 		>
-			<div class="px-[1.75rem] py-6">
+			<div class="px-[1.75rem] py-6 flex flex-col">
 				<div class=" text-lg font-semibold dark:text-gray-200 mb-2.5">
 					{#if title !== ''}
 						{title}
@@ -72,7 +72,7 @@
 				</div>
 
 				<slot>
-					<div class=" text-sm text-gray-500">
+					<div class=" text-sm text-gray-500 flex-1">
 						{#if message !== ''}
 							{message}
 						{:else}

+ 12 - 3
src/lib/components/common/Drawer.svelte

@@ -3,6 +3,8 @@
 	import { flyAndScale } from '$lib/utils/transitions';
 	import { fade, fly, slide } from 'svelte/transition';
 
+	const dispatch = createEventDispatcher();
+
 	export let show = false;
 	export let size = 'md';
 
@@ -42,15 +44,22 @@
 		window.addEventListener('keydown', handleKeyDown);
 		document.body.style.overflow = 'hidden';
 	} else if (modalElement) {
+		dispatch('close');
 		window.removeEventListener('keydown', handleKeyDown);
-		document.body.removeChild(modalElement);
-		document.body.style.overflow = 'unset';
+
+		if (document.body.contains(modalElement)) {
+			document.body.removeChild(modalElement);
+			document.body.style.overflow = 'unset';
+		}
 	}
 
 	onDestroy(() => {
 		show = false;
 		if (modalElement) {
-			document.body.removeChild(modalElement);
+			if (document.body.contains(modalElement)) {
+				document.body.removeChild(modalElement);
+				document.body.style.overflow = 'unset';
+			}
 		}
 	});
 </script>
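
Note: the change guards teardown so a second code path cannot call `removeChild` on an already-detached element. The pattern in isolation:

```ts
// Guarded removal, as introduced above: only detach if still attached.
function safeDetach(el: HTMLElement): void {
	if (document.body.contains(el)) {
		document.body.removeChild(el);
		document.body.style.overflow = 'unset';
	}
}
```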

+ 3 - 3
src/lib/components/common/ImagePreview.svelte

@@ -9,14 +9,14 @@
 
 	let previewElement = null;
 
-	const downloadImage = (url, filename) => {
+	const downloadImage = (url, filename, prefixName = '') => {
 		fetch(url)
 			.then((response) => response.blob())
 			.then((blob) => {
 				const objectUrl = window.URL.createObjectURL(blob);
 				const link = document.createElement('a');
 				link.href = objectUrl;
-				link.download = filename;
+				link.download = `${prefixName}${filename}`;
 				document.body.appendChild(link);
 				link.click();
 				document.body.removeChild(link);
@@ -87,7 +87,7 @@
 				<button
 					class=" p-5"
 					on:click={() => {
-						downloadImage(src, src.substring(src.lastIndexOf('/') + 1));
+						downloadImage(src, src.substring(src.lastIndexOf('/') + 1), alt);
 					}}
 				>
 					<svg

+ 19 - 0
src/lib/components/icons/ArrowUpCircle.svelte

@@ -0,0 +1,19 @@
+<script lang="ts">
+	export let className = 'size-4';
+	export let strokeWidth = '1.5';
+</script>
+
+<svg
+	xmlns="http://www.w3.org/2000/svg"
+	fill="none"
+	viewBox="0 0 24 24"
+	stroke-width={strokeWidth}
+	stroke="currentColor"
+	class={className}
+>
+	<path
+		stroke-linecap="round"
+		stroke-linejoin="round"
+		d="m15 11.25-3-3m0 0-3 3m3-3v7.5M21 12a9 9 0 1 1-18 0 9 9 0 0 1 18 0Z"
+	/>
+</svg>

+ 19 - 0
src/lib/components/icons/Clipboard.svelte

@@ -0,0 +1,19 @@
+<script lang="ts">
+	export let className = 'size-4';
+	export let strokeWidth = '2';
+</script>
+
+<svg
+	xmlns="http://www.w3.org/2000/svg"
+	fill="none"
+	viewBox="0 0 24 24"
+	stroke-width={strokeWidth}
+	stroke="currentColor"
+	class={className}
+>
+	<path
+		stroke-linecap="round"
+		stroke-linejoin="round"
+		d="M15.666 3.888A2.25 2.25 0 0 0 13.5 2.25h-3c-1.03 0-1.9.693-2.166 1.638m7.332 0c.055.194.084.4.084.612v0a.75.75 0 0 1-.75.75H9a.75.75 0 0 1-.75-.75v0c0-.212.03-.418.084-.612m7.332 0c.646.049 1.288.11 1.927.184 1.1.128 1.907 1.077 1.907 2.185V19.5a2.25 2.25 0 0 1-2.25 2.25H6.75A2.25 2.25 0 0 1 4.5 19.5V6.257c0-1.108.806-2.057 1.907-2.185a48.208 48.208 0 0 1 1.927-.184"
+	/>
+</svg>

+ 19 - 0
src/lib/components/icons/Map.svelte

@@ -0,0 +1,19 @@
+<script lang="ts">
+	export let className = 'size-4';
+	export let strokeWidth = '2';
+</script>
+
+<svg
+	xmlns="http://www.w3.org/2000/svg"
+	fill="none"
+	viewBox="0 0 24 24"
+	stroke-width={strokeWidth}
+	stroke="currentColor"
+	class={className}
+>
+	<path
+		stroke-linecap="round"
+		stroke-linejoin="round"
+		d="M9 6.75V15m6-6v8.25m.503 3.498 4.875-2.437c.381-.19.622-.58.622-1.006V4.82c0-.836-.88-1.38-1.628-1.006l-3.869 1.934c-.317.159-.69.159-1.006 0L9.503 3.252a1.125 1.125 0 0 0-1.006 0L3.622 5.689C3.24 5.88 3 6.27 3 6.695V19.18c0 .836.88 1.38 1.628 1.006l3.869-1.934c.317-.159.69-.159 1.006 0l4.994 2.497c.317.158.69.158 1.006 0Z"
+	/>
+</svg>

+ 18 - 17
src/lib/components/layout/Navbar.svelte

@@ -8,7 +8,7 @@
 		mobile,
 		settings,
 		showArchivedChats,
-		showSettings,
+		showControls,
 		showSidebar,
 		user
 	} from '$lib/stores';
@@ -22,6 +22,7 @@
 	import UserMenu from './Sidebar/UserMenu.svelte';
 	import MenuLines from '../icons/MenuLines.svelte';
 	import AdjustmentsHorizontal from '../icons/AdjustmentsHorizontal.svelte';
+	import Map from '../icons/Map.svelte';
 
 	const i18n = getContext('i18n');
 
@@ -31,9 +32,7 @@
 
 	export let chat;
 	export let selectedModels;
-
 	export let showModelSelector = true;
-	export let showControls = false;
 
 	let showShareChatModal = false;
 	let showDownloadChatModal = false;
@@ -83,7 +82,7 @@
 						}}
 					>
 						<button
-							class="hidden md:flex cursor-pointer px-2 py-2 rounded-xl hover:bg-gray-50 dark:hover:bg-gray-850 transition"
+							class="flex cursor-pointer px-2 py-2 rounded-xl hover:bg-gray-50 dark:hover:bg-gray-850 transition"
 							id="chat-context-menu-button"
 						>
 							<div class=" m-auto self-center">
@@ -106,19 +105,21 @@
 					</Menu>
 				{/if}
 
-				<Tooltip content={$i18n.t('Controls')}>
-					<button
-						class=" flex cursor-pointer px-2 py-2 rounded-xl hover:bg-gray-50 dark:hover:bg-gray-850 transition"
-						on:click={() => {
-							showControls = !showControls;
-						}}
-						aria-label="Controls"
-					>
-						<div class=" m-auto self-center">
-							<AdjustmentsHorizontal className=" size-5" strokeWidth="0.5" />
-						</div>
-					</button>
-				</Tooltip>
+				{#if !$mobile}
+					<Tooltip content={$i18n.t('Controls')}>
+						<button
+							class=" flex cursor-pointer px-2 py-2 rounded-xl hover:bg-gray-50 dark:hover:bg-gray-850 transition"
+							on:click={() => {
+								showControls.set(!$showControls);
+							}}
+							aria-label="Controls"
+						>
+							<div class=" m-auto self-center">
+								<AdjustmentsHorizontal className=" size-5" strokeWidth="0.5" />
+							</div>
+						</button>
+					</Tooltip>
+				{/if}
 
 				<Tooltip content={$i18n.t('New Chat')}>
 					<button

+ 59 - 12
src/lib/components/layout/Navbar/Menu.svelte

@@ -5,13 +5,19 @@
 	import fileSaver from 'file-saver';
 	const { saveAs } = fileSaver;
 
-	import { showSettings } from '$lib/stores';
+	import { downloadChatAsPDF } from '$lib/apis/utils';
+	import { copyToClipboard } from '$lib/utils';
+
+	import { showOverview, showControls, mobile } from '$lib/stores';
 	import { flyAndScale } from '$lib/utils/transitions';
 
 	import Dropdown from '$lib/components/common/Dropdown.svelte';
 	import Tags from '$lib/components/chat/Tags.svelte';
-
-	import { downloadChatAsPDF } from '$lib/apis/utils';
+	import Map from '$lib/components/icons/Map.svelte';
+	import { get } from 'svelte/store';
+	import Clipboard from '$lib/components/icons/Clipboard.svelte';
+	import { toast } from 'svelte-sonner';
+	import AdjustmentsHorizontal from '$lib/components/icons/AdjustmentsHorizontal.svelte';
 
 	const i18n = getContext('i18n');
 
@@ -24,14 +30,18 @@
 	export let chat;
 	export let onClose: Function = () => {};
 
-	const downloadTxt = async () => {
+	const getChatAsText = async () => {
 		const _chat = chat.chat;
-		console.log('download', chat);
-
 		const chatText = _chat.messages.reduce((a, message, i, arr) => {
 			return `${a}### ${message.role.toUpperCase()}\n${message.content}\n\n`;
 		}, '');
 
+		return chatText.trim();
+	};
+
+	const downloadTxt = async () => {
+		const chatText = await getChatAsText();
+
 		let blob = new Blob([chatText], {
 			type: 'text/plain'
 		});
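
Note: `getChatAsText` renders the conversation as `### ROLE` headers followed by the message body. An equivalent standalone sketch:

```ts
// Equivalent to the reduce in getChatAsText above (sketch).
interface Message {
	role: string;
	content: string;
}

function chatToText(messages: Message[]): string {
	return messages
		.map((m) => `### ${m.role.toUpperCase()}\n${m.content}\n`)
		.join('\n')
		.trim();
}

// chatToText([{ role: 'user', content: 'Hi' }]) === '### USER\nHi'
```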
@@ -117,6 +127,48 @@
 				<div class="flex items-center">{$i18n.t('Settings')}</div>
 			</DropdownMenu.Item> -->
 
+			{#if $mobile}
+				<DropdownMenu.Item
+					class="flex gap-2 items-center px-3 py-2 text-sm  cursor-pointer hover:bg-gray-50 dark:hover:bg-gray-800 rounded-md"
+					id="chat-controls-button"
+					on:click={async () => {
+						await showControls.set(true);
+					}}
+				>
+					<AdjustmentsHorizontal className=" size-4" strokeWidth="0.5" />
+					<div class="flex items-center">{$i18n.t('Controls')}</div>
+				</DropdownMenu.Item>
+			{/if}
+
+			<DropdownMenu.Item
+				class="flex gap-2 items-center px-3 py-2 text-sm  cursor-pointer hover:bg-gray-50 dark:hover:bg-gray-800 rounded-md"
+				id="chat-overview-button"
+				on:click={async () => {
+					await showControls.set(true);
+					await showOverview.set(true);
+				}}
+			>
+				<Map className=" size-4" strokeWidth="1.5" />
+				<div class="flex items-center">{$i18n.t('Overview')}</div>
+			</DropdownMenu.Item>
+
+			<DropdownMenu.Item
+				class="flex gap-2 items-center px-3 py-2 text-sm  cursor-pointer hover:bg-gray-50 dark:hover:bg-gray-800 rounded-md"
+				id="chat-copy-button"
+				on:click={async () => {
+					const res = await copyToClipboard(await getChatAsText()).catch((e) => {
+						console.error(e);
+					});
+
+					if (res) {
+						toast.success($i18n.t('Copied to clipboard'));
+					}
+				}}
+			>
+				<Clipboard className=" size-4" strokeWidth="1.5" />
+				<div class="flex items-center">{$i18n.t('Copy')}</div>
+			</DropdownMenu.Item>
+
 			<DropdownMenu.Item
 				class="flex gap-2 items-center px-3 py-2 text-sm  cursor-pointer hover:bg-gray-50 dark:hover:bg-gray-800 rounded-md"
 				id="chat-share-button"
@@ -138,12 +190,7 @@
 				</svg>
 				<div class="flex items-center">{$i18n.t('Share')}</div>
 			</DropdownMenu.Item>
-			<!-- <DropdownMenu.Item
-					class="flex gap-2 items-center px-3 py-2 text-sm  font-medium cursor-pointer"
-					on:click={() => {
-						downloadHandler();
-					}}
-				/> -->
+
 			<DropdownMenu.Sub>
 				<DropdownMenu.SubTrigger
 					class="flex gap-2 items-center px-3 py-2 text-sm  cursor-pointer hover:bg-gray-50 dark:hover:bg-gray-800 rounded-md"

+ 19 - 9
src/lib/components/layout/Sidebar.svelte

@@ -53,6 +53,8 @@
 	let showDeleteConfirm = false;
 	let showDropdown = false;
 
+	let selectedTagName = null;
+
 	let filteredChatList = [];
 
 	// Pagination variables
@@ -240,7 +242,7 @@
 		deleteChatHandler(deleteChat.id);
 	}}
 >
-	<div class=" text-sm text-gray-500">
+	<div class=" text-sm text-gray-500 flex-1 line-clamp-3">
 		{$i18n.t('This will delete')} <span class="  font-semibold">{deleteChat.title}</span>.
 	</div>
 </DeleteConfirmDialog>
@@ -380,11 +382,13 @@
 			</div>
 		{/if}
 
-		<div class="relative flex flex-col flex-1 overflow-y-auto">
+		<div
+			class="relative flex flex-col flex-1 overflow-y-auto {$temporaryChatEnabled
+				? 'opacity-20'
+				: ''}"
+		>
 			{#if $temporaryChatEnabled}
-				<div
-					class="absolute z-40 w-full h-full bg-gray-50/90 dark:bg-black/90 flex justify-center"
-				></div>
+				<div class="absolute z-40 w-full h-full flex justify-center"></div>
 			{/if}
 
 			<div class="px-2 mt-0.5 mb-2 flex justify-center space-x-2">
@@ -419,10 +423,13 @@
 			</div>
 
 			{#if $tags.filter((t) => t.name !== 'pinned').length > 0}
-				<div class="px-2.5 mb-2 flex gap-1 flex-wrap">
+				<div class="px-3.5 mb-1 flex gap-0.5 flex-wrap">
 					<button
-						class="px-2.5 text-xs font-medium bg-gray-50 dark:bg-gray-900 dark:hover:bg-gray-800 transition rounded-full"
+						class="px-2.5 py-[1px] text-xs transition {selectedTagName === null
+							? 'bg-gray-100 dark:bg-gray-900'
+							: ' '} rounded-md font-medium"
 						on:click={async () => {
+							selectedTagName = null;
 							await enablePagination();
 						}}
 					>
@@ -430,8 +437,11 @@
 					</button>
 					{#each $tags.filter((t) => t.name !== 'pinned') as tag}
 						<button
-							class="px-2.5 text-xs font-medium bg-gray-50 dark:bg-gray-900 dark:hover:bg-gray-800 transition rounded-full"
+							class="px-2.5 py-[1px] text-xs transition {selectedTagName === tag.name
+								? 'bg-gray-100 dark:bg-gray-900'
+								: ''}  rounded-md font-medium"
 							on:click={async () => {
+								selectedTagName = tag.name;
 								scrollPaginationEnabled.set(false);
 								let chatIds = await getChatListByTagName(localStorage.token, tag.name);
 								if (chatIds.length === 0) {
@@ -551,7 +561,7 @@
 			</div>
 		</div>
 
-		<div class="px-2.5">
+		<div class="px-2.5 pb-safe-bottom">
 			<!-- <hr class=" border-gray-900 mb-1 w-full" /> -->
 
 			<div class="flex flex-col font-primary">

+ 1 - 1
src/lib/components/workspace/Documents.svelte

@@ -55,7 +55,7 @@
 	const uploadDoc = async (file, tags?: object) => {
 		console.log(file);
 		// Check if the file is an audio file and transcribe/convert it to text file
-		if (['audio/mpeg', 'audio/wav'].includes(file['type'])) {
+		if (['audio/mpeg', 'audio/wav', 'audio/ogg'].includes(file['type'])) {
 			const transcribeRes = await transcribeAudio(localStorage.token, file).catch((error) => {
 				toast.error(error);
 				return null;

+ 56 - 1
src/lib/components/workspace/Models.svelte

@@ -94,6 +94,58 @@
 		window.addEventListener('message', messageHandler, false);
 	};
 
+	const moveToTopHandler = async (model) => {
+		// find models with position 0 and set them to 1
+		const topModels = _models.filter((m) => m.info?.meta?.position === 0);
+		for (const m of topModels) {
+			let info = m.info;
+			if (!info) {
+				info = {
+					id: m.id,
+					name: m.name,
+					meta: {
+						position: 1
+					},
+					params: {}
+				};
+			}
+
+			info.meta = {
+				...info.meta,
+				position: 1
+			};
+
+			await updateModelById(localStorage.token, info.id, info);
+		}
+
+		let info = model.info;
+
+		if (!info) {
+			info = {
+				id: model.id,
+				name: model.name,
+				meta: {
+					position: 0
+				},
+				params: {}
+			};
+		}
+
+		info.meta = {
+			...info.meta,
+			position: 0
+		};
+
+		const res = await updateModelById(localStorage.token, info.id, info);
+
+		if (res) {
+			toast.success($i18n.t(`Model {{name}} is now at the top`, { name: info.id }));
+		}
+
+		await models.set(await getModels(localStorage.token));
+		_models = $models;
+	};
+
 	const hideModelHandler = async (model) => {
 		let info = model.info;
 
@@ -322,7 +374,7 @@
 			>
 				<div class=" self-start w-8 pt-0.5">
 					<div
-						class=" rounded-full bg-stone-700 {(model?.info?.meta?.hidden ?? false)
+						class=" rounded-full object-cover {(model?.info?.meta?.hidden ?? false)
 							? 'brightness-90 dark:brightness-50'
 							: ''} "
 					>
@@ -440,6 +492,9 @@
 						exportHandler={() => {
 							exportModelHandler(model);
 						}}
+						moveToTopHandler={() => {
+							moveToTopHandler(model);
+						}}
 						hideHandler={() => {
 							hideModelHandler(model);
 						}}
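
Note: `moveToTopHandler` pins a model by giving it `meta.position = 0` and bumping any existing position-0 models to 1. Display order then follows position; a sketch of the assumed ascending sort, with unpositioned models last (the sort itself is an assumption about how the list is rendered):

```ts
// Assumption for illustration: the workspace list sorts ascending by meta.position.
interface ModelEntry {
	id: string;
	info?: { meta?: { position?: number } };
}

function sortByPosition(models: ModelEntry[]): ModelEntry[] {
	return [...models].sort(
		(a, b) =>
			(a.info?.meta?.position ?? Number.MAX_SAFE_INTEGER) -
			(b.info?.meta?.position ?? Number.MAX_SAFE_INTEGER)
	);
}
```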

+ 44 - 0
src/lib/components/workspace/Models/Capabilities.svelte

@@ -0,0 +1,44 @@
+<script lang="ts">
+	import { getContext } from 'svelte';
+	import Checkbox from '$lib/components/common/Checkbox.svelte';
+	import Tooltip from '$lib/components/common/Tooltip.svelte';
+	import { marked } from 'marked';
+
+	const i18n = getContext('i18n');
+
+	const helpText = {
+		vision: $i18n.t('Model accepts image inputs'),
+		usage: $i18n.t(
+			'Sends `stream_options: { include_usage: true }` in the request.\nSupported providers will return token usage information in the response when set.'
+		)
+	};
+
+	export let capabilities: {
+		vision?: boolean;
+		usage?: boolean;
+	} = {};
+</script>
+
+<div>
+	<div class="flex w-full justify-between mb-1">
+		<div class=" self-center text-sm font-semibold">{$i18n.t('Capabilities')}</div>
+	</div>
+	<div class="flex flex-col">
+		{#each Object.keys(capabilities) as capability}
+			<div class=" flex items-center gap-2">
+				<Checkbox
+					state={capabilities[capability] ? 'checked' : 'unchecked'}
+					on:change={(e) => {
+						capabilities[capability] = e.detail === 'checked';
+					}}
+				/>
+
+				<div class=" py-0.5 text-sm capitalize">
+					<Tooltip content={marked.parse(helpText[capability])}>
+						{$i18n.t(capability)}
+					</Tooltip>
+				</div>
+			</div>
+		{/each}
+	</div>
+</div>
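
Note: the `usage` help text above describes the effect on the outbound request. A sketch of the payload change for an OpenAI-compatible endpoint; the builder itself is illustrative, not the app's actual request code:

```ts
// Illustrative payload tweak matching the help text above.
function withUsageTracking<T extends Record<string, unknown>>(body: T, usageEnabled: boolean) {
	return usageEnabled ? { ...body, stream_options: { include_usage: true } } : body;
}

// withUsageTracking({ model: 'my-model', stream: true }, true)
// → { model: 'my-model', stream: true, stream_options: { include_usage: true } }
```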

+ 13 - 0
src/lib/components/workspace/Models/ModelMenu.svelte

@@ -12,6 +12,7 @@
 	import ArchiveBox from '$lib/components/icons/ArchiveBox.svelte';
 	import DocumentDuplicate from '$lib/components/icons/DocumentDuplicate.svelte';
 	import ArrowDownTray from '$lib/components/icons/ArrowDownTray.svelte';
+	import ArrowUpCircle from '$lib/components/icons/ArrowUpCircle.svelte';
 
 	const i18n = getContext('i18n');
 
@@ -21,6 +22,7 @@
 	export let cloneHandler: Function;
 	export let exportHandler: Function;
 
+	export let moveToTopHandler: Function;
 	export let hideHandler: Function;
 	export let deleteHandler: Function;
 	export let onClose: Function;
@@ -80,6 +82,17 @@
 				<div class="flex items-center">{$i18n.t('Export')}</div>
 			</DropdownMenu.Item>
 
+			<DropdownMenu.Item
+				class="flex gap-2 items-center px-3 py-2 text-sm  font-medium cursor-pointer hover:bg-gray-50 dark:hover:bg-gray-800 rounded-md"
+				on:click={() => {
+					moveToTopHandler();
+				}}
+			>
+				<ArrowUpCircle />
+
+				<div class="flex items-center">{$i18n.t('Move to Top')}</div>
+			</DropdownMenu.Item>
+
 			<DropdownMenu.Item
 				class="flex  gap-2  items-center px-3 py-2 text-sm  font-medium cursor-pointer hover:bg-gray-50 dark:hover:bg-gray-800 rounded-md"
 				on:click={() => {

+ 19 - 0
src/lib/i18n/locales/ar-BH/translation.json

@@ -73,7 +73,10 @@
 	"AUTOMATIC1111 Api Auth String": "",
 	"AUTOMATIC1111 Base URL": "AUTOMATIC1111 الرابط الرئيسي",
 	"AUTOMATIC1111 Base URL is required.": "AUTOMATIC1111 الرابط مطلوب",
+	"Available list": "",
 	"available!": "متاح",
+	"Azure AI Speech": "",
+	"Azure Region": "",
 	"Back": "خلف",
 	"Bad Response": "استجابة خطاء",
 	"Banners": "لافتات",
@@ -94,6 +97,7 @@
 	"Chat Bubble UI": "UI الدردشة",
 	"Chat Controls": "",
 	"Chat direction": "اتجاه المحادثة",
+	"Chat Overview": "",
 	"Chats": "المحادثات",
 	"Check Again": "تحقق مرة اخرى",
 	"Check for updates": "تحقق من التحديثات",
@@ -238,6 +242,7 @@
 	"Enter a detail about yourself for your LLMs to recall": "ادخل معلومات عنك تريد أن يتذكرها الموديل",
 	"Enter api auth string (e.g. username:password)": "",
 	"Enter Brave Search API Key": "أدخل مفتاح واجهة برمجة تطبيقات البحث الشجاع",
+	"Enter CFG Scale (e.g. 7.0)": "",
 	"Enter Chunk Overlap": "أدخل الChunk Overlap",
 	"Enter Chunk Size": "أدخل Chunk الحجم",
 	"Enter Github Raw URL": "أدخل عنوان URL ل Github Raw",
@@ -248,6 +253,8 @@
 	"Enter Model ID": "",
 	"Enter model tag (e.g. {{modelTag}})": "(e.g. {{modelTag}}) أدخل الموديل تاق",
 	"Enter Number of Steps (e.g. 50)": "(e.g. 50) أدخل عدد الخطوات",
+	"Enter Sampler (e.g. Euler a)": "",
+	"Enter Scheduler (e.g. Karras)": "",
 	"Enter Score": "أدخل النتيجة",
 	"Enter SearchApi API Key": "",
 	"Enter SearchApi Engine": "",
@@ -408,6 +415,8 @@
 	"Model {{modelId}} not found": "لم يتم العثور على النموذج {{modelId}}.",
 	"Model {{modelName}} is not vision capable": "نموذج {{modelName}} غير قادر على الرؤية",
 	"Model {{name}} is now {{status}}": "نموذج {{name}} هو الآن {{status}}",
+	"Model {{name}} is now at the top": "",
+	"Model accepts image inputs": "",
 	"Model created successfully!": "",
 	"Model filesystem path detected. Model shortname is required for update, cannot continue.": "تم اكتشاف مسار نظام الملفات النموذجي. الاسم المختصر للنموذج مطلوب للتحديث، ولا يمكن الاستمرار.",
 	"Model ID": "رقم الموديل",
@@ -419,6 +428,7 @@
 	"Modelfile Content": "محتوى الملف النموذجي",
 	"Models": "الموديلات",
 	"More": "المزيد",
+	"Move to Top": "",
 	"Name": "الأسم",
 	"Name Tag": "أسم التاق",
 	"Name your model": "قم بتسمية النموذج الخاص بك",
@@ -464,6 +474,8 @@
 	"OpenAI URL/Key required.": "URL/مفتاح OpenAI.مطلوب عنوان ",
 	"or": "أو",
 	"Other": "آخر",
+	"Output format": "",
+	"Overview": "",
 	"Password": "الباسورد",
 	"PDF document (.pdf)": "PDF ملف (.pdf)",
 	"PDF Extract Images (OCR)": "PDF أستخرج الصور (OCR)",
@@ -526,6 +538,7 @@
 	"Save": "حفظ",
 	"Save & Create": "حفظ وإنشاء",
 	"Save & Update": "حفظ وتحديث",
+	"Save As Copy": "",
 	"Save Tag": "",
 	"Saving chat logs directly to your browser's storage is no longer supported. Please take a moment to download and delete your chat logs by clicking the button below. Don't worry, you can easily re-import your chat logs to the backend through": "لم يعد حفظ سجلات الدردشة مباشرة في مساحة تخزين متصفحك مدعومًا. يرجى تخصيص بعض الوقت لتنزيل وحذف سجلات الدردشة الخاصة بك عن طريق النقر على الزر أدناه. لا تقلق، يمكنك بسهولة إعادة استيراد سجلات الدردشة الخاصة بك إلى الواجهة الخلفية من خلاله",
 	"Scan": "مسح",
@@ -572,16 +585,20 @@
 	"Send": "تم",
 	"Send a Message": "يُرجى إدخال طلبك هنا",
 	"Send message": "يُرجى إدخال طلبك هنا.",
+	"Sends `stream_options: { include_usage: true }` in the request.\nSupported providers will return token usage information in the response when set.": "",
 	"September": "سبتمبر",
 	"Serper API Key": "مفتاح واجهة برمجة تطبيقات سيربر",
 	"Serply API Key": "",
 	"Serpstack API Key": "مفتاح واجهة برمجة تطبيقات Serpstack",
 	"Server connection verified": "تم التحقق من اتصال الخادم",
 	"Set as default": "الافتراضي",
+	"Set CFG Scale": "",
 	"Set Default Model": "تفعيد الموديل الافتراضي",
 	"Set embedding model (e.g. {{model}})": "ضبط نموذج المتجهات (على سبيل المثال: {{model}})",
 	"Set Image Size": "حجم الصورة",
 	"Set reranking model (e.g. {{model}})": "ضبط نموذج إعادة الترتيب (على سبيل المثال: {{model}})",
+	"Set Sampler": "",
+	"Set Scheduler": "",
 	"Set Steps": "ضبط الخطوات",
 	"Set Task Model": "تعيين نموذج المهمة",
 	"Set Voice": "ضبط الصوت",
@@ -604,7 +621,9 @@
 	"Source": "المصدر",
 	"Speech recognition error: {{error}}": "{{error}} خطأ في التعرف على الكلام",
 	"Speech-to-Text Engine": "محرك تحويل الكلام إلى نص",
+	"Speed Rate": "",
 	"Stop Sequence": "وقف التسلسل",
+	"Stream Chat Response": "",
 	"STT Model": "",
 	"STT Settings": "STT اعدادات",
 	"Submit": "إرسال",

+ 19 - 0
src/lib/i18n/locales/bg-BG/translation.json

@@ -73,7 +73,10 @@
 	"AUTOMATIC1111 Api Auth String": "",
 	"AUTOMATIC1111 Base URL": "AUTOMATIC1111 Базов URL",
 	"AUTOMATIC1111 Base URL is required.": "AUTOMATIC1111 Базов URL е задължителен.",
+	"Available list": "",
 	"available!": "наличен!",
+	"Azure AI Speech": "",
+	"Azure Region": "",
 	"Back": "Назад",
 	"Bad Response": "Невалиден отговор от API",
 	"Banners": "Банери",
@@ -94,6 +97,7 @@
 	"Chat Bubble UI": "UI за чат бублон",
 	"Chat Controls": "",
 	"Chat direction": "Направление на чата",
+	"Chat Overview": "",
 	"Chats": "Чатове",
 	"Check Again": "Проверете Още Веднъж",
 	"Check for updates": "Проверка за актуализации",
@@ -238,6 +242,7 @@
 	"Enter a detail about yourself for your LLMs to recall": "Въведете подробности за себе си, за да се herinnerат вашите LLMs",
 	"Enter api auth string (e.g. username:password)": "",
 	"Enter Brave Search API Key": "Въведете Brave Search API ключ",
+	"Enter CFG Scale (e.g. 7.0)": "",
 	"Enter Chunk Overlap": "Въведете Chunk Overlap",
 	"Enter Chunk Size": "Въведете Chunk Size",
 	"Enter Github Raw URL": "Въведете URL адреса на Github Raw",
@@ -248,6 +253,8 @@
 	"Enter Model ID": "",
 	"Enter model tag (e.g. {{modelTag}})": "Въведете таг на модел (напр. {{modelTag}})",
 	"Enter Number of Steps (e.g. 50)": "Въведете брой стъпки (напр. 50)",
+	"Enter Sampler (e.g. Euler a)": "",
+	"Enter Scheduler (e.g. Karras)": "",
 	"Enter Score": "Въведете оценка",
 	"Enter SearchApi API Key": "",
 	"Enter SearchApi Engine": "",
@@ -408,6 +415,8 @@
 	"Model {{modelId}} not found": "Моделът {{modelId}} не е намерен",
 	"Model {{modelName}} is not vision capable": "Моделът {{modelName}} не може да се вижда",
 	"Model {{name}} is now {{status}}": "Моделът {{name}} сега е {{status}}",
+	"Model {{name}} is now at the top": "",
+	"Model accepts image inputs": "",
 	"Model created successfully!": "",
 	"Model filesystem path detected. Model shortname is required for update, cannot continue.": "Открит е път до файловата система на модела. За актуализацията се изисква съкратено име на модела, не може да продължи.",
 	"Model ID": "ИД на модел",
@@ -419,6 +428,7 @@
 	"Modelfile Content": "Съдържание на модфайл",
 	"Models": "Модели",
 	"More": "Повече",
+	"Move to Top": "",
 	"Name": "Име",
 	"Name Tag": "Име Таг",
 	"Name your model": "Дайте име на вашия модел",
@@ -464,6 +474,8 @@
 	"OpenAI URL/Key required.": "OpenAI URL/Key е задължителен.",
 	"or": "или",
 	"Other": "Other",
+	"Output format": "",
+	"Overview": "",
 	"Password": "Парола",
 	"PDF document (.pdf)": "PDF документ (.pdf)",
 	"PDF Extract Images (OCR)": "PDF Extract Images (OCR)",
@@ -526,6 +538,7 @@
 	"Save": "Запис",
 	"Save & Create": "Запис & Създаване",
 	"Save & Update": "Запис & Актуализиране",
+	"Save As Copy": "",
 	"Save Tag": "",
 	"Saving chat logs directly to your browser's storage is no longer supported. Please take a moment to download and delete your chat logs by clicking the button below. Don't worry, you can easily re-import your chat logs to the backend through": "Запазването на чат логове директно в хранилището на вашия браузър вече не се поддържа. Моля, отделете малко време, за да изтеглите и изтриете чат логовете си, като щракнете върху бутона по-долу. Не се притеснявайте, можете лесно да импортирате отново чат логовете си в бекенда чрез",
 	"Scan": "Сканиране",
@@ -568,16 +581,20 @@
 	"Send": "Изпрати",
 	"Send a Message": "Изпращане на Съобщение",
 	"Send message": "Изпращане на съобщение",
+	"Sends `stream_options: { include_usage: true }` in the request.\nSupported providers will return token usage information in the response when set.": "",
 	"September": "Септември",
 	"Serper API Key": "Serper API ключ",
 	"Serply API Key": "",
 	"Serpstack API Key": "Serpstack API ключ",
 	"Server connection verified": "Server connection verified",
 	"Set as default": "Задай по подразбиране",
+	"Set CFG Scale": "",
 	"Set Default Model": "Задай Модел По Подразбиране",
 	"Set embedding model (e.g. {{model}})": "Задай embedding model (e.g. {{model}})",
 	"Set Image Size": "Задай Размер на Изображението",
 	"Set reranking model (e.g. {{model}})": "Задай reranking model (e.g. {{model}})",
+	"Set Sampler": "",
+	"Set Scheduler": "",
 	"Set Steps": "Задай Стъпки",
 	"Set Task Model": "Задаване на модел на задача",
 	"Set Voice": "Задай Глас",
@@ -600,7 +617,9 @@
 	"Source": "Източник",
 	"Speech recognition error: {{error}}": "Speech recognition error: {{error}}",
 	"Speech-to-Text Engine": "Speech-to-Text Engine",
+	"Speed Rate": "",
 	"Stop Sequence": "Stop Sequence",
+	"Stream Chat Response": "",
 	"STT Model": "",
 	"STT Settings": "STT Настройки",
 	"Submit": "Изпращане",

+ 19 - 0
src/lib/i18n/locales/bn-BD/translation.json

@@ -73,7 +73,10 @@
 	"AUTOMATIC1111 Api Auth String": "",
 	"AUTOMATIC1111 Base URL": "AUTOMATIC1111 বেজ ইউআরএল",
 	"AUTOMATIC1111 Base URL is required.": "AUTOMATIC1111 বেজ ইউআরএল আবশ্যক",
+	"Available list": "",
 	"available!": "উপলব্ধ!",
+	"Azure AI Speech": "",
+	"Azure Region": "",
 	"Back": "পেছনে",
 	"Bad Response": "খারাপ প্রতিক্রিয়া",
 	"Banners": "ব্যানার",
@@ -94,6 +97,7 @@
 	"Chat Bubble UI": "চ্যাট বাবল UI",
 	"Chat Controls": "",
 	"Chat direction": "চ্যাট দিকনির্দেশ",
+	"Chat Overview": "",
 	"Chats": "চ্যাটসমূহ",
 	"Check Again": "আবার চেক করুন",
 	"Check for updates": "নতুন আপডেট আছে কিনা চেক করুন",
@@ -238,6 +242,7 @@
 	"Enter a detail about yourself for your LLMs to recall": "আপনার এলএলএমগুলি স্মরণ করার জন্য নিজের সম্পর্কে একটি বিশদ লিখুন",
 	"Enter api auth string (e.g. username:password)": "",
 	"Enter Brave Search API Key": "সাহসী অনুসন্ধান API কী লিখুন",
+	"Enter CFG Scale (e.g. 7.0)": "",
 	"Enter Chunk Overlap": "চাঙ্ক ওভারল্যাপ লিখুন",
 	"Enter Chunk Size": "চাংক সাইজ লিখুন",
 	"Enter Github Raw URL": "গিটহাব কাঁচা URL লিখুন",
@@ -248,6 +253,8 @@
 	"Enter Model ID": "",
 	"Enter model tag (e.g. {{modelTag}})": "মডেল ট্যাগ লিখুন (e.g. {{modelTag}})",
 	"Enter Number of Steps (e.g. 50)": "ধাপের সংখ্যা দিন (যেমন: 50)",
+	"Enter Sampler (e.g. Euler a)": "",
+	"Enter Scheduler (e.g. Karras)": "",
 	"Enter Score": "স্কোর দিন",
 	"Enter SearchApi API Key": "",
 	"Enter SearchApi Engine": "",
@@ -408,6 +415,8 @@
 	"Model {{modelId}} not found": "{{modelId}} মডেল পাওয়া যায়নি",
 	"Model {{modelName}} is not vision capable": "মডেল {{modelName}} দৃষ্টি সক্ষম নয়",
 	"Model {{name}} is now {{status}}": "মডেল {{name}} এখন {{status}}",
+	"Model {{name}} is now at the top": "",
+	"Model accepts image inputs": "",
 	"Model created successfully!": "",
 	"Model filesystem path detected. Model shortname is required for update, cannot continue.": "মডেল ফাইলসিস্টেম পাথ পাওয়া গেছে। আপডেটের জন্য মডেলের শর্টনেম আবশ্যক, এগিয়ে যাওয়া যাচ্ছে না।",
 	"Model ID": "মডেল ID",
@@ -419,6 +428,7 @@
 	"Modelfile Content": "মডেলফাইল কনটেন্ট",
 	"Models": "মডেলসমূহ",
 	"More": "আরো",
+	"Move to Top": "",
 	"Name": "নাম",
 	"Name Tag": "নামের ট্যাগ",
 	"Name your model": "আপনার মডেলের নাম দিন",
@@ -464,6 +474,8 @@
 	"OpenAI URL/Key required.": "OpenAI URL/Key আবশ্যক",
 	"or": "অথবা",
 	"Other": "অন্যান্য",
+	"Output format": "",
+	"Overview": "",
 	"Password": "পাসওয়ার্ড",
 	"PDF document (.pdf)": "PDF ডকুমেন্ট (.pdf)",
 	"PDF Extract Images (OCR)": "পিডিএফ এর ছবি থেকে লেখা বের করুন (OCR)",
@@ -526,6 +538,7 @@
 	"Save": "সংরক্ষণ",
 	"Save & Create": "সংরক্ষণ এবং তৈরি করুন",
 	"Save & Update": "সংরক্ষণ এবং আপডেট করুন",
+	"Save As Copy": "",
 	"Save Tag": "",
 	"Saving chat logs directly to your browser's storage is no longer supported. Please take a moment to download and delete your chat logs by clicking the button below. Don't worry, you can easily re-import your chat logs to the backend through": "মাধ্যমে",
 	"Scan": "স্ক্যান",
@@ -568,16 +581,20 @@
 	"Send": "পাঠান",
 	"Send a Message": "একটি মেসেজ পাঠান",
 	"Send message": "মেসেজ পাঠান",
+	"Sends `stream_options: { include_usage: true }` in the request.\nSupported providers will return token usage information in the response when set.": "",
 	"September": "সেপ্টেম্বর",
 	"Serper API Key": "Serper API Key",
 	"Serply API Key": "",
 	"Serpstack API Key": "Serpstack API Key",
 	"Server connection verified": "সার্ভার কানেকশন যাচাই করা হয়েছে",
 	"Set as default": "ডিফল্ট হিসেবে নির্ধারণ করুন",
+	"Set CFG Scale": "",
 	"Set Default Model": "ডিফল্ট মডেল নির্ধারণ করুন",
 	"Set embedding model (e.g. {{model}})": "ইমেম্বিং মডেল নির্ধারণ করুন (উদাহরণ {{model}})",
 	"Set Image Size": "ছবির সাইজ নির্ধারণ করুন",
 	"Set reranking model (e.g. {{model}})": "রি-র্যাংকিং মডেল নির্ধারণ করুন (উদাহরণ {{model}})",
+	"Set Sampler": "",
+	"Set Scheduler": "",
 	"Set Steps": "পরবর্তী ধাপসমূহ",
 	"Set Task Model": "টাস্ক মডেল সেট করুন",
 	"Set Voice": "কন্ঠস্বর নির্ধারণ করুন",
@@ -600,7 +617,9 @@
 	"Source": "উৎস",
 	"Speech recognition error: {{error}}": "স্পিচ রিকগনিশনে সমস্যা: {{error}}",
 	"Speech-to-Text Engine": "স্পিচ-টু-টেক্সট ইঞ্জিন",
+	"Speed Rate": "",
 	"Stop Sequence": "সিকোয়েন্স থামান",
+	"Stream Chat Response": "",
 	"STT Model": "",
 	"STT Settings": "STT সেটিংস",
 	"Submit": "সাবমিট",

+ 21 - 2
src/lib/i18n/locales/ca-ES/translation.json

@@ -73,7 +73,10 @@
 	"AUTOMATIC1111 Api Auth String": "Cadena d'autenticació de l'API d'AUTOMATIC1111",
 	"AUTOMATIC1111 Base URL": "URL Base d'AUTOMATIC1111",
 	"AUTOMATIC1111 Base URL is required.": "Es requereix l'URL Base d'AUTOMATIC1111.",
+	"Available list": "",
 	"available!": "disponible!",
+	"Azure AI Speech": "",
+	"Azure Region": "",
 	"Back": "Enrere",
 	"Bad Response": "Resposta errònia",
 	"Banners": "Banners",
@@ -94,6 +97,7 @@
 	"Chat Bubble UI": "Chat Bubble UI",
 	"Chat Controls": "Controls de xat",
 	"Chat direction": "Direcció del xat",
+	"Chat Overview": "",
 	"Chats": "Xats",
 	"Check Again": "Comprovar-ho de nou",
 	"Check for updates": "Comprovar si hi ha actualitzacions",
@@ -230,7 +234,7 @@
 	"Enable Message Rating": "Permetre la qualificació de missatges",
 	"Enable New Sign Ups": "Permetre nous registres",
 	"Enable Web Search": "Activar la cerca web",
-	"Enable Web Search Query Generation": "",
+	"Enable Web Search Query Generation": "Activa la generació de consultes de cerca web",
 	"Enabled": "Habilitat",
 	"Engine": "Motor",
 	"Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "Assegura't que els teus fitxers CSV inclouen 4 columnes en aquest ordre: Nom, Correu electrònic, Contrasenya, Rol.",
@@ -238,6 +242,7 @@
 	"Enter a detail about yourself for your LLMs to recall": "Introdueix un detall sobre tu què els teus models de llenguatge puguin recordar",
 	"Enter api auth string (e.g. username:password)": "Entra la cadena d'autenticació api (p. ex. nom d'usuari:contrasenya)",
 	"Enter Brave Search API Key": "Introdueix la clau API de Brave Search",
+	"Enter CFG Scale (e.g. 7.0)": "Entra l'escala CFG (p.ex. 7.0)",
 	"Enter Chunk Overlap": "Introdueix la mida de solapament de blocs",
 	"Enter Chunk Size": "Introdueix la mida del bloc",
 	"Enter Github Raw URL": "Introdueix l'URL en brut de Github",
@@ -248,6 +253,8 @@
 	"Enter Model ID": "Introdueix l'identificador del model",
 	"Enter model tag (e.g. {{modelTag}})": "Introdueix l'etiqueta del model (p. ex. {{modelTag}})",
 	"Enter Number of Steps (e.g. 50)": "Introdueix el nombre de passos (p. ex. 50)",
+	"Enter Sampler (e.g. Euler a)": "Introdueix el mostrejador (p.ex. Euler a)",
+	"Enter Scheduler (e.g. Karras)": "Entra el programador (p.ex. Karras)",
 	"Enter Score": "Introdueix la puntuació",
 	"Enter SearchApi API Key": "Introdueix la clau API SearchApi",
 	"Enter SearchApi Engine": "Introdueix el motor SearchApi",
@@ -368,7 +375,7 @@
 	"Last Active": "Activitat recent",
 	"Last Modified": "Modificació",
 	"Leave empty for unlimited": "Deixar-ho buit per il·limitat",
-	"Leave empty to use the default prompt, or enter a custom prompt": "",
+	"Leave empty to use the default prompt, or enter a custom prompt": "Deixa-ho en blanc per utilitzar la indicació predeterminada o introdueix una indicació personalitzada",
 	"Light": "Clar",
 	"Listening...": "Escoltant...",
 	"LLMs can make mistakes. Verify important information.": "Els models de llenguatge poden cometre errors. Verifica la informació important.",
@@ -408,6 +415,8 @@
 	"Model {{modelId}} not found": "No s'ha trobat el model {{modelId}}",
 	"Model {{modelName}} is not vision capable": "El model {{modelName}} no és capaç de visió",
 	"Model {{name}} is now {{status}}": "El model {{name}} ara és {{status}}",
+	"Model {{name}} is now at the top": "",
+	"Model accepts image inputs": "El model accepta entrades d'imatge",
 	"Model created successfully!": "Model creat correctament",
 	"Model filesystem path detected. Model shortname is required for update, cannot continue.": "S'ha detectat el camí del sistema de fitxers del model. És necessari un nom curt del model per actualitzar, no es pot continuar.",
 	"Model ID": "Identificador del model",
@@ -419,6 +428,7 @@
 	"Modelfile Content": "Contingut del Modelfile",
 	"Models": "Models",
 	"More": "Més",
+	"Move to Top": "",
 	"Name": "Nom",
 	"Name Tag": "Etiqueta de nom",
 	"Name your model": "Posa un nom al teu model",
@@ -464,6 +474,8 @@
 	"OpenAI URL/Key required.": "URL/Clau d'OpenAI requerides.",
 	"or": "o",
 	"Other": "Altres",
+	"Output format": "",
+	"Overview": "",
 	"Password": "Contrasenya",
 	"PDF document (.pdf)": "Document PDF (.pdf)",
 	"PDF Extract Images (OCR)": "Extreu imatges del PDF (OCR)",
@@ -526,6 +538,7 @@
 	"Save": "Desar",
 	"Save & Create": "Desar i crear",
 	"Save & Update": "Desar i actualitzar",
+	"Save As Copy": "",
 	"Save Tag": "Desar l'etiqueta",
 	"Saving chat logs directly to your browser's storage is no longer supported. Please take a moment to download and delete your chat logs by clicking the button below. Don't worry, you can easily re-import your chat logs to the backend through": "Desar els registres de xat directament a l'emmagatzematge del teu navegador ja no està suportat. Si us plau, descarregr i elimina els registres de xat fent clic al botó de sota. No et preocupis, pots tornar a importar fàcilment els teus registres de xat al backend a través de",
 	"Scan": "Escanejar",
@@ -569,16 +582,20 @@
 	"Send": "Enviar",
 	"Send a Message": "Enviar un missatge",
 	"Send message": "Enviar missatge",
+	"Sends `stream_options: { include_usage: true }` in the request.\nSupported providers will return token usage information in the response when set.": "Envia `stream_options: { include_usage: true }` a la sol·licitud.\nEls proveïdors compatibles retornaran la informació d'ús del token a la resposta quan s'estableixi.",
 	"September": "Setembre",
 	"Serper API Key": "Clau API de Serper",
 	"Serply API Key": "Clau API de Serply",
 	"Serpstack API Key": "Clau API de Serpstack",
 	"Server connection verified": "Connexió al servidor verificada",
 	"Set as default": "Establir com a predeterminat",
+	"Set CFG Scale": "Establir l'escala CFG",
 	"Set Default Model": "Establir el model predeterminat",
 	"Set embedding model (e.g. {{model}})": "Establir el model d'incrustació (p.ex. {{model}})",
 	"Set Image Size": "Establir la mida de la image",
 	"Set reranking model (e.g. {{model}})": "Establir el model de reavaluació (p.ex. {{model}})",
+	"Set Sampler": "Establir el mostrejador",
+	"Set Scheduler": "Establir el programador",
 	"Set Steps": "Establir el nombre de passos",
 	"Set Task Model": "Establir el model de tasca",
 	"Set Voice": "Establir la veu",
@@ -601,7 +618,9 @@
 	"Source": "Font",
 	"Speech recognition error: {{error}}": "Error de reconeixement de veu: {{error}}",
 	"Speech-to-Text Engine": "Motor de veu a text",
+	"Speed Rate": "",
 	"Stop Sequence": "Atura la seqüència",
+	"Stream Chat Response": "",
 	"STT Model": "Model SST",
 	"STT Settings": "Preferències de STT",
 	"Submit": "Enviar",

+ 19 - 0
src/lib/i18n/locales/ceb-PH/translation.json

@@ -73,7 +73,10 @@
 	"AUTOMATIC1111 Api Auth String": "",
 	"AUTOMATIC1111 Base URL": "Base URL AUTOMATIC1111",
 	"AUTOMATIC1111 Base URL is required.": "Ang AUTOMATIC1111 base URL gikinahanglan.",
+	"Available list": "",
 	"available!": "magamit!",
+	"Azure AI Speech": "",
+	"Azure Region": "",
 	"Back": "Balik",
 	"Bad Response": "",
 	"Banners": "",
@@ -94,6 +97,7 @@
 	"Chat Bubble UI": "",
 	"Chat Controls": "",
 	"Chat direction": "",
+	"Chat Overview": "",
 	"Chats": "Mga panaghisgot",
 	"Check Again": "Susiha pag-usab",
 	"Check for updates": "Susiha ang mga update",
@@ -238,6 +242,7 @@
 	"Enter a detail about yourself for your LLMs to recall": "",
 	"Enter api auth string (e.g. username:password)": "",
 	"Enter Brave Search API Key": "",
+	"Enter CFG Scale (e.g. 7.0)": "",
 	"Enter Chunk Overlap": "Pagsulod sa block overlap",
 	"Enter Chunk Size": "Isulod ang block size",
 	"Enter Github Raw URL": "",
@@ -248,6 +253,8 @@
 	"Enter Model ID": "",
 	"Enter model tag (e.g. {{modelTag}})": "Pagsulod sa template tag (e.g. {{modelTag}})",
 	"Enter Number of Steps (e.g. 50)": "Pagsulod sa gidaghanon sa mga lakang (e.g. 50)",
+	"Enter Sampler (e.g. Euler a)": "",
+	"Enter Scheduler (e.g. Karras)": "",
 	"Enter Score": "",
 	"Enter SearchApi API Key": "",
 	"Enter SearchApi Engine": "",
@@ -408,6 +415,8 @@
 	"Model {{modelId}} not found": "Modelo {{modelId}} wala makit-an",
 	"Model {{modelName}} is not vision capable": "",
 	"Model {{name}} is now {{status}}": "",
+	"Model {{name}} is now at the top": "",
+	"Model accepts image inputs": "",
 	"Model created successfully!": "",
 	"Model filesystem path detected. Model shortname is required for update, cannot continue.": "",
 	"Model ID": "",
@@ -419,6 +428,7 @@
 	"Modelfile Content": "Mga sulod sa template file",
 	"Models": "Mga modelo",
 	"More": "",
+	"Move to Top": "",
 	"Name": "Ngalan",
 	"Name Tag": "Tag sa ngalan",
 	"Name your model": "",
@@ -464,6 +474,8 @@
 	"OpenAI URL/Key required.": "",
 	"or": "O",
 	"Other": "",
+	"Output format": "",
+	"Overview": "",
 	"Password": "Password",
 	"PDF document (.pdf)": "",
 	"PDF Extract Images (OCR)": "PDF Image Extraction (OCR)",
@@ -526,6 +538,7 @@
 	"Save": "Tipigi",
 	"Save & Create": "I-save ug Paghimo",
 	"Save & Update": "I-save ug I-update",
+	"Save As Copy": "",
 	"Save Tag": "",
 	"Saving chat logs directly to your browser's storage is no longer supported. Please take a moment to download and delete your chat logs by clicking the button below. Don't worry, you can easily re-import your chat logs to the backend through": "Ang pag-save sa mga chat log direkta sa imong browser storage dili na suportado. ",
 	"Scan": "Aron ma-scan",
@@ -568,16 +581,20 @@
 	"Send": "",
 	"Send a Message": "Magpadala ug mensahe",
 	"Send message": "Magpadala ug mensahe",
+	"Sends `stream_options: { include_usage: true }` in the request.\nSupported providers will return token usage information in the response when set.": "",
 	"September": "",
 	"Serper API Key": "",
 	"Serply API Key": "",
 	"Serpstack API Key": "",
 	"Server connection verified": "Gipamatud-an nga koneksyon sa server",
 	"Set as default": "Define pinaagi sa default",
+	"Set CFG Scale": "",
 	"Set Default Model": "Ibutang ang default template",
 	"Set embedding model (e.g. {{model}})": "",
 	"Set Image Size": "Ibutang ang gidak-on sa hulagway",
 	"Set reranking model (e.g. {{model}})": "",
+	"Set Sampler": "",
+	"Set Scheduler": "",
 	"Set Steps": "Ipasabot ang mga lakang",
 	"Set Task Model": "",
 	"Set Voice": "Ibutang ang tingog",
@@ -600,7 +617,9 @@
 	"Source": "Tinubdan",
 	"Speech recognition error: {{error}}": "Sayop sa pag-ila sa tingog: {{error}}",
 	"Speech-to-Text Engine": "Engine sa pag-ila sa tingog",
+	"Speed Rate": "",
 	"Stop Sequence": "Pagkasunod-sunod sa pagsira",
+	"Stream Chat Response": "",
 	"STT Model": "",
 	"STT Settings": "Mga setting sa STT",
 	"Submit": "Isumite",

+ 19 - 0
src/lib/i18n/locales/de-DE/translation.json

@@ -73,7 +73,10 @@
 	"AUTOMATIC1111 Api Auth String": "AUTOMATIC1111-API-Authentifizierungszeichenfolge",
 	"AUTOMATIC1111 Base URL": "AUTOMATIC1111-Basis-URL",
 	"AUTOMATIC1111 Base URL is required.": "AUTOMATIC1111-Basis-URL ist erforderlich.",
+	"Available list": "",
 	"available!": "Verfügbar!",
+	"Azure AI Speech": "",
+	"Azure Region": "",
 	"Back": "Zurück",
 	"Bad Response": "Schlechte Antwort",
 	"Banners": "Banner",
@@ -94,6 +97,7 @@
 	"Chat Bubble UI": "Chat Bubble UI",
 	"Chat Controls": "",
 	"Chat direction": "Textrichtung",
+	"Chat Overview": "",
 	"Chats": "Unterhaltungen",
 	"Check Again": "Erneut überprüfen",
 	"Check for updates": "Nach Updates suchen",
@@ -238,6 +242,7 @@
 	"Enter a detail about yourself for your LLMs to recall": "Geben Sie ein Detail über sich selbst ein, das Ihre Sprachmodelle (LLMs) sich merken sollen",
 	"Enter api auth string (e.g. username:password)": "Geben Sie die API-Authentifizierungszeichenfolge ein (z. B. Benutzername:Passwort)",
 	"Enter Brave Search API Key": "Geben Sie den Brave Search API-Schlüssel ein",
+	"Enter CFG Scale (e.g. 7.0)": "",
 	"Enter Chunk Overlap": "Geben Sie die Blocküberlappung ein",
 	"Enter Chunk Size": "Geben Sie die Blockgröße ein",
 	"Enter Github Raw URL": "Geben Sie die Github Raw-URL ein",
@@ -248,6 +253,8 @@
 	"Enter Model ID": "",
 	"Enter model tag (e.g. {{modelTag}})": "Gebn Sie den Model-Tag ein",
 	"Enter Number of Steps (e.g. 50)": "Geben Sie die Anzahl an Schritten ein (z. B. 50)",
+	"Enter Sampler (e.g. Euler a)": "",
+	"Enter Scheduler (e.g. Karras)": "",
 	"Enter Score": "Punktzahl eingeben",
 	"Enter SearchApi API Key": "",
 	"Enter SearchApi Engine": "",
@@ -408,6 +415,8 @@
 	"Model {{modelId}} not found": "Modell {{modelId}} nicht gefunden",
 	"Model {{modelName}} is not vision capable": "Das Modell {{modelName}} ist nicht für die Bildverarbeitung geeignet",
 	"Model {{name}} is now {{status}}": "Modell {{name}} ist jetzt {{status}}",
+	"Model {{name}} is now at the top": "",
+	"Model accepts image inputs": "",
 	"Model created successfully!": "Modell erfolgreich erstellt!",
 	"Model filesystem path detected. Model shortname is required for update, cannot continue.": "Modell-Dateisystempfad erkannt. Modellkurzname ist für das Update erforderlich, Fortsetzung nicht möglich.",
 	"Model ID": "Modell-ID",
@@ -419,6 +428,7 @@
 	"Modelfile Content": "Modelfile-Inhalt",
 	"Models": "Modelle",
 	"More": "Mehr",
+	"Move to Top": "",
 	"Name": "Name",
 	"Name Tag": "Namens-Tag",
 	"Name your model": "Benennen Sie Ihr Modell",
@@ -464,6 +474,8 @@
 	"OpenAI URL/Key required.": "OpenAI-URL/Schlüssel erforderlich.",
 	"or": "oder",
 	"Other": "Andere",
+	"Output format": "",
+	"Overview": "",
 	"Password": "Passwort",
 	"PDF document (.pdf)": "PDF-Dokument (.pdf)",
 	"PDF Extract Images (OCR)": "Text von Bildern aus PDFs extrahieren (OCR)",
@@ -526,6 +538,7 @@
 	"Save": "Speichern",
 	"Save & Create": "Erstellen",
 	"Save & Update": "Aktualisieren",
+	"Save As Copy": "",
 	"Save Tag": "Tag speichern",
 	"Saving chat logs directly to your browser's storage is no longer supported. Please take a moment to download and delete your chat logs by clicking the button below. Don't worry, you can easily re-import your chat logs to the backend through": "Das direkte Speichern von Unterhaltungen im Browser-Speicher wird nicht mehr unterstützt. Bitte nehmen Sie einen Moment Zeit, um Ihre Unterhaltungen zu exportieren und zu löschen, indem Sie auf die Schaltfläche unten klicken. Keine Sorge, Sie können Ihre Unterhaltungen problemlos über das Backend wieder importieren.",
 	"Scan": "Scannen",
@@ -568,16 +581,20 @@
 	"Send": "Senden",
 	"Send a Message": "Eine Nachricht senden",
 	"Send message": "Nachricht senden",
+	"Sends `stream_options: { include_usage: true }` in the request.\nSupported providers will return token usage information in the response when set.": "",
 	"September": "September",
 	"Serper API Key": "Serper-API-Schlüssel",
 	"Serply API Key": "Serply-API-Schlüssel",
 	"Serpstack API Key": "Serpstack-API-Schlüssel",
 	"Server connection verified": "Serververbindung überprüft",
 	"Set as default": "Als Standard festlegen",
+	"Set CFG Scale": "",
 	"Set Default Model": "Standardmodell festlegen",
 	"Set embedding model (e.g. {{model}})": "Einbettungsmodell festlegen (z. B. {{model}})",
 	"Set Image Size": "Bildgröße festlegen",
 	"Set reranking model (e.g. {{model}})": "Rerankingmodell festlegen (z. B. {{model}})",
+	"Set Sampler": "",
+	"Set Scheduler": "",
 	"Set Steps": "Schrittgröße festlegen",
 	"Set Task Model": "Aufgabenmodell festlegen",
 	"Set Voice": "Stimme festlegen",
@@ -600,7 +617,9 @@
 	"Source": "Quelle",
 	"Speech recognition error: {{error}}": "Spracherkennungsfehler: {{error}}",
 	"Speech-to-Text Engine": "Sprache-zu-Text-Engine",
+	"Speed Rate": "",
 	"Stop Sequence": "Stop-Sequenz",
+	"Stream Chat Response": "",
 	"STT Model": "STT-Modell",
 	"STT Settings": "STT-Einstellungen",
 	"Submit": "Senden",

+ 19 - 0
src/lib/i18n/locales/dg-DG/translation.json

@@ -73,7 +73,10 @@
 	"AUTOMATIC1111 Api Auth String": "",
 	"AUTOMATIC1111 Base URL": "AUTOMATIC1111 Base URL",
 	"AUTOMATIC1111 Base URL is required.": "AUTOMATIC1111 Base URL is required.",
+	"Available list": "",
 	"available!": "available! So excite!",
+	"Azure AI Speech": "",
+	"Azure Region": "",
 	"Back": "Back",
 	"Bad Response": "",
 	"Banners": "",
@@ -94,6 +97,7 @@
 	"Chat Bubble UI": "",
 	"Chat Controls": "",
 	"Chat direction": "",
+	"Chat Overview": "",
 	"Chats": "Chats",
 	"Check Again": "Check Again",
 	"Check for updates": "Check for updates",
@@ -238,6 +242,7 @@
 	"Enter a detail about yourself for your LLMs to recall": "",
 	"Enter api auth string (e.g. username:password)": "",
 	"Enter Brave Search API Key": "",
+	"Enter CFG Scale (e.g. 7.0)": "",
 	"Enter Chunk Overlap": "Enter Overlap of Chunks",
 	"Enter Chunk Size": "Enter Size of Chunk",
 	"Enter Github Raw URL": "",
@@ -248,6 +253,8 @@
 	"Enter Model ID": "",
 	"Enter model tag (e.g. {{modelTag}})": "Enter model doge tag (e.g. {{modelTag}})",
 	"Enter Number of Steps (e.g. 50)": "Enter Number of Steps (e.g. 50)",
+	"Enter Sampler (e.g. Euler a)": "",
+	"Enter Scheduler (e.g. Karras)": "",
 	"Enter Score": "",
 	"Enter SearchApi API Key": "",
 	"Enter SearchApi Engine": "",
@@ -408,6 +415,8 @@
 	"Model {{modelId}} not found": "Model {{modelId}} not found",
 	"Model {{modelName}} is not vision capable": "",
 	"Model {{name}} is now {{status}}": "",
+	"Model {{name}} is now at the top": "",
+	"Model accepts image inputs": "",
 	"Model created successfully!": "",
 	"Model filesystem path detected. Model shortname is required for update, cannot continue.": "Model filesystem bark detected. Model shortname is required for update, cannot continue.",
 	"Model ID": "",
@@ -419,6 +428,7 @@
 	"Modelfile Content": "Modelfile Content",
 	"Models": "Wowdels",
 	"More": "",
+	"Move to Top": "",
 	"Name": "Name",
 	"Name Tag": "Name Tag",
 	"Name your model": "",
@@ -464,6 +474,8 @@
 	"OpenAI URL/Key required.": "",
 	"or": "or",
 	"Other": "",
+	"Output format": "",
+	"Overview": "",
 	"Password": "Barkword",
 	"PDF document (.pdf)": "",
 	"PDF Extract Images (OCR)": "PDF Extract Wowmages (OCR)",
@@ -526,6 +538,7 @@
 	"Save": "Save much wow",
 	"Save & Create": "Save & Create much create",
 	"Save & Update": "Save & Update much update",
+	"Save As Copy": "",
 	"Save Tag": "",
 	"Saving chat logs directly to your browser's storage is no longer supported. Please take a moment to download and delete your chat logs by clicking the button below. Don't worry, you can easily re-import your chat logs to the backend through": "Saving chat logs in browser storage not support anymore. Pls download and delete your chat logs by click button below. Much easy re-import to backend through",
 	"Scan": "Scan much scan",
@@ -570,16 +583,20 @@
 	"Send": "",
 	"Send a Message": "Send a Message much message",
 	"Send message": "Send message very send",
+	"Sends `stream_options: { include_usage: true }` in the request.\nSupported providers will return token usage information in the response when set.": "",
 	"September": "",
 	"Serper API Key": "",
 	"Serply API Key": "",
 	"Serpstack API Key": "",
 	"Server connection verified": "Server connection verified much secure",
 	"Set as default": "Set as default very default",
+	"Set CFG Scale": "",
 	"Set Default Model": "Set Default Model much model",
 	"Set embedding model (e.g. {{model}})": "",
 	"Set Image Size": "Set Image Size very size",
 	"Set reranking model (e.g. {{model}})": "",
+	"Set Sampler": "",
+	"Set Scheduler": "",
 	"Set Steps": "Set Steps so many steps",
 	"Set Task Model": "",
 	"Set Voice": "Set Voice so speak",
@@ -602,7 +619,9 @@
 	"Source": "Source",
 	"Speech recognition error: {{error}}": "Speech recognition error: {{error}} so error",
 	"Speech-to-Text Engine": "Speech-to-Text Engine much speak",
+	"Speed Rate": "",
 	"Stop Sequence": "Stop Sequence much stop",
+	"Stream Chat Response": "",
 	"STT Model": "",
 	"STT Settings": "STT Settings very settings",
 	"Submit": "Submit much submit",

+ 19 - 0
src/lib/i18n/locales/en-GB/translation.json

@@ -73,7 +73,10 @@
 	"AUTOMATIC1111 Api Auth String": "",
 	"AUTOMATIC1111 Base URL": "",
 	"AUTOMATIC1111 Base URL is required.": "",
+	"Available list": "",
 	"available!": "",
+	"Azure AI Speech": "",
+	"Azure Region": "",
 	"Back": "",
 	"Bad Response": "",
 	"Banners": "",
@@ -94,6 +97,7 @@
 	"Chat Bubble UI": "",
 	"Chat Controls": "",
 	"Chat direction": "",
+	"Chat Overview": "",
 	"Chats": "",
 	"Check Again": "",
 	"Check for updates": "",
@@ -238,6 +242,7 @@
 	"Enter a detail about yourself for your LLMs to recall": "",
 	"Enter api auth string (e.g. username:password)": "",
 	"Enter Brave Search API Key": "",
+	"Enter CFG Scale (e.g. 7.0)": "",
 	"Enter Chunk Overlap": "",
 	"Enter Chunk Size": "",
 	"Enter Github Raw URL": "",
@@ -248,6 +253,8 @@
 	"Enter Model ID": "",
 	"Enter model tag (e.g. {{modelTag}})": "",
 	"Enter Number of Steps (e.g. 50)": "",
+	"Enter Sampler (e.g. Euler a)": "",
+	"Enter Scheduler (e.g. Karras)": "",
 	"Enter Score": "",
 	"Enter SearchApi API Key": "",
 	"Enter SearchApi Engine": "",
@@ -408,6 +415,8 @@
 	"Model {{modelId}} not found": "",
 	"Model {{modelName}} is not vision capable": "",
 	"Model {{name}} is now {{status}}": "",
+	"Model {{name}} is now at the top": "",
+	"Model accepts image inputs": "",
 	"Model created successfully!": "",
 	"Model filesystem path detected. Model shortname is required for update, cannot continue.": "",
 	"Model ID": "",
@@ -419,6 +428,7 @@
 	"Modelfile Content": "",
 	"Models": "",
 	"More": "",
+	"Move to Top": "",
 	"Name": "",
 	"Name Tag": "",
 	"Name your model": "",
@@ -464,6 +474,8 @@
 	"OpenAI URL/Key required.": "",
 	"or": "",
 	"Other": "",
+	"Output format": "",
+	"Overview": "",
 	"Password": "",
 	"PDF document (.pdf)": "",
 	"PDF Extract Images (OCR)": "",
@@ -526,6 +538,7 @@
 	"Save": "",
 	"Save & Create": "",
 	"Save & Update": "",
+	"Save As Copy": "",
 	"Save Tag": "",
 	"Saving chat logs directly to your browser's storage is no longer supported. Please take a moment to download and delete your chat logs by clicking the button below. Don't worry, you can easily re-import your chat logs to the backend through": "",
 	"Scan": "",
@@ -568,16 +581,20 @@
 	"Send": "",
 	"Send a Message": "",
 	"Send message": "",
+	"Sends `stream_options: { include_usage: true }` in the request.\nSupported providers will return token usage information in the response when set.": "",
 	"September": "",
 	"Serper API Key": "",
 	"Serply API Key": "",
 	"Serpstack API Key": "",
 	"Server connection verified": "",
 	"Set as default": "",
+	"Set CFG Scale": "",
 	"Set Default Model": "",
 	"Set embedding model (e.g. {{model}})": "",
 	"Set Image Size": "",
 	"Set reranking model (e.g. {{model}})": "",
+	"Set Sampler": "",
+	"Set Scheduler": "",
 	"Set Steps": "",
 	"Set Task Model": "",
 	"Set Voice": "",
@@ -600,7 +617,9 @@
 	"Source": "",
 	"Speech recognition error: {{error}}": "",
 	"Speech-to-Text Engine": "",
+	"Speed Rate": "",
 	"Stop Sequence": "",
+	"Stream Chat Response": "",
 	"STT Model": "",
 	"STT Settings": "",
 	"Submit": "",

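Note on the new usage-reporting tooltip added in the diffs above (e.g. the en-GB key "Sends `stream_options: { include_usage: true }` in the request…"): it describes the OpenAI-compatible streaming option that asks the provider to report token usage in the final chunk. As a hedged illustration only — this is not Open WebUI's own code, and the endpoint, API key, and model name below are placeholders — a streamed chat request honoring that option might look like:

```python
# Minimal sketch of the behavior the tooltip describes: a streamed
# OpenAI-compatible chat request with stream_options.include_usage set,
# printing the token-usage object that supporting providers append.
import json
import requests

payload = {
    "model": "gpt-4o-mini",  # hypothetical model name
    "stream": True,
    "stream_options": {"include_usage": True},  # the option the tooltip refers to
    "messages": [{"role": "user", "content": "Hello!"}],
}

with requests.post(
    "https://api.openai.com/v1/chat/completions",  # any OpenAI-compatible endpoint
    headers={"Authorization": "Bearer YOUR_API_KEY"},
    json=payload,
    stream=True,
) as resp:
    for line in resp.iter_lines():
        # SSE lines look like: b"data: {...}" ; skip keep-alives and the [DONE] marker.
        if not line or not line.startswith(b"data: ") or line == b"data: [DONE]":
            continue
        chunk = json.loads(line[len(b"data: "):])
        # Providers that support include_usage send a final chunk whose
        # "usage" field carries prompt/completion token counts.
        if chunk.get("usage"):
            print(chunk["usage"])
```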
+ 19 - 0
src/lib/i18n/locales/en-US/translation.json

@@ -73,7 +73,10 @@
 	"AUTOMATIC1111 Api Auth String": "",
 	"AUTOMATIC1111 Base URL": "",
 	"AUTOMATIC1111 Base URL is required.": "",
+	"Available list": "",
 	"available!": "",
+	"Azure AI Speech": "",
+	"Azure Region": "",
 	"Back": "",
 	"Bad Response": "",
 	"Banners": "",
@@ -94,6 +97,7 @@
 	"Chat Bubble UI": "",
 	"Chat Controls": "",
 	"Chat direction": "",
+	"Chat Overview": "",
 	"Chats": "",
 	"Check Again": "",
 	"Check for updates": "",
@@ -238,6 +242,7 @@
 	"Enter a detail about yourself for your LLMs to recall": "",
 	"Enter api auth string (e.g. username:password)": "",
 	"Enter Brave Search API Key": "",
+	"Enter CFG Scale (e.g. 7.0)": "",
 	"Enter Chunk Overlap": "",
 	"Enter Chunk Size": "",
 	"Enter Github Raw URL": "",
@@ -248,6 +253,8 @@
 	"Enter Model ID": "",
 	"Enter model tag (e.g. {{modelTag}})": "",
 	"Enter Number of Steps (e.g. 50)": "",
+	"Enter Sampler (e.g. Euler a)": "",
+	"Enter Scheduler (e.g. Karras)": "",
 	"Enter Score": "",
 	"Enter SearchApi API Key": "",
 	"Enter SearchApi Engine": "",
@@ -408,6 +415,8 @@
 	"Model {{modelId}} not found": "",
 	"Model {{modelName}} is not vision capable": "",
 	"Model {{name}} is now {{status}}": "",
+	"Model {{name}} is now at the top": "",
+	"Model accepts image inputs": "",
 	"Model created successfully!": "",
 	"Model filesystem path detected. Model shortname is required for update, cannot continue.": "",
 	"Model ID": "",
@@ -419,6 +428,7 @@
 	"Modelfile Content": "",
 	"Models": "",
 	"More": "",
+	"Move to Top": "",
 	"Name": "",
 	"Name Tag": "",
 	"Name your model": "",
@@ -464,6 +474,8 @@
 	"OpenAI URL/Key required.": "",
 	"or": "",
 	"Other": "",
+	"Output format": "",
+	"Overview": "",
 	"Password": "",
 	"PDF document (.pdf)": "",
 	"PDF Extract Images (OCR)": "",
@@ -526,6 +538,7 @@
 	"Save": "",
 	"Save & Create": "",
 	"Save & Update": "",
+	"Save As Copy": "",
 	"Save Tag": "",
 	"Saving chat logs directly to your browser's storage is no longer supported. Please take a moment to download and delete your chat logs by clicking the button below. Don't worry, you can easily re-import your chat logs to the backend through": "",
 	"Scan": "",
@@ -568,16 +581,20 @@
 	"Send": "",
 	"Send a Message": "",
 	"Send message": "",
+	"Sends `stream_options: { include_usage: true }` in the request.\nSupported providers will return token usage information in the response when set.": "",
 	"September": "",
 	"Serper API Key": "",
 	"Serply API Key": "",
 	"Serpstack API Key": "",
 	"Server connection verified": "",
 	"Set as default": "",
+	"Set CFG Scale": "",
 	"Set Default Model": "",
 	"Set embedding model (e.g. {{model}})": "",
 	"Set Image Size": "",
 	"Set reranking model (e.g. {{model}})": "",
+	"Set Sampler": "",
+	"Set Scheduler": "",
 	"Set Steps": "",
 	"Set Task Model": "",
 	"Set Voice": "",
@@ -600,7 +617,9 @@
 	"Source": "",
 	"Speech recognition error: {{error}}": "",
 	"Speech-to-Text Engine": "",
+	"Speed Rate": "",
 	"Stop Sequence": "",
+	"Stream Chat Response": "",
 	"STT Model": "",
 	"STT Settings": "",
 	"Submit": "",

+ 19 - 0
src/lib/i18n/locales/es-ES/translation.json

@@ -73,7 +73,10 @@
 	"AUTOMATIC1111 Api Auth String": "",
 	"AUTOMATIC1111 Base URL": "Dirección URL de AUTOMATIC1111",
 	"AUTOMATIC1111 Base URL is required.": "La dirección URL de AUTOMATIC1111 es requerida.",
+	"Available list": "",
 	"available!": "¡disponible!",
+	"Azure AI Speech": "",
+	"Azure Region": "",
 	"Back": "Volver",
 	"Bad Response": "Respuesta incorrecta",
 	"Banners": "Banners",
@@ -94,6 +97,7 @@
 	"Chat Bubble UI": "Burbuja de chat UI",
 	"Chat Controls": "",
 	"Chat direction": "Dirección del Chat",
+	"Chat Overview": "",
 	"Chats": "Chats",
 	"Check Again": "Verifica de nuevo",
 	"Check for updates": "Verificar actualizaciones",
@@ -238,6 +242,7 @@
 	"Enter a detail about yourself for your LLMs to recall": "Ingrese un detalle sobre usted para que sus LLMs recuerden",
 	"Enter api auth string (e.g. username:password)": "",
 	"Enter Brave Search API Key": "Ingresa la clave de API de Brave Search",
+	"Enter CFG Scale (e.g. 7.0)": "",
 	"Enter Chunk Overlap": "Ingresar superposición de fragmentos",
 	"Enter Chunk Size": "Ingrese el tamaño del fragmento",
 	"Enter Github Raw URL": "Ingresa la URL sin procesar de Github",
@@ -248,6 +253,8 @@
 	"Enter Model ID": "",
 	"Enter model tag (e.g. {{modelTag}})": "Ingrese la etiqueta del modelo (p.ej. {{modelTag}})",
 	"Enter Number of Steps (e.g. 50)": "Ingrese el número de pasos (p.ej., 50)",
+	"Enter Sampler (e.g. Euler a)": "",
+	"Enter Scheduler (e.g. Karras)": "",
 	"Enter Score": "Ingrese la puntuación",
 	"Enter SearchApi API Key": "",
 	"Enter SearchApi Engine": "",
@@ -408,6 +415,8 @@
 	"Model {{modelId}} not found": "El modelo {{modelId}} no fue encontrado",
 	"Model {{modelName}} is not vision capable": "El modelo {{modelName}} no es capaz de ver",
 	"Model {{name}} is now {{status}}": "El modelo {{name}} ahora es {{status}}",
+	"Model {{name}} is now at the top": "",
+	"Model accepts image inputs": "",
 	"Model created successfully!": "Modelo creado correctamente!",
 	"Model filesystem path detected. Model shortname is required for update, cannot continue.": "Se detectó la ruta del sistema de archivos del modelo. Se requiere el nombre corto del modelo para la actualización, no se puede continuar.",
 	"Model ID": "ID del modelo",
@@ -419,6 +428,7 @@
 	"Modelfile Content": "Contenido del Modelfile",
 	"Models": "Modelos",
 	"More": "Más",
+	"Move to Top": "",
 	"Name": "Nombre",
 	"Name Tag": "Nombre de etiqueta",
 	"Name your model": "Asigne un nombre a su modelo",
@@ -464,6 +474,8 @@
 	"OpenAI URL/Key required.": "URL/Clave de OpenAI es requerida.",
 	"or": "o",
 	"Other": "Otro",
+	"Output format": "",
+	"Overview": "",
 	"Password": "Contraseña",
 	"PDF document (.pdf)": "PDF document (.pdf)",
 	"PDF Extract Images (OCR)": "Extraer imágenes de PDF (OCR)",
@@ -526,6 +538,7 @@
 	"Save": "Guardar",
 	"Save & Create": "Guardar y Crear",
 	"Save & Update": "Guardar y Actualizar",
+	"Save As Copy": "",
 	"Save Tag": "",
 	"Saving chat logs directly to your browser's storage is no longer supported. Please take a moment to download and delete your chat logs by clicking the button below. Don't worry, you can easily re-import your chat logs to the backend through": "Ya no se admite guardar registros de chat directamente en el almacenamiento de su navegador. Tómese un momento para descargar y eliminar sus registros de chat haciendo clic en el botón a continuación. No te preocupes, puedes volver a importar fácilmente tus registros de chat al backend a través de",
 	"Scan": "Escanear",
@@ -569,16 +582,20 @@
 	"Send": "Enviar",
 	"Send a Message": "Enviar un Mensaje",
 	"Send message": "Enviar Mensaje",
+	"Sends `stream_options: { include_usage: true }` in the request.\nSupported providers will return token usage information in the response when set.": "",
 	"September": "Septiembre",
 	"Serper API Key": "Clave API de Serper",
 	"Serply API Key": "Clave API de Serply",
 	"Serpstack API Key": "Clave API de Serpstack",
 	"Server connection verified": "Conexión del servidor verificada",
 	"Set as default": "Establecer por defecto",
+	"Set CFG Scale": "",
 	"Set Default Model": "Establecer modelo predeterminado",
 	"Set embedding model (e.g. {{model}})": "Establecer modelo de embedding (ej. {{model}})",
 	"Set Image Size": "Establecer tamaño de imagen",
 	"Set reranking model (e.g. {{model}})": "Establecer modelo de reranking (ej. {{model}})",
+	"Set Sampler": "",
+	"Set Scheduler": "",
 	"Set Steps": "Establecer Pasos",
 	"Set Task Model": "Establecer modelo de tarea",
 	"Set Voice": "Establecer la voz",
@@ -601,7 +618,9 @@
 	"Source": "Fuente",
 	"Speech recognition error: {{error}}": "Error de reconocimiento de voz: {{error}}",
 	"Speech-to-Text Engine": "Motor de voz a texto",
+	"Speed Rate": "",
 	"Stop Sequence": "Detener secuencia",
+	"Stream Chat Response": "",
 	"STT Model": "Modelo STT",
 	"STT Settings": "Configuraciones de STT",
 	"Submit": "Enviar",

+ 19 - 0
src/lib/i18n/locales/fa-IR/translation.json

@@ -73,7 +73,10 @@
 	"AUTOMATIC1111 Api Auth String": "",
 	"AUTOMATIC1111 Base URL": "پایه URL AUTOMATIC1111 ",
 	"AUTOMATIC1111 Base URL is required.": "به URL پایه AUTOMATIC1111 مورد نیاز است.",
+	"Available list": "",
 	"available!": "در دسترس!",
+	"Azure AI Speech": "",
+	"Azure Region": "",
 	"Back": "بازگشت",
 	"Bad Response": "پاسخ خوب نیست",
 	"Banners": "بنر",
@@ -94,6 +97,7 @@
 	"Chat Bubble UI": "UI\u200cی\u200c گفتگو\u200c",
 	"Chat Controls": "",
 	"Chat direction": "جهت\u200cگفتگو",
+	"Chat Overview": "",
 	"Chats": "گپ\u200cها",
 	"Check Again": "چک مجدد",
 	"Check for updates": "بررسی به\u200cروزرسانی",
@@ -238,6 +242,7 @@
 	"Enter a detail about yourself for your LLMs to recall": "برای ذخیره سازی اطلاعات خود، یک توضیح کوتاه درباره خود را وارد کنید",
 	"Enter api auth string (e.g. username:password)": "",
 	"Enter Brave Search API Key": "کلید API جستجوی شجاع را وارد کنید",
+	"Enter CFG Scale (e.g. 7.0)": "",
 	"Enter Chunk Overlap": "مقدار Chunk Overlap را وارد کنید",
 	"Enter Chunk Size": "مقدار Chunk Size را وارد کنید",
 	"Enter Github Raw URL": "ادرس Github Raw را وارد کنید",
@@ -248,6 +253,8 @@
 	"Enter Model ID": "",
 	"Enter model tag (e.g. {{modelTag}})": "تگ مدل را وارد کنید (مثلا {{modelTag}})",
 	"Enter Number of Steps (e.g. 50)": "تعداد گام ها را وارد کنید (مثال: 50)",
+	"Enter Sampler (e.g. Euler a)": "",
+	"Enter Scheduler (e.g. Karras)": "",
 	"Enter Score": "امتیاز را وارد کنید",
 	"Enter SearchApi API Key": "",
 	"Enter SearchApi Engine": "",
@@ -408,6 +415,8 @@
 	"Model {{modelId}} not found": "مدل {{modelId}} یافت نشد",
 	"Model {{modelName}} is not vision capable": "مدل {{modelName}} قادر به بینایی نیست",
 	"Model {{name}} is now {{status}}": "مدل {{name}} در حال حاضر {{status}}",
+	"Model {{name}} is now at the top": "",
+	"Model accepts image inputs": "",
 	"Model created successfully!": "",
 	"Model filesystem path detected. Model shortname is required for update, cannot continue.": "مسیر فایل سیستم مدل یافت شد. برای بروزرسانی نیاز است نام کوتاه مدل وجود داشته باشد.",
 	"Model ID": "شناسه مدل",
@@ -419,6 +428,7 @@
 	"Modelfile Content": "محتویات فایل مدل",
 	"Models": "مدل\u200cها",
 	"More": "بیشتر",
+	"Move to Top": "",
 	"Name": "نام",
 	"Name Tag": "نام تگ",
 	"Name your model": "نام مدل خود را",
@@ -464,6 +474,8 @@
 	"OpenAI URL/Key required.": "URL/Key OpenAI مورد نیاز است.",
 	"or": "روشن",
 	"Other": "دیگر",
+	"Output format": "",
+	"Overview": "",
 	"Password": "رمز عبور",
 	"PDF document (.pdf)": "PDF سند (.pdf)",
 	"PDF Extract Images (OCR)": "استخراج تصاویر از PDF (OCR)",
@@ -526,6 +538,7 @@
 	"Save": "ذخیره",
 	"Save & Create": "ذخیره و ایجاد",
 	"Save & Update": "ذخیره و به\u200cروزرسانی",
+	"Save As Copy": "",
 	"Save Tag": "",
 	"Saving chat logs directly to your browser's storage is no longer supported. Please take a moment to download and delete your chat logs by clicking the button below. Don't worry, you can easily re-import your chat logs to the backend through": "ذخیره گزارش\u200cهای چت مستقیماً در حافظه مرورگر شما دیگر پشتیبانی نمی\u200cشود. لطفاً با کلیک بر روی دکمه زیر، چند لحظه برای دانلود و حذف گزارش های چت خود وقت بگذارید. نگران نباشید، شما به راحتی می توانید گزارش های چت خود را از طریق بکند دوباره وارد کنید",
 	"Scan": "اسکن",
@@ -568,16 +581,20 @@
 	"Send": "ارسال",
 	"Send a Message": "ارسال یک پیام",
 	"Send message": "ارسال پیام",
+	"Sends `stream_options: { include_usage: true }` in the request.\nSupported providers will return token usage information in the response when set.": "",
 	"September": "سپتامبر",
 	"Serper API Key": "کلید API Serper",
 	"Serply API Key": "",
 	"Serpstack API Key": "کلید API Serpstack",
 	"Server connection verified": "اتصال سرور تأیید شد",
 	"Set as default": "تنظیم به عنوان پیشفرض",
+	"Set CFG Scale": "",
 	"Set Default Model": "تنظیم مدل پیش فرض",
 	"Set embedding model (e.g. {{model}})": "تنظیم مدل پیچشی (برای مثال {{model}})",
 	"Set Image Size": "تنظیم اندازه تصویر",
 	"Set reranking model (e.g. {{model}})": "تنظیم مدل ری\u200cراینگ (برای مثال {{model}})",
+	"Set Sampler": "",
+	"Set Scheduler": "",
 	"Set Steps": "تنظیم گام\u200cها",
 	"Set Task Model": "تنظیم مدل تکلیف",
 	"Set Voice": "تنظیم صدا",
@@ -600,7 +617,9 @@
 	"Source": "منبع",
 	"Speech recognition error: {{error}}": "خطای تشخیص گفتار: {{error}}",
 	"Speech-to-Text Engine": "موتور گفتار به متن",
+	"Speed Rate": "",
 	"Stop Sequence": "توالی توقف",
+	"Stream Chat Response": "",
 	"STT Model": "",
 	"STT Settings": "STT تنظیمات",
 	"Submit": "ارسال",

+ 19 - 0
src/lib/i18n/locales/fi-FI/translation.json

@@ -73,7 +73,10 @@
 	"AUTOMATIC1111 Api Auth String": "",
 	"AUTOMATIC1111 Base URL": "AUTOMATIC1111-perus-URL",
 	"AUTOMATIC1111 Base URL is required.": "AUTOMATIC1111-perus-URL vaaditaan.",
+	"Available list": "",
 	"available!": "saatavilla!",
+	"Azure AI Speech": "",
+	"Azure Region": "",
 	"Back": "Takaisin",
 	"Bad Response": "Epäkelpo vastaus",
 	"Banners": "Bannerit",
@@ -94,6 +97,7 @@
 	"Chat Bubble UI": "Keskustelu-pallojen käyttöliittymä",
 	"Chat Controls": "",
 	"Chat direction": "Keskustelun suunta",
+	"Chat Overview": "",
 	"Chats": "Keskustelut",
 	"Check Again": "Tarkista uudelleen",
 	"Check for updates": "Tarkista päivitykset",
@@ -238,6 +242,7 @@
 	"Enter a detail about yourself for your LLMs to recall": "Kirjoita tieto itseestäsi LLM:ien muistamiseksi",
 	"Enter api auth string (e.g. username:password)": "",
 	"Enter Brave Search API Key": "Anna Brave Search API -avain",
+	"Enter CFG Scale (e.g. 7.0)": "",
 	"Enter Chunk Overlap": "Syötä osien päällekkäisyys",
 	"Enter Chunk Size": "Syötä osien koko",
 	"Enter Github Raw URL": "Kirjoita Github Raw URL-osoite",
@@ -248,6 +253,8 @@
 	"Enter Model ID": "",
 	"Enter model tag (e.g. {{modelTag}})": "Syötä mallitagi (esim. {{modelTag}})",
 	"Enter Number of Steps (e.g. 50)": "Syötä askelien määrä (esim. 50)",
+	"Enter Sampler (e.g. Euler a)": "",
+	"Enter Scheduler (e.g. Karras)": "",
 	"Enter Score": "Syötä pisteet",
 	"Enter SearchApi API Key": "",
 	"Enter SearchApi Engine": "",
@@ -408,6 +415,8 @@
 	"Model {{modelId}} not found": "Mallia {{modelId}} ei löytynyt",
 	"Model {{modelName}} is not vision capable": "Malli {{modelName}} ei kykene näkökykyyn",
 	"Model {{name}} is now {{status}}": "Malli {{name}} on nyt {{status}}",
+	"Model {{name}} is now at the top": "",
+	"Model accepts image inputs": "",
 	"Model created successfully!": "",
 	"Model filesystem path detected. Model shortname is required for update, cannot continue.": "Mallin tiedostojärjestelmäpolku havaittu. Mallin lyhytnimi vaaditaan päivitykseen, ei voi jatkaa.",
 	"Model ID": "Mallin tunnus",
@@ -419,6 +428,7 @@
 	"Modelfile Content": "Mallitiedoston sisältö",
 	"Models": "Mallit",
 	"More": "Lisää",
+	"Move to Top": "",
 	"Name": "Nimi",
 	"Name Tag": "Nimitagi",
 	"Name your model": "Mallin nimeäminen",
@@ -464,6 +474,8 @@
 	"OpenAI URL/Key required.": "OpenAI URL/ -avain vaaditaan.",
 	"or": "tai",
 	"Other": "Muu",
+	"Output format": "",
+	"Overview": "",
 	"Password": "Salasana",
 	"PDF document (.pdf)": "PDF-tiedosto (.pdf)",
 	"PDF Extract Images (OCR)": "PDF-tiedoston kuvien erottelu (OCR)",
@@ -526,6 +538,7 @@
 	"Save": "Tallenna",
 	"Save & Create": "Tallenna ja luo",
 	"Save & Update": "Tallenna ja päivitä",
+	"Save As Copy": "",
 	"Save Tag": "",
 	"Saving chat logs directly to your browser's storage is no longer supported. Please take a moment to download and delete your chat logs by clicking the button below. Don't worry, you can easily re-import your chat logs to the backend through": "Keskustelulokien tallentaminen suoraan selaimen tallennustilaan ei ole enää tuettua. Lataa ja poista keskustelulokit napsauttamalla alla olevaa painiketta. Älä huoli, voit helposti tuoda keskustelulokit takaisin backendiin",
 	"Scan": "Skannaa",
@@ -568,16 +581,20 @@
 	"Send": "Lähetä",
 	"Send a Message": "Lähetä viesti",
 	"Send message": "Lähetä viesti",
+	"Sends `stream_options: { include_usage: true }` in the request.\nSupported providers will return token usage information in the response when set.": "",
 	"September": "syyskuu",
 	"Serper API Key": "Serper API -avain",
 	"Serply API Key": "",
 	"Serpstack API Key": "Serpstack API -avain",
 	"Server connection verified": "Palvelinyhteys varmennettu",
 	"Set as default": "Aseta oletukseksi",
+	"Set CFG Scale": "",
 	"Set Default Model": "Aseta oletusmalli",
 	"Set embedding model (e.g. {{model}})": "Aseta upotusmalli (esim. {{model}})",
 	"Set Image Size": "Aseta kuvan koko",
 	"Set reranking model (e.g. {{model}})": "Aseta uudelleenpisteytysmalli (esim. {{model}})",
+	"Set Sampler": "",
+	"Set Scheduler": "",
 	"Set Steps": "Aseta askelmäärä",
 	"Set Task Model": "Aseta tehtävämalli",
 	"Set Voice": "Aseta puheääni",
@@ -600,7 +617,9 @@
 	"Source": "Lähde",
 	"Speech recognition error: {{error}}": "Puheentunnistusvirhe: {{error}}",
 	"Speech-to-Text Engine": "Puheentunnistusmoottori",
+	"Speed Rate": "",
 	"Stop Sequence": "Lopetussekvenssi",
+	"Stream Chat Response": "",
 	"STT Model": "",
 	"STT Settings": "Puheentunnistusasetukset",
 	"Submit": "Lähetä",

+ 19 - 0
src/lib/i18n/locales/fr-CA/translation.json

@@ -73,7 +73,10 @@
 	"AUTOMATIC1111 Api Auth String": "AUTOMATIC1111 Chaîne d'authentification de l'API",
 	"AUTOMATIC1111 Base URL": "URL de base AUTOMATIC1111",
 	"AUTOMATIC1111 Base URL is required.": "L'URL de base {AUTOMATIC1111} est requise.",
+	"Available list": "",
 	"available!": "disponible !",
+	"Azure AI Speech": "",
+	"Azure Region": "",
 	"Back": "Retour en arrière",
 	"Bad Response": "Mauvaise réponse",
 	"Banners": "Banniers",
@@ -94,6 +97,7 @@
 	"Chat Bubble UI": "Bulles de discussion",
 	"Chat Controls": "",
 	"Chat direction": "Direction du chat",
+	"Chat Overview": "",
 	"Chats": "Conversations",
 	"Check Again": "Vérifiez à nouveau.",
 	"Check for updates": "Vérifier les mises à jour disponibles",
@@ -238,6 +242,7 @@
 	"Enter a detail about yourself for your LLMs to recall": "Saisissez un détail sur vous-même que vos LLMs pourront se rappeler",
 	"Enter api auth string (e.g. username:password)": "Entrez la chaîne d'authentification de l'API (par ex. nom d'utilisateur:mot de passe)",
 	"Enter Brave Search API Key": "Entrez la clé API Brave Search",
+	"Enter CFG Scale (e.g. 7.0)": "",
 	"Enter Chunk Overlap": "Entrez le chevauchement de chunk",
 	"Enter Chunk Size": "Entrez la taille de bloc",
 	"Enter Github Raw URL": "Entrez l'URL brute de GitHub",
@@ -248,6 +253,8 @@
 	"Enter Model ID": "",
 	"Enter model tag (e.g. {{modelTag}})": "Entrez l'étiquette du modèle (par ex. {{modelTag}})",
 	"Enter Number of Steps (e.g. 50)": "Entrez le nombre de pas (par ex. 50)",
+	"Enter Sampler (e.g. Euler a)": "",
+	"Enter Scheduler (e.g. Karras)": "",
 	"Enter Score": "Entrez votre score",
 	"Enter SearchApi API Key": "",
 	"Enter SearchApi Engine": "",
@@ -408,6 +415,8 @@
 	"Model {{modelId}} not found": "Modèle {{modelId}} introuvable",
 	"Model {{modelName}} is not vision capable": "Le modèle {{modelName}} n'a pas de capacités visuelles",
 	"Model {{name}} is now {{status}}": "Le modèle {{name}} est désormais {{status}}.",
+	"Model {{name}} is now at the top": "",
+	"Model accepts image inputs": "",
 	"Model created successfully!": "Le modèle a été créé avec succès !",
 	"Model filesystem path detected. Model shortname is required for update, cannot continue.": "Chemin du système de fichiers de modèle détecté. Le nom court du modèle est requis pour la mise à jour, l'opération ne peut pas être poursuivie.",
 	"Model ID": "ID du modèle",
@@ -419,6 +428,7 @@
 	"Modelfile Content": "Contenu du Fichier de Modèle",
 	"Models": "Modèles",
 	"More": "Plus de",
+	"Move to Top": "",
 	"Name": "Nom",
 	"Name Tag": "Étiquette de nom",
 	"Name your model": "Nommez votre modèle",
@@ -464,6 +474,8 @@
 	"OpenAI URL/Key required.": "URL/Clé OpenAI requise.",
 	"or": "ou",
 	"Other": "Autre",
+	"Output format": "",
+	"Overview": "",
 	"Password": "Mot de passe",
 	"PDF document (.pdf)": "Document au format PDF  (.pdf)",
 	"PDF Extract Images (OCR)": "Extraction d'images PDF (OCR)",
@@ -526,6 +538,7 @@
 	"Save": "Enregistrer",
 	"Save & Create": "Enregistrer & Créer",
 	"Save & Update": "Enregistrer & Mettre à jour",
+	"Save As Copy": "",
 	"Save Tag": "",
 	"Saving chat logs directly to your browser's storage is no longer supported. Please take a moment to download and delete your chat logs by clicking the button below. Don't worry, you can easily re-import your chat logs to the backend through": "La sauvegarde des journaux de discussion directement dans le stockage de votre navigateur n'est plus prise en charge. Veuillez prendre un instant pour télécharger et supprimer vos journaux de discussion en cliquant sur le bouton ci-dessous. Pas de soucis, vous pouvez facilement les réimporter depuis le backend via l'interface ci-dessous",
 	"Scan": "Scanner",
@@ -569,16 +582,20 @@
 	"Send": "Envoyer",
 	"Send a Message": "Envoyer un message",
 	"Send message": "Envoyer un message",
+	"Sends `stream_options: { include_usage: true }` in the request.\nSupported providers will return token usage information in the response when set.": "",
 	"September": "Septembre",
 	"Serper API Key": "Clé API Serper",
 	"Serply API Key": "Clé API Serply",
 	"Serpstack API Key": "Clé API Serpstack",
 	"Server connection verified": "Connexion au serveur vérifiée",
 	"Set as default": "Définir comme valeur par défaut",
+	"Set CFG Scale": "",
 	"Set Default Model": "Définir le modèle par défaut",
 	"Set embedding model (e.g. {{model}})": "Définir le modèle d'encodage (par ex. {{model}})",
 	"Set Image Size": "Définir la taille de l'image",
 	"Set reranking model (e.g. {{model}})": "Définir le modèle de reclassement (par ex. {{model}})",
+	"Set Sampler": "",
+	"Set Scheduler": "",
 	"Set Steps": "Définir les étapes",
 	"Set Task Model": "Définir le modèle de tâche",
 	"Set Voice": "Définir la voix",
@@ -601,7 +618,9 @@
 	"Source": "Source",
 	"Speech recognition error: {{error}}": "Erreur de reconnaissance vocale\u00a0: {{error}}",
 	"Speech-to-Text Engine": "Moteur de reconnaissance vocale",
+	"Speed Rate": "",
 	"Stop Sequence": "Séquence d'arrêt",
+	"Stream Chat Response": "",
 	"STT Model": "Modèle de STT",
 	"STT Settings": "Paramètres de STT",
 	"Submit": "Soumettre",

+ 132 - 113
src/lib/i18n/locales/fr-FR/translation.json

@@ -3,12 +3,12 @@
 	"(Beta)": "(Version bêta)",
 	"(e.g. `sh webui.sh --api --api-auth username_password`)": "(par ex. `sh webui.sh --api --api-auth username_password`)",
 	"(e.g. `sh webui.sh --api`)": "(par exemple `sh webui.sh --api`)",
-	"(latest)": "(dernier)",
+	"(latest)": "(dernière version)",
 	"{{ models }}": "{{ modèles }}",
 	"{{ owner }}: You cannot delete a base model": "{{ propriétaire }} : Vous ne pouvez pas supprimer un modèle de base.",
-	"{{user}}'s Chats": "Discussions de {{user}}",
+	"{{user}}'s Chats": "Conversations de {{user}}",
 	"{{webUIName}} Backend Required": "Backend {{webUIName}} requis",
-	"*Prompt node ID(s) are required for image generation": "*Le(s) identifiant(s) de noeud du prompt sont nécessaires pour la génération d’images",
+	"*Prompt node ID(s) are required for image generation": "*Les ID de noeud du prompt sont nécessaires pour la génération d’images",
 	"A task model is used when performing tasks such as generating titles for chats and web search queries": "Un modèle de tâche est utilisé lors de l’exécution de tâches telles que la génération de titres pour les conversations et les requêtes de recherche sur le web.",
 	"a user": "un utilisateur",
 	"About": "À propos",
@@ -30,13 +30,13 @@
 	"Add Model": "Ajouter un modèle",
 	"Add Tag": "Ajouter une étiquette",
 	"Add Tags": "Ajouter des étiquettes",
-	"Add User": "Ajouter un Utilisateur",
+	"Add User": "Ajouter un utilisateur",
 	"Adjusting these settings will apply changes universally to all users.": "L'ajustement de ces paramètres appliquera universellement les changements à tous les utilisateurs.",
 	"admin": "administrateur",
 	"Admin": "Administrateur",
-	"Admin Panel": "Tableau de bord administrateur",
-	"Admin Settings": "Paramètres d'administration",
-	"Admins have access to all tools at all times; users need tools assigned per model in the workspace.": "Les administrateurs ont accès à tous les outils en tout temps ; les utilisateurs ont besoin d'outils affectés par modèle dans l'espace de travail.",
+	"Admin Panel": "Panneau d'administration",
+	"Admin Settings": "Paramètres admin.",
+	"Admins have access to all tools at all times; users need tools assigned per model in the workspace.": "Les administrateurs ont accès à tous les outils en tout temps ; il faut attribuer des outils aux utilisateurs par modèle dans l'espace de travail.",
 	"Advanced Parameters": "Paramètres avancés",
 	"Advanced Params": "Paramètres avancés",
 	"all": "toutes",
@@ -68,17 +68,20 @@
 	"Attention to detail": "Attention aux détails",
 	"Audio": "Audio",
 	"August": "Août",
-	"Auto-playback response": "Réponse de lecture automatique",
+	"Auto-playback response": "Lire automatiquement la réponse",
 	"Automatic1111": "Automatic1111",
 	"AUTOMATIC1111 Api Auth String": "AUTOMATIC1111 Chaîne d'authentification de l'API",
 	"AUTOMATIC1111 Base URL": "URL de base AUTOMATIC1111",
 	"AUTOMATIC1111 Base URL is required.": "L'URL de base {AUTOMATIC1111} est requise.",
+	"Available list": "",
 	"available!": "disponible !",
+	"Azure AI Speech": "",
+	"Azure Region": "",
 	"Back": "Retour en arrière",
 	"Bad Response": "Mauvaise réponse",
-	"Banners": "Banniers",
+	"Banners": "Bannières",
 	"Base Model (From)": "Modèle de base (à partir de)",
-	"Batch Size (num_batch)": "Taille du lot (num_batch)",
+	"Batch Size (num_batch)": "Batch Size (num_batch)",
 	"before": "avant",
 	"Being lazy": "Être fainéant",
 	"Brave Search API Key": "Clé API Brave Search",
@@ -91,31 +94,32 @@
 	"Change Password": "Changer le mot de passe",
 	"Chat": "Chat",
 	"Chat Background Image": "Image d'arrière-plan de la fenêtre de chat",
-	"Chat Bubble UI": "Bulles de discussion",
+	"Chat Bubble UI": "Bulles de chat",
 	"Chat Controls": "Contrôles du chat",
 	"Chat direction": "Direction du chat",
+	"Chat Overview": "",
 	"Chats": "Conversations",
 	"Check Again": "Vérifiez à nouveau.",
 	"Check for updates": "Vérifier les mises à jour disponibles",
 	"Checking for updates...": "Recherche de mises à jour...",
 	"Choose a model before saving...": "Choisissez un modèle avant de sauvegarder...",
-	"Chunk Overlap": "Chevauchement de blocs",
-	"Chunk Params": "Paramètres d'encombrement",
-	"Chunk Size": "Taille de bloc",
+	"Chunk Overlap": "Chevauchement des chunks",
+	"Chunk Params": "Paramètres des chunks",
+	"Chunk Size": "Taille des chunks",
 	"Citation": "Citation",
 	"Clear memory": "Libérer la mémoire",
 	"Click here for help.": "Cliquez ici pour obtenir de l'aide.",
 	"Click here to": "Cliquez ici pour",
-	"Click here to download user import template file.": "Cliquez ici pour télécharger le fichier modèle d'importation utilisateur.",
+	"Click here to download user import template file.": "Cliquez ici pour télécharger le fichier modèle d'importation des utilisateurs.",
 	"Click here to select": "Cliquez ici pour sélectionner",
-	"Click here to select a csv file.": "Cliquez ici pour sélectionner un fichier CSV.",
+	"Click here to select a csv file.": "Cliquez ici pour sélectionner un fichier .csv.",
 	"Click here to select a py file.": "Cliquez ici pour sélectionner un fichier .py.",
 	"Click here to select documents.": "Cliquez ici pour sélectionner les documents.",
 	"Click here to upload a workflow.json file.": "Cliquez ici pour télécharger un fichier workflow.json.",
 	"click here.": "cliquez ici.",
 	"Click on the user role button to change a user's role.": "Cliquez sur le bouton de rôle d'utilisateur pour modifier le rôle d'un utilisateur.",
 	"Clipboard write permission denied. Please check your browser settings to grant the necessary access.": "L'autorisation d'écriture du presse-papier a été refusée. Veuillez vérifier les paramètres de votre navigateur pour accorder l'accès nécessaire.",
-	"Clone": "Copie conforme",
+	"Clone": "Cloner",
 	"Close": "Fermer",
 	"Code formatted successfully": "Le code a été formaté avec succès",
 	"Collection": "Collection",
@@ -128,19 +132,19 @@
 	"Concurrent Requests": "Demandes concurrentes",
 	"Confirm": "Confirmer",
 	"Confirm Password": "Confirmer le mot de passe",
-	"Confirm your action": "Confirmez votre action",
+	"Confirm your action": "Confirmer votre action",
 	"Connections": "Connexions",
-	"Contact Admin for WebUI Access": "Contacter l'administrateur pour l'accès à l'interface Web",
+	"Contact Admin for WebUI Access": "Contacter l'administrateur pour obtenir l'accès à WebUI",
 	"Content": "Contenu",
 	"Content Extraction": "Extraction du contenu",
 	"Context Length": "Longueur du contexte",
 	"Continue Response": "Continuer la réponse",
 	"Continue with {{provider}}": "Continuer avec {{provider}}",
-	"Control how message text is split for TTS requests. 'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "Contrôle la façon dont le texte des messages est divisé pour les demandes de TTS. 'Ponctuation' divise en phrases, 'paragraphes' divise en paragraphes et 'aucun' garde le message en tant que chaîne unique.",
+	"Control how message text is split for TTS requests. 'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "Contrôle la façon dont le texte des messages est divisé pour les demandes de Text-to-Speech. « ponctuation » divise en phrases, « paragraphes » divise en paragraphes et « aucun » garde le message en tant que chaîne de texte unique.",
 	"Controls": "Contrôles",
 	"Copied": "Copié",
-	"Copied shared chat URL to clipboard!": "URL du chat copiée dans le presse-papiers\u00a0!",
-	"Copied to clipboard": "",
+	"Copied shared chat URL to clipboard!": "URL du chat copié dans le presse-papiers !",
+	"Copied to clipboard": "Copié dans le presse-papiers",
 	"Copy": "Copie",
 	"Copy Code": "Copier le code",
 	"Copy last code block": "Copier le dernier bloc de code",
@@ -149,22 +153,22 @@
 	"Copying to clipboard was successful!": "La copie dans le presse-papiers a réussi !",
 	"Create a model": "Créer un modèle",
 	"Create Account": "Créer un compte",
-	"Create new key": "Créer une nouvelle clé principale",
+	"Create new key": "Créer une nouvelle clé",
 	"Create new secret key": "Créer une nouvelle clé secrète",
 	"Created at": "Créé à",
 	"Created At": "Créé le",
 	"Created by": "Créé par",
 	"CSV Import": "Import CSV",
-	"Current Model": "Modèle actuel amélioré",
+	"Current Model": "Modèle actuel",
 	"Current Password": "Mot de passe actuel",
 	"Custom": "Sur mesure",
-	"Customize models for a specific purpose": "Personnaliser les modèles pour une fonction spécifique",
-	"Dark": "Obscur",
+	"Customize models for a specific purpose": "Personnaliser les modèles pour un usage spécifique",
+	"Dark": "Sombre",
 	"Dashboard": "Tableau de bord",
 	"Database": "Base de données",
 	"December": "Décembre",
 	"Default": "Par défaut",
-	"Default (Open AI)": "Par défaut (Open AI)",
+	"Default (Open AI)": "Par défaut (OpenAI)",
 	"Default (SentenceTransformers)": "Par défaut (Sentence Transformers)",
 	"Default Model": "Modèle standard",
 	"Default model updated": "Modèle par défaut mis à jour",
@@ -198,7 +202,7 @@
 	"Discover, download, and explore model presets": "Découvrir, télécharger et explorer des préréglages de modèles",
 	"Dismissible": "Fermeture",
 	"Display Emoji in Call": "Afficher les emojis pendant l'appel",
-	"Display the username instead of You in the Chat": "Afficher le nom d'utilisateur à la place de \"Vous\" dans le Chat",
+	"Display the username instead of You in the Chat": "Afficher le nom d'utilisateur à la place de \"Vous\" dans le chat",
 	"Do not install functions from sources you do not fully trust.": "N'installez pas de fonctions provenant de sources auxquelles vous ne faites pas entièrement confiance.",
 	"Do not install tools from sources you do not fully trust.": "N'installez pas d'outils provenant de sources auxquelles vous ne faites pas entièrement confiance.",
 	"Document": "Document",
@@ -222,15 +226,15 @@
 	"Edit User": "Modifier l'utilisateur",
 	"ElevenLabs": "ElevenLabs",
 	"Email": "E-mail",
-	"Embedding Batch Size": "Taille du lot d'encodage",
+	"Embedding Batch Size": "Taille du lot d'embedding",
 	"Embedding Model": "Modèle d'embedding",
-	"Embedding Model Engine": "Moteur de modèle d'encodage",
-	"Embedding model set to \"{{embedding_model}}\"": "Modèle d'encodage défini sur « {{embedding_model}} »",
+	"Embedding Model Engine": "Moteur de modèle d'embedding",
+	"Embedding model set to \"{{embedding_model}}\"": "Modèle d'embedding défini sur « {{embedding_model}} »",
 	"Enable Community Sharing": "Activer le partage communautaire",
 	"Enable Message Rating": "Activer l'évaluation des messages",
 	"Enable New Sign Ups": "Activer les nouvelles inscriptions",
-	"Enable Web Search": "Activer la recherche web",
-	"Enable Web Search Query Generation": "",
+	"Enable Web Search": "Activer la recherche Web",
+	"Enable Web Search Query Generation": "Activer la génération de requêtes de recherche Web",
 	"Enabled": "Activé",
 	"Engine": "Moteur",
 	"Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "Vérifiez que votre fichier CSV comprenne les 4 colonnes dans cet ordre : Name, Email, Password, Role.",
@@ -238,19 +242,22 @@
 	"Enter a detail about yourself for your LLMs to recall": "Saisissez un détail sur vous-même que vos LLMs pourront se rappeler",
 	"Enter api auth string (e.g. username:password)": "Entrez la chaîne d'authentification de l'API (par ex. nom d'utilisateur:mot de passe)",
 	"Enter Brave Search API Key": "Entrez la clé API Brave Search",
-	"Enter Chunk Overlap": "Entrez le chevauchement de chunk",
-	"Enter Chunk Size": "Entrez la taille de bloc",
+	"Enter CFG Scale (e.g. 7.0)": "",
+	"Enter Chunk Overlap": "Entrez le chevauchement des chunks",
+	"Enter Chunk Size": "Entrez la taille des chunks",
 	"Enter Github Raw URL": "Entrez l'URL brute de GitHub",
 	"Enter Google PSE API Key": "Entrez la clé API Google PSE",
 	"Enter Google PSE Engine Id": "Entrez l'identifiant du moteur Google PSE",
 	"Enter Image Size (e.g. 512x512)": "Entrez la taille de l'image (par ex. 512x512)",
 	"Enter language codes": "Entrez les codes de langue",
-	"Enter Model ID": "Entrez l'id du model",
+	"Enter Model ID": "Entrez l'ID du modèle",
 	"Enter model tag (e.g. {{modelTag}})": "Entrez l'étiquette du modèle (par ex. {{modelTag}})",
-	"Enter Number of Steps (e.g. 50)": "Entrez le nombre de pas (par ex. 50)",
+	"Enter Number of Steps (e.g. 50)": "Entrez le nombre d'étapes (par ex. 50)",
+	"Enter Sampler (e.g. Euler a)": "",
+	"Enter Scheduler (e.g. Karras)": "",
 	"Enter Score": "Entrez votre score",
-	"Enter SearchApi API Key": "",
-	"Enter SearchApi Engine": "",
+	"Enter SearchApi API Key": "Entrez la clé API SearchApi",
+	"Enter SearchApi Engine": "Entrez le moteur de recherche SearchApi",
 	"Enter Searxng Query URL": "Entrez l'URL de la requête Searxng",
 	"Enter Serper API Key": "Entrez la clé API Serper",
 	"Enter Serply API Key": "Entrez la clé API Serply",
@@ -270,15 +277,15 @@
 	"Error": "Erreur",
 	"Experimental": "Expérimental",
 	"Export": "Exportation",
-	"Export All Chats (All Users)": "Exporter toutes les conversations (tous les utilisateurs)",
-	"Export chat (.json)": "Exporter la discussion (.json)",
+	"Export All Chats (All Users)": "Exporter toutes les conversations (pour tous les utilisateurs)",
+	"Export chat (.json)": "Exporter la conversation (.json)",
 	"Export Chats": "Exporter les conversations",
-	"Export Config to JSON File": "",
-	"Export Documents Mapping": "Exportez la correspondance des documents",
-	"Export Functions": "Exportez les Fonctions",
+	"Export Config to JSON File": "Exporter la configuration vers un fichier JSON",
+	"Export Documents Mapping": "Exporter le mapping des documents",
+	"Export Functions": "Exportez des fonctions",
 	"Export LiteLLM config.yaml": "Exportez le fichier LiteLLM config.yaml",
-	"Export Models": "Exporter les modèles",
-	"Export Prompts": "Exporter les Prompts",
+	"Export Models": "Exporter des modèles",
+	"Export Prompts": "Exporter des prompts",
 	"Export Tools": "Outils d'exportation",
 	"External Models": "Modèles externes",
 	"Failed to create API Key.": "Échec de la création de la clé API.",
@@ -289,13 +296,13 @@
 	"File": "Fichier",
 	"File Mode": "Mode fichier",
 	"File not found.": "Fichier introuvable.",
-	"File size should not exceed {{maxSize}} MB.": "",
+	"File size should not exceed {{maxSize}} MB.": "La taille du fichier ne doit pas dépasser {{maxSize}} Mo.",
 	"Files": "Fichiers",
 	"Filter is now globally disabled": "Le filtre est maintenant désactivé globalement",
 	"Filter is now globally enabled": "Le filtre est désormais activé globalement",
 	"Filters": "Filtres",
 	"Fingerprint spoofing detected: Unable to use initials as avatar. Defaulting to default profile image.": "Spoofing détecté : impossible d'utiliser les initiales comme avatar. Retour à l'image de profil par défaut.",
-	"Fluidly stream large external response chunks": "Diffuser de manière fluide de larges portions de réponses externes",
+	"Fluidly stream large external response chunks": "Streaming fluide de gros morceaux de réponses externes",
 	"Focus chat input": "Se concentrer sur le chat en entrée",
 	"Followed instructions perfectly": "A parfaitement suivi les instructions",
 	"Form": "Formulaire",
@@ -314,7 +321,7 @@
 	"Functions allow arbitrary code execution.": "Les fonctions permettent l'exécution de code arbitraire.",
 	"Functions imported successfully": "Fonctions importées avec succès",
 	"General": "Général",
-	"General Settings": "Paramètres Généraux",
+	"General Settings": "Paramètres généraux",
 	"Generate Image": "Générer une image",
 	"Generating search query": "Génération d'une requête de recherche",
 	"Generation Info": "Informations sur la génération",
@@ -322,7 +329,7 @@
 	"Global": "Mondial",
 	"Good Response": "Bonne réponse",
 	"Google PSE API Key": "Clé API Google PSE",
-	"Google PSE Engine Id": "ID du moteur de recherche personnalisé de Google",
+	"Google PSE Engine Id": "ID du moteur de recherche PSE de Google",
 	"h:mm a": "h:mm a",
 	"Haptic Feedback": "Retour haptique",
 	"has no conversations.": "n'a aucune conversation.",
@@ -335,11 +342,11 @@
 	"I acknowledge that I have read and I understand the implications of my action. I am aware of the risks associated with executing arbitrary code and I have verified the trustworthiness of the source.": "Je reconnais avoir lu et compris les implications de mes actions. Je suis conscient des risques associés à l'exécution d'un code arbitraire et j'ai vérifié la fiabilité de la source.",
 	"Image Generation (Experimental)": "Génération d'images (expérimental)",
 	"Image Generation Engine": "Moteur de génération d'images",
-	"Image Settings": "Paramètres de l'image",
+	"Image Settings": "Paramètres de génération d'images",
 	"Images": "Images",
-	"Import Chats": "Importer les discussions",
-	"Import Config from JSON File": "",
-	"Import Documents Mapping": "Import de la correspondance des documents",
+	"Import Chats": "Importer les conversations",
+	"Import Config from JSON File": "Importer la configuration depuis un fichier JSON",
+	"Import Documents Mapping": "Importer le mapping des documents",
 	"Import Functions": "Import de fonctions",
 	"Import Models": "Importer des modèles",
 	"Import Prompts": "Importer des prompts",
@@ -349,7 +356,7 @@
 	"Info": "Info",
 	"Input commands": "Entrez les commandes",
 	"Install from Github URL": "Installer depuis l'URL GitHub",
-	"Instant Auto-Send After Voice Transcription": "Envoi automatique instantané après transcription vocale",
+	"Instant Auto-Send After Voice Transcription": "Envoi automatique après la transcription",
 	"Interface": "Interface utilisateur",
 	"Invalid Tag": "Étiquette non valide",
 	"January": "Janvier",
@@ -360,17 +367,17 @@
 	"June": "Juin",
 	"JWT Expiration": "Expiration du jeton JWT",
 	"JWT Token": "Jeton JWT",
-	"Keep Alive": "Rester connecté",
+	"Keep Alive": "Temps de maintien connecté",
 	"Keyboard shortcuts": "Raccourcis clavier",
 	"Knowledge": "Connaissance",
 	"Language": "Langue",
-	"large language models, locally.": "grand modèle de langage, localement",
+	"large language models, locally.": "grand modèle de langage, localement.",
 	"Last Active": "Dernière activité",
 	"Last Modified": "Dernière modification",
-	"Leave empty for unlimited": "",
-	"Leave empty to use the default prompt, or enter a custom prompt": "",
-	"Light": "Lumineux",
-	"Listening...": "En train d'écouter...",
+	"Leave empty for unlimited": "Laissez vide pour illimité",
+	"Leave empty to use the default prompt, or enter a custom prompt": "Laissez vide pour utiliser le prompt par défaut, ou entrez un prompt personnalisé",
+	"Light": "Clair",
+	"Listening...": "Écoute en cours...",
 	"LLMs can make mistakes. Verify important information.": "Les LLM peuvent faire des erreurs. Vérifiez les informations importantes.",
 	"Local Models": "Modèles locaux",
 	"LTR": "LTR",
@@ -378,13 +385,13 @@
 	"Make sure to enclose them with": "Assurez-vous de les inclure dans",
 	"Make sure to export a workflow.json file as API format from ComfyUI.": "Veillez à exporter un fichier workflow.json au format API depuis ComfyUI.",
 	"Manage": "Gérer",
-	"Manage Models": "Gérer les Modèles",
+	"Manage Models": "Gérer les modèles",
 	"Manage Ollama Models": "Gérer les modèles Ollama",
 	"Manage Pipelines": "Gérer les pipelines",
 	"March": "Mars",
 	"Max Tokens (num_predict)": "Tokens maximaux (num_predict)",
-	"Max Upload Count": "",
-	"Max Upload Size": "",
+	"Max Upload Count": "Nombre maximal",
+	"Max Upload Size": "Taille maximale",
 	"Maximum of 3 models can be downloaded simultaneously. Please try again later.": "Un maximum de 3 modèles peut être téléchargé en même temps. Veuillez réessayer ultérieurement.",
 	"May": "Mai",
 	"Memories accessible by LLMs will be shown here.": "Les mémoires accessibles par les LLMs seront affichées ici.",
@@ -394,7 +401,7 @@
 	"Memory deleted successfully": "La mémoire a été supprimée avec succès",
 	"Memory updated successfully": "La mémoire a été mise à jour avec succès",
 	"Merge Responses": "Fusionner les réponses",
-	"Messages you send after creating your link won't be shared. Users with the URL will be able to view the shared chat.": "Les messages que vous envoyez après avoir créé votre lien ne seront pas partagés. Les utilisateurs disposant de l'URL pourront voir le chat partagé.",
+	"Messages you send after creating your link won't be shared. Users with the URL will be able to view the shared chat.": "Les messages que vous envoyez après avoir créé votre lien ne seront pas partagés. Les utilisateurs disposant de l'URL pourront voir la conversation partagée.",
 	"Min P": "P min",
 	"Minimum Score": "Score minimal",
 	"Mirostat": "Mirostat",
@@ -408,6 +415,8 @@
 	"Model {{modelId}} not found": "Modèle {{modelId}} introuvable",
 	"Model {{modelName}} is not vision capable": "Le modèle {{modelName}} n'a pas de capacités visuelles",
 	"Model {{name}} is now {{status}}": "Le modèle {{name}} est désormais {{status}}.",
+	"Model {{name}} is now at the top": "",
+	"Model accepts image inputs": "",
 	"Model created successfully!": "Le modèle a été créé avec succès !",
 	"Model filesystem path detected. Model shortname is required for update, cannot continue.": "Chemin du système de fichiers de modèle détecté. Le nom court du modèle est requis pour la mise à jour, l'opération ne peut pas être poursuivie.",
 	"Model ID": "ID du modèle",
@@ -419,7 +428,8 @@
 	"Modelfile Content": "Contenu du Fichier de Modèle",
 	"Models": "Modèles",
 	"More": "Plus de",
-	"Name": "Nom",
+	"Move to Top": "",
+	"Name": "Nom d'utilisateur",
 	"Name Tag": "Nom de l'étiquette",
 	"Name your model": "Nommez votre modèle",
 	"New Chat": "Nouvelle conversation",
@@ -447,7 +457,7 @@
 	"Ollama API": "API Ollama",
 	"Ollama API disabled": "API Ollama désactivée",
 	"Ollama API is disabled": "L'API Ollama est désactivée",
-	"Ollama Version": "Version Ollama améliorée",
+	"Ollama Version": "Version Ollama",
 	"On": "Activé",
 	"Only": "Seulement",
 	"Only alphanumeric characters and hyphens are allowed in the command string.": "Seuls les caractères alphanumériques et les tirets sont autorisés dans la chaîne de commande.",
@@ -455,15 +465,17 @@
 	"Oops! Looks like the URL is invalid. Please double-check and try again.": "Oups ! Il semble que l'URL soit invalide. Veuillez vérifier à nouveau et réessayer.",
 	"Oops! There was an error in the previous response. Please try again or contact admin.": "Oops ! Il y a eu une erreur dans la réponse précédente. Veuillez réessayer ou contacter l'administrateur.",
 	"Oops! You're using an unsupported method (frontend only). Please serve the WebUI from the backend.": "Oups\u00a0! Vous utilisez une méthode non prise en charge (frontend uniquement). Veuillez servir l'interface Web à partir du backend.",
-	"Open new chat": "Ouvrir une nouvelle discussion",
+	"Open new chat": "Ouvrir une nouvelle conversation",
 	"Open WebUI version (v{{OPEN_WEBUI_VERSION}}) is lower than required version (v{{REQUIRED_VERSION}})": "La version Open WebUI (v{{OPEN_WEBUI_VERSION}}) est inférieure à la version requise (v{{REQUIRED_VERSION}})",
 	"OpenAI": "OpenAI",
-	"OpenAI API": "API OpenAI",
+	"OpenAI API": "API compatibles OpenAI",
 	"OpenAI API Config": "Configuration de l'API OpenAI",
 	"OpenAI API Key is required.": "Une clé API OpenAI est requise.",
 	"OpenAI URL/Key required.": "URL/Clé OpenAI requise.",
 	"or": "ou",
 	"Other": "Autre",
+	"Output format": "",
+	"Overview": "",
 	"Password": "Mot de passe",
 	"PDF document (.pdf)": "Document au format PDF  (.pdf)",
 	"PDF Extract Images (OCR)": "Extraction d'images PDF (OCR)",
@@ -478,9 +490,9 @@
 	"Pipeline downloaded successfully": "Le pipeline a été téléchargé avec succès",
 	"Pipelines": "Pipelines",
 	"Pipelines Not Detected": "Aucun pipelines détecté",
-	"Pipelines Valves": "Vannes de Pipelines",
+	"Pipelines Valves": "Vannes de pipelines",
 	"Plain text (.txt)": "Texte simple (.txt)",
-	"Playground": "Aire de jeux",
+	"Playground": "Playground",
 	"Please carefully review the following warnings:": "Veuillez lire attentivement les avertissements suivants :",
 	"Positive attitude": "Attitude positive",
 	"Previous 30 days": "30 derniers jours",
@@ -506,15 +518,15 @@
 	"Remove Model": "Retirer le modèle",
 	"Rename": "Renommer",
 	"Repeat Last N": "Répéter les N derniers",
-	"Request Mode": "Mode de Requête",
+	"Request Mode": "Mode de requête",
 	"Reranking Model": "Modèle de ré-ranking",
 	"Reranking model disabled": "Modèle de ré-ranking désactivé",
 	"Reranking model set to \"{{reranking_model}}\"": "Modèle de ré-ranking défini sur « {{reranking_model}} »",
 	"Reset": "Réinitialiser",
-	"Reset Upload Directory": "Répertoire de téléchargement réinitialisé",
+	"Reset Upload Directory": "Réinitialiser le répertoire de téléchargement",
 	"Reset Vector Storage": "Réinitialiser le stockage des vecteurs",
 	"Response AutoCopy to Clipboard": "Copie automatique de la réponse vers le presse-papiers",
-	"Response notifications cannot be activated as the website permissions have been denied. Please visit your browser settings to grant the necessary access.": "Les notifications de réponse ne peuvent pas être activées car les autorisations du site web ont été refusées. Veuillez visiter les paramètres de votre navigateur pour accorder l'accès nécessaire.",
+	"Response notifications cannot be activated as the website permissions have been denied. Please visit your browser settings to grant the necessary access.": "Les notifications de réponse ne peuvent pas être activées car les autorisations du site web ont été refusées. Veuillez vérifier les paramètres de votre navigateur pour accorder l'accès nécessaire.",
 	"Response splitting": "Fractionnement de la réponse",
 	"Role": "Rôle",
 	"Rosé Pine": "Pin rosé",
@@ -522,12 +534,13 @@
 	"RTL": "RTL",
 	"Run": "Exécuter",
 	"Run Llama 2, Code Llama, and other models. Customize and create your own.": "Exécutez Llama 2, Code Llama et d'autres modèles. Personnalisez et créez votre propre modèle.",
-	"Running": "Courir",
+	"Running": "Exécution",
 	"Save": "Enregistrer",
 	"Save & Create": "Enregistrer & Créer",
 	"Save & Update": "Enregistrer & Mettre à jour",
+	"Save As Copy": "",
 	"Save Tag": "Enregistrer l'étiquette",
-	"Saving chat logs directly to your browser's storage is no longer supported. Please take a moment to download and delete your chat logs by clicking the button below. Don't worry, you can easily re-import your chat logs to the backend through": "La sauvegarde des journaux de discussion directement dans le stockage de votre navigateur n'est plus prise en charge. Veuillez prendre un instant pour télécharger et supprimer vos journaux de discussion en cliquant sur le bouton ci-dessous. Pas de soucis, vous pouvez facilement les réimporter depuis le backend via l'interface ci-dessous",
+	"Saving chat logs directly to your browser's storage is no longer supported. Please take a moment to download and delete your chat logs by clicking the button below. Don't worry, you can easily re-import your chat logs to the backend through": "La sauvegarde des journaux de conversation directement dans le stockage de votre navigateur n'est plus prise en charge. Veuillez prendre un instant pour télécharger et supprimer vos journaux de conversation en cliquant sur le bouton ci-dessous. Ne vous inquiétez pas, vous pouvez facilement réimporter vos journaux de conversation dans le backend via",
 	"Scan": "Scanner",
 	"Scan complete!": "Scan terminé !",
 	"Scan for documents from {{path}}": "Scanner des documents depuis {{path}}",
@@ -535,20 +548,20 @@
 	"Search": "Recherche",
 	"Search a model": "Rechercher un modèle",
 	"Search Chats": "Rechercher des conversations",
-	"Search Documents": "Recherche de documents",
-	"Search Functions": "Fonctions de recherche",
+	"Search Documents": "Rechercher des documents",
+	"Search Functions": "Rechercher des fonctions",
 	"Search Models": "Rechercher des modèles",
-	"Search Prompts": "Recherche de prompts",
+	"Search Prompts": "Rechercher des prompts",
 	"Search Query Generation Prompt": "Génération d'interrogation de recherche",
 	"Search Result Count": "Nombre de résultats de recherche",
-	"Search Tools": "Outils de recherche",
-	"SearchApi API Key": "",
-	"SearchApi Engine": "",
+	"Search Tools": "Rechercher des outils",
+	"SearchApi API Key": "Clé API SearchApi",
+	"SearchApi Engine": "Moteur de recherche SearchApi",
 	"Searched {{count}} sites_one": "Recherché {{count}} site(s)_one",
 	"Searched {{count}} sites_many": "Recherché {{count}} sites_many",
 	"Searched {{count}} sites_other": "Recherché {{count}} sites_autres",
 	"Searching \"{{searchQuery}}\"": "Recherche de « {{searchQuery}} »",
-	"Searching Knowledge for \"{{searchQuery}}\"": "",
+	"Searching Knowledge for \"{{searchQuery}}\"": "Recherche des connaissances pour « {{searchQuery}} »",
 	"Searxng Query URL": "URL de recherche Searxng",
 	"See readme.md for instructions": "Voir le fichier readme.md pour les instructions",
 	"See what's new": "Découvrez les nouvelles fonctionnalités",
@@ -569,19 +582,23 @@
 	"Send": "Envoyer",
 	"Send a Message": "Envoyer un message",
 	"Send message": "Envoyer un message",
+	"Sends `stream_options: { include_usage: true }` in the request.\nSupported providers will return token usage information in the response when set.": "",
 	"September": "Septembre",
 	"Serper API Key": "Clé API Serper",
 	"Serply API Key": "Clé API Serply",
 	"Serpstack API Key": "Clé API Serpstack",
 	"Server connection verified": "Connexion au serveur vérifiée",
 	"Set as default": "Définir comme valeur par défaut",
+	"Set CFG Scale": "",
 	"Set Default Model": "Définir le modèle par défaut",
-	"Set embedding model (e.g. {{model}})": "Définir le modèle d'encodage (par ex. {{model}})",
+	"Set embedding model (e.g. {{model}})": "Définir le modèle d'embedding (par ex. {{model}})",
 	"Set Image Size": "Définir la taille de l'image",
-	"Set reranking model (e.g. {{model}})": "Définir le modèle de reclassement (par ex. {{model}})",
-	"Set Steps": "Définir les étapes",
+	"Set reranking model (e.g. {{model}})": "Définir le modèle de ré-ranking (par ex. {{model}})",
+	"Set Sampler": "",
+	"Set Scheduler": "",
+	"Set Steps": "Définir le nombre d'étapes",
 	"Set Task Model": "Définir le modèle de tâche",
-	"Set Voice": "Définir la voix",
+	"Set Voice": "Choisir la voix",
 	"Settings": "Paramètres",
 	"Settings saved successfully!": "Paramètres enregistrés avec succès !",
 	"Share": "Partager",
@@ -589,7 +606,7 @@
 	"Share to OpenWebUI Community": "Partager avec la communauté OpenWebUI",
 	"short-summary": "résumé concis",
 	"Show": "Montrer",
-	"Show Admin Details in Account Pending Overlay": "Afficher les détails de l'administrateur dans la superposition en attente du compte",
+	"Show Admin Details in Account Pending Overlay": "Afficher les coordonnées de l'administrateur dans l'écran du compte en attente",
 	"Show Model": "Montrer le modèle",
 	"Show shortcuts": "Afficher les raccourcis",
 	"Show your support!": "Montre ton soutien !",
@@ -601,9 +618,11 @@
 	"Source": "Source",
 	"Speech recognition error: {{error}}": "Erreur de reconnaissance vocale\u00a0: {{error}}",
 	"Speech-to-Text Engine": "Moteur de reconnaissance vocale",
+	"Speed Rate": "",
 	"Stop Sequence": "Séquence d'arrêt",
-	"STT Model": "Modèle de STT",
-	"STT Settings": "Paramètres de STT",
+	"Stream Chat Response": "",
+	"STT Model": "Modèle de Speech-to-Text",
+	"STT Settings": "Paramètres de Speech-to-Text",
 	"Submit": "Soumettre",
 	"Subtitle (e.g. about the Roman Empire)": "Sous-titres (par ex. sur l'Empire romain)",
 	"Success": "Réussite",
@@ -621,12 +640,12 @@
 	"Template": "Template",
 	"Temporary Chat": "Chat éphémère",
 	"Text Completion": "Complétion de texte",
-	"Text-to-Speech Engine": "Moteur de synthèse vocale",
+	"Text-to-Speech Engine": "Moteur de Text-to-Speech",
 	"Tfs Z": "Tfs Z",
 	"Thanks for your feedback!": "Merci pour vos commentaires !",
 	"The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "Les développeurs de ce plugin sont des bénévoles passionnés issus de la communauté. Si vous trouvez ce plugin utile, merci de contribuer à son développement.",
-	"The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "",
-	"The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "",
+	"The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "La taille maximale du fichier en Mo. Si la taille du fichier dépasse cette limite, le fichier ne sera pas téléchargé.",
+	"The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "Le nombre maximal de fichiers pouvant être utilisés en même temps dans la conversation. Si le nombre de fichiers dépasse cette limite, les fichiers ne seront pas téléchargés.",
 	"The score should be a value between 0.0 (0%) and 1.0 (100%).": "Le score doit être une valeur comprise entre 0,0 (0\u00a0%) et 1,0 (100\u00a0%).",
 	"Theme": "Thème",
 	"Thinking...": "En train de réfléchir...",
@@ -644,18 +663,18 @@
 	"Title cannot be an empty string.": "Le titre ne peut pas être une chaîne de caractères vide.",
 	"Title Generation Prompt": "Prompt de génération de titre",
 	"to": "à",
-	"To access the available model names for downloading,": "Pour accéder aux noms des modèles disponibles en téléchargement,",
-	"To access the GGUF models available for downloading,": "Pour accéder aux modèles GGUF disponibles en téléchargement,",
+	"To access the available model names for downloading,": "Pour accéder aux noms des modèles disponibles,",
+	"To access the GGUF models available for downloading,": "Pour accéder aux modèles GGUF disponibles,",
 	"To access the WebUI, please reach out to the administrator. Admins can manage user statuses from the Admin Panel.": "Pour accéder à l'interface Web, veuillez contacter l'administrateur. Les administrateurs peuvent gérer les statuts des utilisateurs depuis le panneau d'administration.",
 	"To add documents here, upload them to the \"Documents\" workspace first.": "Pour ajouter des documents ici, téléchargez-les d'abord dans l'espace de travail « Documents ». ",
-	"to chat input.": "à l'entrée de discussion.",
+	"to chat input.": "Vers la zone de chat.",
 	"To select actions here, add them to the \"Functions\" workspace first.": "Pour sélectionner des actions ici, ajoutez-les d'abord à l'espace de travail « Fonctions ».",
 	"To select filters here, add them to the \"Functions\" workspace first.": "Pour sélectionner des filtres ici, ajoutez-les d'abord à l'espace de travail « Fonctions ». ",
 	"To select toolkits here, add them to the \"Tools\" workspace first.": "Pour sélectionner des toolkits ici, ajoutez-les d'abord à l'espace de travail « Outils ». ",
 	"Today": "Aujourd'hui",
 	"Toggle settings": "Basculer les paramètres",
 	"Toggle sidebar": "Basculer la barre latérale",
-	"Tokens To Keep On Context Refresh (num_keep)": "Jeton à conserver pour l'actualisation du contexte (num_keep)",
+	"Tokens To Keep On Context Refresh (num_keep)": "Jeton à conserver lors du rafraîchissement du contexte (num_keep)",
 	"Tool created successfully": "L'outil a été créé avec succès",
 	"Tool deleted successfully": "Outil supprimé avec succès",
 	"Tool imported successfully": "Outil importé avec succès",
@@ -669,14 +688,14 @@
 	"Tools have a function calling system that allows arbitrary code execution.": "Les outils ont un système d'appel de fonction qui permet l'exécution de code arbitraire.",
 	"Top K": "Top K",
 	"Top P": "Top P",
-	"Trouble accessing Ollama?": "Rencontrez-vous des difficultés pour accéder à Ollama ?",
-	"TTS Model": "Modèle de synthèse vocale",
-	"TTS Settings": "Paramètres de synthèse vocale",
-	"TTS Voice": "Voix TTS",
+	"Trouble accessing Ollama?": "Problèmes d'accès à Ollama ?",
+	"TTS Model": "Modèle de Text-to-Speech",
+	"TTS Settings": "Paramètres de Text-to-Speech",
+	"TTS Voice": "Voix de Text-to-Speech",
 	"Type": "Type",
 	"Type Hugging Face Resolve (Download) URL": "Entrez l'URL de Téléchargement Hugging Face Resolve",
 	"Uh-oh! There was an issue connecting to {{provider}}.": "Oh non ! Un problème est survenu lors de la connexion à {{provider}}.",
-	"UI": "Interface utilisateur",
+	"UI": "UI",
 	"Unknown file type '{{file_type}}'. Proceeding with the file upload anyway.": "Type de fichier inconnu '{{file_type}}'. Continuons tout de même le téléchargement du fichier.",
 	"Unpin": "Désépingler",
 	"Update": "Mise à jour",
@@ -689,11 +708,11 @@
 	"Upload Pipeline": "Pipeline de téléchargement",
 	"Upload Progress": "Progression de l'envoi",
 	"URL Mode": "Mode d'URL",
-	"Use '#' in the prompt input to load and select your documents.": "Utilisez '#' dans l'entrée de prompt pour charger et sélectionner vos documents.",
+	"Use '#' in the prompt input to load and select your documents.": "Utilisez '#' dans la zone de saisie du prompt pour charger et sélectionner vos documents.",
 	"Use Gravatar": "Utilisez Gravatar",
 	"Use Initials": "Utiliser les initiales",
-	"use_mlock (Ollama)": "use_mlock (Ollama)",
-	"use_mmap (Ollama)": "utiliser mmap (Ollama)",
+	"use_mlock (Ollama)": "Utiliser mlock (Ollama)",
+	"use_mmap (Ollama)": "Utiliser mmap (Ollama)",
 	"user": "utilisateur",
 	"User location successfully retrieved.": "L'emplacement de l'utilisateur a été récupéré avec succès.",
 	"User Permissions": "Permissions utilisateur",
@@ -705,14 +724,14 @@
 	"Valves updated successfully": "Les vannes ont été mises à jour avec succès",
 	"variable": "variable",
 	"variable to have them replaced with clipboard content.": "variable pour qu'elles soient remplacées par le contenu du presse-papiers.",
-	"Version": "Version améliorée",
+	"Version": "version:",
 	"Voice": "Voix",
 	"Warning": "Avertissement !",
 	"Warning:": "Avertissement :",
-	"Warning: If you update or change your embedding model, you will need to re-import all documents.": "Avertissement : Si vous mettez à jour ou modifiez votre modèle d'encodage, vous devrez réimporter tous les documents.",
+	"Warning: If you update or change your embedding model, you will need to re-import all documents.": "Avertissement : Si vous mettez à jour ou modifiez votre modèle d'embedding, vous devrez réimporter tous les documents.",
 	"Web": "Web",
 	"Web API": "API Web",
-	"Web Loader Settings": "Paramètres du chargeur web",
+	"Web Loader Settings": "Paramètres du Web Loader",
 	"Web Search": "Recherche Web",
 	"Web Search Engine": "Moteur de recherche Web",
 	"Webhook URL": "URL du webhook",
@@ -720,18 +739,18 @@
 	"WebUI will make requests to": "WebUI effectuera des requêtes vers",
 	"What’s New in": "Quoi de neuf",
 	"Whisper (Local)": "Whisper (local)",
-	"Widescreen Mode": "Mode Grand Écran",
+	"Widescreen Mode": "Mode grand écran",
 	"Workspace": "Espace de travail",
 	"Write a prompt suggestion (e.g. Who are you?)": "Écrivez une suggestion de prompt (par exemple : Qui êtes-vous ?)",
 	"Write a summary in 50 words that summarizes [topic or keyword].": "Rédigez un résumé de 50 mots qui résume [sujet ou mot-clé].",
 	"Yesterday": "Hier",
 	"You": "Vous",
-	"You can only chat with a maximum of {{maxCount}} file(s) at a time.": "",
-	"You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "Vous pouvez personnaliser vos interactions avec les LLM en ajoutant des souvenirs via le bouton 'Gérer' ci-dessous, ce qui les rendra plus utiles et adaptés à vos besoins.",
+	"You can only chat with a maximum of {{maxCount}} file(s) at a time.": "Vous ne pouvez discuter qu'avec un maximum de {{maxCount}} fichier(s) à la fois.",
+	"You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "Vous pouvez personnaliser vos interactions avec les LLM en ajoutant des mémoires à l'aide du bouton « Gérer » ci-dessous, ce qui les rendra plus utiles et mieux adaptées à vos besoins.",
 	"You cannot clone a base model": "Vous ne pouvez pas cloner un modèle de base",
-	"You have no archived conversations.": "Vous n'avez aucune conversation archivée",
+	"You have no archived conversations.": "Vous n'avez aucune conversation archivée.",
 	"You have shared this chat": "Vous avez partagé cette conversation.",
-	"You're a helpful assistant.": "Vous êtes un assistant serviable.",
+	"You're a helpful assistant.": "Vous êtes un assistant efficace.",
 	"You're now logged in.": "Vous êtes désormais connecté.",
 	"Your account status is currently pending activation.": "Votre statut de compte est actuellement en attente d'activation.",
 	"Your entire contribution will go directly to the plugin developer; Open WebUI does not take any percentage. However, the chosen funding platform might have its own fees.": "L'intégralité de votre contribution ira directement au développeur du plugin ; Open WebUI ne prend aucun pourcentage. Cependant, la plateforme de financement choisie peut avoir ses propres frais.",

+ 19 - 0
src/lib/i18n/locales/he-IL/translation.json

@@ -73,7 +73,10 @@
 	"AUTOMATIC1111 Api Auth String": "",
 	"AUTOMATIC1111 Base URL": "כתובת URL בסיסית של AUTOMATIC1111",
 	"AUTOMATIC1111 Base URL is required.": "נדרשת כתובת URL בסיסית של AUTOMATIC1111",
+	"Available list": "",
 	"available!": "זמין!",
+	"Azure AI Speech": "",
+	"Azure Region": "",
 	"Back": "חזור",
 	"Bad Response": "תגובה שגויה",
 	"Banners": "באנרים",
@@ -94,6 +97,7 @@
 	"Chat Bubble UI": "UI של תיבת הדיבור",
 	"Chat Controls": "",
 	"Chat direction": "כיוון צ'אט",
+	"Chat Overview": "",
 	"Chats": "צ'אטים",
 	"Check Again": "בדוק שוב",
 	"Check for updates": "בדוק עדכונים",
@@ -238,6 +242,7 @@
 	"Enter a detail about yourself for your LLMs to recall": "הזן פרטים על עצמך כדי שLLMs יזכור",
 	"Enter api auth string (e.g. username:password)": "",
 	"Enter Brave Search API Key": "הזן מפתח API של חיפוש אמיץ",
+	"Enter CFG Scale (e.g. 7.0)": "",
 	"Enter Chunk Overlap": "הזן חפיפת נתונים",
 	"Enter Chunk Size": "הזן גודל נתונים",
 	"Enter Github Raw URL": "הזן כתובת URL של Github Raw",
@@ -248,6 +253,8 @@
 	"Enter Model ID": "",
 	"Enter model tag (e.g. {{modelTag}})": "הזן תג מודל (למשל {{modelTag}})",
 	"Enter Number of Steps (e.g. 50)": "הזן מספר שלבים (למשל 50)",
+	"Enter Sampler (e.g. Euler a)": "",
+	"Enter Scheduler (e.g. Karras)": "",
 	"Enter Score": "הזן ציון",
 	"Enter SearchApi API Key": "",
 	"Enter SearchApi Engine": "",
@@ -408,6 +415,8 @@
 	"Model {{modelId}} not found": "המודל {{modelId}} לא נמצא",
 	"Model {{modelName}} is not vision capable": "דגם {{modelName}} אינו בעל יכולת ראייה",
 	"Model {{name}} is now {{status}}": "דגם {{name}} הוא כעת {{status}}",
+	"Model {{name}} is now at the top": "",
+	"Model accepts image inputs": "",
 	"Model created successfully!": "",
 	"Model filesystem path detected. Model shortname is required for update, cannot continue.": "נתיב מערכת הקבצים של המודל זוהה. נדרש שם קצר של המודל לעדכון, לא ניתן להמשיך.",
 	"Model ID": "מזהה דגם",
@@ -419,6 +428,7 @@
 	"Modelfile Content": "תוכן קובץ מודל",
 	"Models": "מודלים",
 	"More": "עוד",
+	"Move to Top": "",
 	"Name": "שם",
 	"Name Tag": "תג שם",
 	"Name your model": "תן שם לדגם שלך",
@@ -464,6 +474,8 @@
 	"OpenAI URL/Key required.": "נדרשת כתובת URL/מפתח של OpenAI.",
 	"or": "או",
 	"Other": "אחר",
+	"Output format": "",
+	"Overview": "",
 	"Password": "סיסמה",
 	"PDF document (.pdf)": "מסמך PDF (.pdf)",
 	"PDF Extract Images (OCR)": "חילוץ תמונות מ-PDF (OCR)",
@@ -526,6 +538,7 @@
 	"Save": "שמור",
 	"Save & Create": "שמור וצור",
 	"Save & Update": "שמור ועדכן",
+	"Save As Copy": "",
 	"Save Tag": "",
 	"Saving chat logs directly to your browser's storage is no longer supported. Please take a moment to download and delete your chat logs by clicking the button below. Don't worry, you can easily re-import your chat logs to the backend through": "שמירת יומני צ'אט ישירות באחסון הדפדפן שלך אינה נתמכת יותר. אנא הקדש רגע להוריד ולמחוק את יומני הצ'אט שלך על ידי לחיצה על הכפתור למטה. אל דאגה, באפשרותך לייבא מחדש בקלות את יומני הצ'אט שלך לשרת האחורי דרך",
 	"Scan": "סרוק",
@@ -569,16 +582,20 @@
 	"Send": "שלח",
 	"Send a Message": "שלח הודעה",
 	"Send message": "שלח הודעה",
+	"Sends `stream_options: { include_usage: true }` in the request.\nSupported providers will return token usage information in the response when set.": "",
 	"September": "ספטמבר",
 	"Serper API Key": "מפתח Serper API",
 	"Serply API Key": "",
 	"Serpstack API Key": "מפתח API של Serpstack",
 	"Server connection verified": "החיבור לשרת אומת",
 	"Set as default": "הגדר כברירת מחדל",
+	"Set CFG Scale": "",
 	"Set Default Model": "הגדר מודל ברירת מחדל",
 	"Set embedding model (e.g. {{model}})": "הגדר מודל הטמעה (למשל {{model}})",
 	"Set Image Size": "הגדר גודל תמונה",
 	"Set reranking model (e.g. {{model}})": "הגדר מודל דירוג מחדש (למשל {{model}})",
+	"Set Sampler": "",
+	"Set Scheduler": "",
 	"Set Steps": "הגדר שלבים",
 	"Set Task Model": "הגדרת מודל משימה",
 	"Set Voice": "הגדר קול",
@@ -601,7 +618,9 @@
 	"Source": "מקור",
 	"Speech recognition error: {{error}}": "שגיאת תחקור שמע: {{error}}",
 	"Speech-to-Text Engine": "מנוע תחקור שמע",
+	"Speed Rate": "",
 	"Stop Sequence": "סידור עצירה",
+	"Stream Chat Response": "",
 	"STT Model": "",
 	"STT Settings": "הגדרות חקירה של TTS",
 	"Submit": "שלח",

+ 19 - 0
src/lib/i18n/locales/hi-IN/translation.json

@@ -73,7 +73,10 @@
 	"AUTOMATIC1111 Api Auth String": "",
 	"AUTOMATIC1111 Base URL": "AUTOMATIC1111 बेस यूआरएल",
 	"AUTOMATIC1111 Base URL is required.": "AUTOMATIC1111 का बेस यूआरएल आवश्यक है।",
+	"Available list": "",
 	"available!": "उपलब्ध!",
+	"Azure AI Speech": "",
+	"Azure Region": "",
 	"Back": "पीछे",
 	"Bad Response": "ख़राब प्रतिक्रिया",
 	"Banners": "बैनर",
@@ -94,6 +97,7 @@
 	"Chat Bubble UI": "चैट बॉली",
 	"Chat Controls": "",
 	"Chat direction": "चैट दिशा",
+	"Chat Overview": "",
 	"Chats": "सभी चैट",
 	"Check Again": "फिर से जाँचो",
 	"Check for updates": "अपडेट के लिए जाँच",
@@ -238,6 +242,7 @@
 	"Enter a detail about yourself for your LLMs to recall": "अपने एलएलएम को याद करने के लिए अपने बारे में एक विवरण दर्ज करें",
 	"Enter api auth string (e.g. username:password)": "",
 	"Enter Brave Search API Key": "Brave सर्च एपीआई कुंजी डालें",
+	"Enter CFG Scale (e.g. 7.0)": "",
 	"Enter Chunk Overlap": "चंक ओवरलैप दर्ज करें",
 	"Enter Chunk Size": "खंड आकार दर्ज करें",
 	"Enter Github Raw URL": "Github Raw URL दर्ज करें",
@@ -248,6 +253,8 @@
 	"Enter Model ID": "",
 	"Enter model tag (e.g. {{modelTag}})": "Model tag दर्ज करें (उदा. {{modelTag}})",
 	"Enter Number of Steps (e.g. 50)": "चरणों की संख्या दर्ज करें (उदा. 50)",
+	"Enter Sampler (e.g. Euler a)": "",
+	"Enter Scheduler (e.g. Karras)": "",
 	"Enter Score": "स्कोर दर्ज करें",
 	"Enter SearchApi API Key": "",
 	"Enter SearchApi Engine": "",
@@ -408,6 +415,8 @@
 	"Model {{modelId}} not found": "मॉडल {{modelId}} नहीं मिला",
 	"Model {{modelName}} is not vision capable": "मॉडल {{modelName}} दृष्टि सक्षम नहीं है",
 	"Model {{name}} is now {{status}}": "मॉडल {{name}} अब {{status}} है",
+	"Model {{name}} is now at the top": "",
+	"Model accepts image inputs": "",
 	"Model created successfully!": "",
 	"Model filesystem path detected. Model shortname is required for update, cannot continue.": "मॉडल फ़ाइल सिस्टम पथ का पता चला. अद्यतन के लिए मॉडल संक्षिप्त नाम आवश्यक है, जारी नहीं रखा जा सकता।",
 	"Model ID": "मॉडल आईडी",
@@ -419,6 +428,7 @@
 	"Modelfile Content": "मॉडल फ़ाइल सामग्री",
 	"Models": "सभी मॉडल",
 	"More": "और..",
+	"Move to Top": "",
 	"Name": "नाम",
 	"Name Tag": "नाम टैग",
 	"Name your model": "अपने मॉडल को नाम दें",
@@ -464,6 +474,8 @@
 	"OpenAI URL/Key required.": "OpenAI URL/Key आवश्यक है।",
 	"or": "या",
 	"Other": "अन्य",
+	"Output format": "",
+	"Overview": "",
 	"Password": "पासवर्ड",
 	"PDF document (.pdf)": "PDF दस्तावेज़ (.pdf)",
 	"PDF Extract Images (OCR)": "PDF छवियाँ निकालें (OCR)",
@@ -526,6 +538,7 @@
 	"Save": "सहेजें",
 	"Save & Create": "सहेजें और बनाएं",
 	"Save & Update": "सहेजें और अपडेट करें",
+	"Save As Copy": "",
 	"Save Tag": "",
 	"Saving chat logs directly to your browser's storage is no longer supported. Please take a moment to download and delete your chat logs by clicking the button below. Don't worry, you can easily re-import your chat logs to the backend through": "चैट लॉग को सीधे आपके ब्राउज़र के स्टोरेज में सहेजना अब समर्थित नहीं है। कृपया नीचे दिए गए बटन पर क्लिक करके डाउनलोड करने और अपने चैट लॉग को हटाने के लिए कुछ समय दें। चिंता न करें, आप आसानी से अपने चैट लॉग को बैकएंड पर पुनः आयात कर सकते हैं",
 	"Scan": "स्कैन",
@@ -568,16 +581,20 @@
 	"Send": "भेज",
 	"Send a Message": "एक संदेश भेजो",
 	"Send message": "मेसेज भेजें",
+	"Sends `stream_options: { include_usage: true }` in the request.\nSupported providers will return token usage information in the response when set.": "",
 	"September": "सितंबर",
 	"Serper API Key": "Serper API कुंजी",
 	"Serply API Key": "",
 	"Serpstack API Key": "सर्पस्टैक एपीआई कुंजी",
 	"Server connection verified": "सर्वर कनेक्शन सत्यापित",
 	"Set as default": "डिफाल्ट के रूप में सेट",
+	"Set CFG Scale": "",
 	"Set Default Model": "डिफ़ॉल्ट मॉडल सेट करें",
 	"Set embedding model (e.g. {{model}})": "ईम्बेडिंग मॉडल सेट करें (उदाहरण: {{model}})",
 	"Set Image Size": "छवि का आकार सेट करें",
 	"Set reranking model (e.g. {{model}})": "रीकरण मॉडल सेट करें (उदाहरण: {{model}})",
+	"Set Sampler": "",
+	"Set Scheduler": "",
 	"Set Steps": "चरण निर्धारित करें",
 	"Set Task Model": "कार्य मॉडल सेट करें",
 	"Set Voice": "आवाज सेट करें",
@@ -600,7 +617,9 @@
 	"Source": "स्रोत",
 	"Speech recognition error: {{error}}": "वाक् पहचान त्रुटि: {{error}}",
 	"Speech-to-Text Engine": "वाक्-से-पाठ इंजन",
+	"Speed Rate": "",
 	"Stop Sequence": "अनुक्रम रोकें",
+	"Stream Chat Response": "",
 	"STT Model": "",
 	"STT Settings": "STT सेटिंग्स ",
 	"Submit": "सबमिट करें",

+ 19 - 0
src/lib/i18n/locales/hr-HR/translation.json

@@ -73,7 +73,10 @@
 	"AUTOMATIC1111 Api Auth String": "",
 	"AUTOMATIC1111 Base URL": "AUTOMATIC1111 osnovni URL",
 	"AUTOMATIC1111 Base URL is required.": "Potreban je AUTOMATIC1111 osnovni URL.",
+	"Available list": "",
 	"available!": "dostupno!",
+	"Azure AI Speech": "",
+	"Azure Region": "",
 	"Back": "Natrag",
 	"Bad Response": "Loš odgovor",
 	"Banners": "Baneri",
@@ -94,6 +97,7 @@
 	"Chat Bubble UI": "Razgovor - Bubble UI",
 	"Chat Controls": "",
 	"Chat direction": "Razgovor - smijer",
+	"Chat Overview": "",
 	"Chats": "Razgovori",
 	"Check Again": "Provjeri ponovo",
 	"Check for updates": "Provjeri za ažuriranja",
@@ -238,6 +242,7 @@
 	"Enter a detail about yourself for your LLMs to recall": "Unesite pojedinosti o sebi da bi učitali memoriju u LLM",
 	"Enter api auth string (e.g. username:password)": "",
 	"Enter Brave Search API Key": "Unesite Brave Search API ključ",
+	"Enter CFG Scale (e.g. 7.0)": "",
 	"Enter Chunk Overlap": "Unesite preklapanje dijelova",
 	"Enter Chunk Size": "Unesite veličinu dijela",
 	"Enter Github Raw URL": "Unesite Github sirovi URL",
@@ -248,6 +253,8 @@
 	"Enter Model ID": "",
 	"Enter model tag (e.g. {{modelTag}})": "Unesite oznaku modela (npr. {{modelTag}})",
 	"Enter Number of Steps (e.g. 50)": "Unesite broj koraka (npr. 50)",
+	"Enter Sampler (e.g. Euler a)": "",
+	"Enter Scheduler (e.g. Karras)": "",
 	"Enter Score": "Unesite ocjenu",
 	"Enter SearchApi API Key": "",
 	"Enter SearchApi Engine": "",
@@ -408,6 +415,8 @@
 	"Model {{modelId}} not found": "Model {{modelId}} nije pronađen",
 	"Model {{modelName}} is not vision capable": "Model {{modelName}} ne čita vizualne impute",
 	"Model {{name}} is now {{status}}": "Model {{name}} sada je {{status}}",
+	"Model {{name}} is now at the top": "",
+	"Model accepts image inputs": "",
 	"Model created successfully!": "",
 	"Model filesystem path detected. Model shortname is required for update, cannot continue.": "Otkriven put datotečnog sustava modela. Kratko ime modela je potrebno za ažuriranje, nije moguće nastaviti.",
 	"Model ID": "ID modela",
@@ -419,6 +428,7 @@
 	"Modelfile Content": "Sadržaj datoteke modela",
 	"Models": "Modeli",
 	"More": "Više",
+	"Move to Top": "",
 	"Name": "Ime",
 	"Name Tag": "Naziv oznake",
 	"Name your model": "Dodijelite naziv modelu",
@@ -464,6 +474,8 @@
 	"OpenAI URL/Key required.": "Potreban je OpenAI URL/ključ.",
 	"or": "ili",
 	"Other": "Ostalo",
+	"Output format": "",
+	"Overview": "",
 	"Password": "Lozinka",
 	"PDF document (.pdf)": "PDF dokument (.pdf)",
 	"PDF Extract Images (OCR)": "PDF izdvajanje slika (OCR)",
@@ -526,6 +538,7 @@
 	"Save": "Spremi",
 	"Save & Create": "Spremi i stvori",
 	"Save & Update": "Spremi i ažuriraj",
+	"Save As Copy": "",
 	"Save Tag": "",
 	"Saving chat logs directly to your browser's storage is no longer supported. Please take a moment to download and delete your chat logs by clicking the button below. Don't worry, you can easily re-import your chat logs to the backend through": "Spremanje zapisnika razgovora izravno u pohranu vašeg preglednika više nije podržano. Molimo vas da odvojite trenutak za preuzimanje i brisanje zapisnika razgovora klikom na gumb ispod. Ne brinite, možete lako ponovno uvesti zapisnike razgovora u backend putem",
 	"Scan": "Skeniraj",
@@ -569,16 +582,20 @@
 	"Send": "Pošalji",
 	"Send a Message": "Pošaljite poruku",
 	"Send message": "Pošalji poruku",
+	"Sends `stream_options: { include_usage: true }` in the request.\nSupported providers will return token usage information in the response when set.": "",
 	"September": "Rujan",
 	"Serper API Key": "Serper API ključ",
 	"Serply API Key": "Serply API ključ",
 	"Serpstack API Key": "Serpstack API API ključ",
 	"Server connection verified": "Veza s poslužiteljem potvrđena",
 	"Set as default": "Postavi kao zadano",
+	"Set CFG Scale": "",
 	"Set Default Model": "Postavi zadani model",
 	"Set embedding model (e.g. {{model}})": "Postavi model za embedding (npr. {{model}})",
 	"Set Image Size": "Postavi veličinu slike",
 	"Set reranking model (e.g. {{model}})": "Postavi model za ponovno rangiranje (npr. {{model}})",
+	"Set Sampler": "",
+	"Set Scheduler": "",
 	"Set Steps": "Postavi korake",
 	"Set Task Model": "Postavite model zadatka",
 	"Set Voice": "Postavi glas",
@@ -601,7 +618,9 @@
 	"Source": "Izvor",
 	"Speech recognition error: {{error}}": "Pogreška prepoznavanja govora: {{error}}",
 	"Speech-to-Text Engine": "Stroj za prepoznavanje govora",
+	"Speed Rate": "",
 	"Stop Sequence": "Zaustavi sekvencu",
+	"Stream Chat Response": "",
 	"STT Model": "STT model",
 	"STT Settings": "STT postavke",
 	"Submit": "Pošalji",

+ 19 - 0
src/lib/i18n/locales/id-ID/translation.json

@@ -73,7 +73,10 @@
 	"AUTOMATIC1111 Api Auth String": "AUTOMATIC1111 Api Auth String",
 	"AUTOMATIC1111 Base URL": "URL Dasar AUTOMATIC1111",
 	"AUTOMATIC1111 Base URL is required.": "AUTOMATIC1111 URL Dasar diperlukan.",
+	"Available list": "",
 	"available!": "tersedia!",
+	"Azure AI Speech": "",
+	"Azure Region": "",
 	"Back": "Kembali",
 	"Bad Response": "Respons Buruk",
 	"Banners": "Spanduk",
@@ -94,6 +97,7 @@
 	"Chat Bubble UI": "UI Gelembung Obrolan",
 	"Chat Controls": "",
 	"Chat direction": "Arah obrolan",
+	"Chat Overview": "",
 	"Chats": "Obrolan",
 	"Check Again": "Periksa Lagi",
 	"Check for updates": "Memeriksa pembaruan",
@@ -238,6 +242,7 @@
 	"Enter a detail about yourself for your LLMs to recall": "Masukkan detail tentang diri Anda untuk diingat oleh LLM Anda",
 	"Enter api auth string (e.g. username:password)": "Masukkan string pengesahan API (misalnya nama pengguna: kata sandi)",
 	"Enter Brave Search API Key": "Masukkan Kunci API Pencarian Berani",
+	"Enter CFG Scale (e.g. 7.0)": "",
 	"Enter Chunk Overlap": "Masukkan Tumpang Tindih Chunk",
 	"Enter Chunk Size": "Masukkan Ukuran Potongan",
 	"Enter Github Raw URL": "Masukkan URL Mentah Github",
@@ -248,6 +253,8 @@
 	"Enter Model ID": "",
 	"Enter model tag (e.g. {{modelTag}})": "Masukkan tag model (misalnya {{modelTag}})",
 	"Enter Number of Steps (e.g. 50)": "Masukkan Jumlah Langkah (mis. 50)",
+	"Enter Sampler (e.g. Euler a)": "",
+	"Enter Scheduler (e.g. Karras)": "",
 	"Enter Score": "Masukkan Skor",
 	"Enter SearchApi API Key": "",
 	"Enter SearchApi Engine": "",
@@ -408,6 +415,8 @@
 	"Model {{modelId}} not found": "Model {{modelId}} tidak ditemukan",
 	"Model {{modelName}} is not vision capable": "Model {{modelName}} tidak dapat dilihat",
 	"Model {{name}} is now {{status}}": "Model {{name}} sekarang menjadi {{status}}",
+	"Model {{name}} is now at the top": "",
+	"Model accepts image inputs": "",
 	"Model created successfully!": "Model berhasil dibuat!",
 	"Model filesystem path detected. Model shortname is required for update, cannot continue.": "Jalur sistem berkas model terdeteksi. Nama pendek model diperlukan untuk pembaruan, tidak dapat dilanjutkan.",
 	"Model ID": "ID Model",
@@ -419,6 +428,7 @@
 	"Modelfile Content": "Konten File Model",
 	"Models": "Model",
 	"More": "Lainnya",
+	"Move to Top": "",
 	"Name": "Nama",
 	"Name Tag": "Label Nama",
 	"Name your model": "Beri nama model Anda",
@@ -464,6 +474,8 @@
 	"OpenAI URL/Key required.": "Diperlukan URL/Kunci OpenAI.",
 	"or": "atau",
 	"Other": "Lainnya",
+	"Output format": "",
+	"Overview": "",
 	"Password": "Kata sandi",
 	"PDF document (.pdf)": "Dokumen PDF (.pdf)",
 	"PDF Extract Images (OCR)": "Ekstrak Gambar PDF (OCR)",
@@ -526,6 +538,7 @@
 	"Save": "Simpan",
 	"Save & Create": "Simpan & Buat",
 	"Save & Update": "Simpan & Perbarui",
+	"Save As Copy": "",
 	"Save Tag": "",
 	"Saving chat logs directly to your browser's storage is no longer supported. Please take a moment to download and delete your chat logs by clicking the button below. Don't worry, you can easily re-import your chat logs to the backend through": "Menyimpan log obrolan secara langsung ke penyimpanan browser Anda tidak lagi didukung. Mohon luangkan waktu sejenak untuk mengunduh dan menghapus log obrolan Anda dengan mengeklik tombol di bawah ini. Jangan khawatir, Anda dapat dengan mudah mengimpor kembali log obrolan Anda ke backend melalui",
 	"Scan": "Pindai",
@@ -568,16 +581,20 @@
 	"Send": "Kirim",
 	"Send a Message": "Kirim Pesan",
 	"Send message": "Kirim pesan",
+	"Sends `stream_options: { include_usage: true }` in the request.\nSupported providers will return token usage information in the response when set.": "",
 	"September": "September",
 	"Serper API Key": "Kunci API Serper",
 	"Serply API Key": "Kunci API Serply",
 	"Serpstack API Key": "Kunci API Serpstack",
 	"Server connection verified": "Koneksi server diverifikasi",
 	"Set as default": "Ditetapkan sebagai default",
+	"Set CFG Scale": "",
 	"Set Default Model": "Tetapkan Model Default",
 	"Set embedding model (e.g. {{model}})": "Tetapkan model penyematan (mis. {{model}})",
 	"Set Image Size": "Mengatur Ukuran Gambar",
 	"Set reranking model (e.g. {{model}})": "Tetapkan model pemeringkatan ulang (mis. {{model}})",
+	"Set Sampler": "",
+	"Set Scheduler": "",
 	"Set Steps": "Tetapkan Langkah",
 	"Set Task Model": "Tetapkan Model Tugas",
 	"Set Voice": "Mengatur Suara",
@@ -600,7 +617,9 @@
 	"Source": "Sumber",
 	"Speech recognition error: {{error}}": "Kesalahan pengenalan suara: {{error}}",
 	"Speech-to-Text Engine": "Mesin Pengenal Ucapan ke Teks",
+	"Speed Rate": "",
 	"Stop Sequence": "Hentikan Urutan",
+	"Stream Chat Response": "",
 	"STT Model": "Model STT",
 	"STT Settings": "Pengaturan STT",
 	"Submit": "Kirim",

+ 19 - 0
src/lib/i18n/locales/it-IT/translation.json

@@ -73,7 +73,10 @@
 	"AUTOMATIC1111 Api Auth String": "",
 	"AUTOMATIC1111 Base URL": "URL base AUTOMATIC1111",
 	"AUTOMATIC1111 Base URL is required.": "L'URL base AUTOMATIC1111 è obbligatorio.",
+	"Available list": "",
 	"available!": "disponibile!",
+	"Azure AI Speech": "",
+	"Azure Region": "",
 	"Back": "Indietro",
 	"Bad Response": "Risposta non valida",
 	"Banners": "Banner",
@@ -94,6 +97,7 @@
 	"Chat Bubble UI": "UI bolle chat",
 	"Chat Controls": "",
 	"Chat direction": "Direzione chat",
+	"Chat Overview": "",
 	"Chats": "Chat",
 	"Check Again": "Controlla di nuovo",
 	"Check for updates": "Controlla aggiornamenti",
@@ -238,6 +242,7 @@
 	"Enter a detail about yourself for your LLMs to recall": "Inserisci un dettaglio su di te per che i LLM possano ricordare",
 	"Enter api auth string (e.g. username:password)": "",
 	"Enter Brave Search API Key": "Inserisci la chiave API di Brave Search",
+	"Enter CFG Scale (e.g. 7.0)": "",
 	"Enter Chunk Overlap": "Inserisci la sovrapposizione chunk",
 	"Enter Chunk Size": "Inserisci la dimensione chunk",
 	"Enter Github Raw URL": "Immettere l'URL grezzo di Github",
@@ -248,6 +253,8 @@
 	"Enter Model ID": "",
 	"Enter model tag (e.g. {{modelTag}})": "Inserisci il tag del modello (ad esempio {{modelTag}})",
 	"Enter Number of Steps (e.g. 50)": "Inserisci il numero di passaggi (ad esempio 50)",
+	"Enter Sampler (e.g. Euler a)": "",
+	"Enter Scheduler (e.g. Karras)": "",
 	"Enter Score": "Inserisci il punteggio",
 	"Enter SearchApi API Key": "",
 	"Enter SearchApi Engine": "",
@@ -408,6 +415,8 @@
 	"Model {{modelId}} not found": "Modello {{modelId}} non trovato",
 	"Model {{modelName}} is not vision capable": "Il modello {{modelName}} non è in grado di vedere",
 	"Model {{name}} is now {{status}}": "Il modello {{name}} è ora {{status}}",
+	"Model {{name}} is now at the top": "",
+	"Model accepts image inputs": "",
 	"Model created successfully!": "",
 	"Model filesystem path detected. Model shortname is required for update, cannot continue.": "Percorso del filesystem del modello rilevato. Il nome breve del modello è richiesto per l'aggiornamento, impossibile continuare.",
 	"Model ID": "ID modello",
@@ -419,6 +428,7 @@
 	"Modelfile Content": "Contenuto del file modello",
 	"Models": "Modelli",
 	"More": "Altro",
+	"Move to Top": "",
 	"Name": "Nome",
 	"Name Tag": "Nome tag",
 	"Name your model": "Assegna un nome al tuo modello",
@@ -464,6 +474,8 @@
 	"OpenAI URL/Key required.": "URL/Chiave OpenAI obbligatori.",
 	"or": "o",
 	"Other": "Altro",
+	"Output format": "",
+	"Overview": "",
 	"Password": "Password",
 	"PDF document (.pdf)": "Documento PDF (.pdf)",
 	"PDF Extract Images (OCR)": "Estrazione immagini PDF (OCR)",
@@ -526,6 +538,7 @@
 	"Save": "Salva",
 	"Save & Create": "Salva e crea",
 	"Save & Update": "Salva e aggiorna",
+	"Save As Copy": "",
 	"Save Tag": "",
 	"Saving chat logs directly to your browser's storage is no longer supported. Please take a moment to download and delete your chat logs by clicking the button below. Don't worry, you can easily re-import your chat logs to the backend through": "Il salvataggio dei registri della chat direttamente nell'archivio del browser non è più supportato. Si prega di dedicare un momento per scaricare ed eliminare i registri della chat facendo clic sul pulsante in basso. Non preoccuparti, puoi facilmente reimportare i registri della chat nel backend tramite",
 	"Scan": "Scansione",
@@ -569,16 +582,20 @@
 	"Send": "Invia",
 	"Send a Message": "Invia un messaggio",
 	"Send message": "Invia messaggio",
+	"Sends `stream_options: { include_usage: true }` in the request.\nSupported providers will return token usage information in the response when set.": "",
 	"September": "Settembre",
 	"Serper API Key": "Chiave API Serper",
 	"Serply API Key": "",
 	"Serpstack API Key": "Chiave API Serpstack",
 	"Server connection verified": "Connessione al server verificata",
 	"Set as default": "Imposta come predefinito",
+	"Set CFG Scale": "",
 	"Set Default Model": "Imposta modello predefinito",
 	"Set embedding model (e.g. {{model}})": "Imposta modello di embedding (ad esempio {{model}})",
 	"Set Image Size": "Imposta dimensione immagine",
 	"Set reranking model (e.g. {{model}})": "Imposta modello di riclassificazione (ad esempio {{model}})",
+	"Set Sampler": "",
+	"Set Scheduler": "",
 	"Set Steps": "Imposta passaggi",
 	"Set Task Model": "Imposta modello di attività",
 	"Set Voice": "Imposta voce",
@@ -601,7 +618,9 @@
 	"Source": "Fonte",
 	"Speech recognition error: {{error}}": "Errore di riconoscimento vocale: {{error}}",
 	"Speech-to-Text Engine": "Motore da voce a testo",
+	"Speed Rate": "",
 	"Stop Sequence": "Sequenza di arresto",
+	"Stream Chat Response": "",
 	"STT Model": "",
 	"STT Settings": "Impostazioni STT",
 	"Submit": "Invia",

+ 19 - 0
src/lib/i18n/locales/ja-JP/translation.json

@@ -73,7 +73,10 @@
 	"AUTOMATIC1111 Api Auth String": "",
 	"AUTOMATIC1111 Base URL": "AUTOMATIC1111 ベース URL",
 	"AUTOMATIC1111 Base URL is required.": "AUTOMATIC1111 ベース URL が必要です。",
+	"Available list": "",
 	"available!": "利用可能!",
+	"Azure AI Speech": "",
+	"Azure Region": "",
 	"Back": "戻る",
 	"Bad Response": "応答が悪い",
 	"Banners": "バナー",
@@ -94,6 +97,7 @@
 	"Chat Bubble UI": "チャットバブルUI",
 	"Chat Controls": "",
 	"Chat direction": "チャットの方向",
+	"Chat Overview": "",
 	"Chats": "チャット",
 	"Check Again": "再確認",
 	"Check for updates": "アップデートを確認",
@@ -238,6 +242,7 @@
 	"Enter a detail about yourself for your LLMs to recall": "LLM が記憶するために、自分についての詳細を入力してください",
 	"Enter api auth string (e.g. username:password)": "",
 	"Enter Brave Search API Key": "Brave Search APIキーの入力",
+	"Enter CFG Scale (e.g. 7.0)": "",
 	"Enter Chunk Overlap": "チャンクオーバーラップを入力してください",
 	"Enter Chunk Size": "チャンクサイズを入力してください",
 	"Enter Github Raw URL": "Github Raw URLを入力",
@@ -248,6 +253,8 @@
 	"Enter Model ID": "",
 	"Enter model tag (e.g. {{modelTag}})": "モデルタグを入力してください (例: {{modelTag}})",
 	"Enter Number of Steps (e.g. 50)": "ステップ数を入力してください (例: 50)",
+	"Enter Sampler (e.g. Euler a)": "",
+	"Enter Scheduler (e.g. Karras)": "",
 	"Enter Score": "スコアを入力してください",
 	"Enter SearchApi API Key": "",
 	"Enter SearchApi Engine": "",
@@ -408,6 +415,8 @@
 	"Model {{modelId}} not found": "モデル {{modelId}} が見つかりません",
 	"Model {{modelName}} is not vision capable": "モデル {{modelName}} は視覚に対応していません",
 	"Model {{name}} is now {{status}}": "モデル {{name}} は {{status}} になりました。",
+	"Model {{name}} is now at the top": "",
+	"Model accepts image inputs": "",
 	"Model created successfully!": "",
 	"Model filesystem path detected. Model shortname is required for update, cannot continue.": "モデルファイルシステムパスが検出されました。モデルの短縮名が必要です。更新できません。",
 	"Model ID": "モデルID",
@@ -419,6 +428,7 @@
 	"Modelfile Content": "モデルファイルの内容",
 	"Models": "モデル",
 	"More": "もっと見る",
+	"Move to Top": "",
 	"Name": "名前",
 	"Name Tag": "名前タグ",
 	"Name your model": "モデルに名前を付ける",
@@ -464,6 +474,8 @@
 	"OpenAI URL/Key required.": "OpenAI URL/Key が必要です。",
 	"or": "または",
 	"Other": "その他",
+	"Output format": "",
+	"Overview": "",
 	"Password": "パスワード",
 	"PDF document (.pdf)": "PDF ドキュメント (.pdf)",
 	"PDF Extract Images (OCR)": "PDF 画像抽出 (OCR)",
@@ -526,6 +538,7 @@
 	"Save": "保存",
 	"Save & Create": "保存して作成",
 	"Save & Update": "保存して更新",
+	"Save As Copy": "",
 	"Save Tag": "",
 	"Saving chat logs directly to your browser's storage is no longer supported. Please take a moment to download and delete your chat logs by clicking the button below. Don't worry, you can easily re-import your chat logs to the backend through": "チャットログをブラウザのストレージに直接保存する機能はサポートされなくなりました。下のボタンをクリックして、チャットログをダウンロードして削除してください。ご心配なく。チャットログは、次の方法でバックエンドに簡単に再インポートできます。",
 	"Scan": "スキャン",
@@ -567,16 +580,20 @@
 	"Send": "送信",
 	"Send a Message": "メッセージを送信",
 	"Send message": "メッセージを送信",
+	"Sends `stream_options: { include_usage: true }` in the request.\nSupported providers will return token usage information in the response when set.": "",
 	"September": "9月",
 	"Serper API Key": "Serper APIキー",
 	"Serply API Key": "",
 	"Serpstack API Key": "Serpstack APIキー",
 	"Server connection verified": "サーバー接続が確認されました",
 	"Set as default": "デフォルトに設定",
+	"Set CFG Scale": "",
 	"Set Default Model": "デフォルトモデルを設定",
 	"Set embedding model (e.g. {{model}})": "埋め込みモデルを設定します(例:{{model}})",
 	"Set Image Size": "画像サイズを設定",
 	"Set reranking model (e.g. {{model}})": "モデルを設定します(例:{{model}})",
+	"Set Sampler": "",
+	"Set Scheduler": "",
 	"Set Steps": "ステップを設定",
 	"Set Task Model": "タスクモデルの設定",
 	"Set Voice": "音声を設定",
@@ -599,7 +616,9 @@
 	"Source": "ソース",
 	"Speech recognition error: {{error}}": "音声認識エラー: {{error}}",
 	"Speech-to-Text Engine": "音声テキスト変換エンジン",
+	"Speed Rate": "",
 	"Stop Sequence": "ストップシーケンス",
+	"Stream Chat Response": "",
 	"STT Model": "",
 	"STT Settings": "STT 設定",
 	"Submit": "送信",

+ 19 - 0
src/lib/i18n/locales/ka-GE/translation.json

@@ -73,7 +73,10 @@
 	"AUTOMATIC1111 Api Auth String": "",
 	"AUTOMATIC1111 Base URL": "AUTOMATIC1111 საბაზისო მისამართი",
 	"AUTOMATIC1111 Base URL is required.": "AUTOMATIC1111 საბაზისო მისამართი აუცილებელია",
+	"Available list": "",
 	"available!": "ხელმისაწვდომია!",
+	"Azure AI Speech": "",
+	"Azure Region": "",
 	"Back": "უკან",
 	"Bad Response": "ხარვეზი",
 	"Banners": "რეკლამა",
@@ -94,6 +97,7 @@
 	"Chat Bubble UI": "ჩატის ბულბი",
 	"Chat Controls": "",
 	"Chat direction": "ჩატის მიმართულება",
+	"Chat Overview": "",
 	"Chats": "მიმოწერები",
 	"Check Again": "თავიდან შემოწმება",
 	"Check for updates": "განახლებების ძიება",
@@ -238,6 +242,7 @@
 	"Enter a detail about yourself for your LLMs to recall": "შეიყვანე დეტალი ჩემთათვის, რომ ჩვენი LLMs-ს შეიძლოს აღაქვს",
 	"Enter api auth string (e.g. username:password)": "",
 	"Enter Brave Search API Key": "შეიყვანეთ Brave Search API გასაღები",
+	"Enter CFG Scale (e.g. 7.0)": "",
 	"Enter Chunk Overlap": "შეიყვანეთ ნაწილის გადახურვა",
 	"Enter Chunk Size": "შეიყვანე ბლოკის ზომა",
 	"Enter Github Raw URL": "შეიყვანეთ Github Raw URL",
@@ -248,6 +253,8 @@
 	"Enter Model ID": "",
 	"Enter model tag (e.g. {{modelTag}})": "შეიყვანეთ მოდელის ტეგი (მაგ. {{modelTag}})",
 	"Enter Number of Steps (e.g. 50)": "შეიყვანეთ ნაბიჯების რაოდენობა (მაგ. 50)",
+	"Enter Sampler (e.g. Euler a)": "",
+	"Enter Scheduler (e.g. Karras)": "",
 	"Enter Score": "შეიყვანეთ ქულა",
 	"Enter SearchApi API Key": "",
 	"Enter SearchApi Engine": "",
@@ -408,6 +415,8 @@
 	"Model {{modelId}} not found": "მოდელი {{modelId}} ვერ მოიძებნა",
 	"Model {{modelName}} is not vision capable": "Model {{modelName}} is not vision capable",
 	"Model {{name}} is now {{status}}": "Model {{name}} is now {{status}}",
+	"Model {{name}} is now at the top": "",
+	"Model accepts image inputs": "",
 	"Model created successfully!": "",
 	"Model filesystem path detected. Model shortname is required for update, cannot continue.": "აღმოჩენილია მოდელის ფაილური სისტემის გზა. განახლებისთვის საჭიროა მოდელის მოკლე სახელი, გაგრძელება შეუძლებელია.",
 	"Model ID": "მოდელის ID",
@@ -419,6 +428,7 @@
 	"Modelfile Content": "მოდელური ფაილის კონტენტი",
 	"Models": "მოდელები",
 	"More": "ვრცლად",
+	"Move to Top": "",
 	"Name": "სახელი",
 	"Name Tag": "სახელის ტეგი",
 	"Name your model": "დაასახელეთ თქვენი მოდელი",
@@ -464,6 +474,8 @@
 	"OpenAI URL/Key required.": "OpenAI URL/Key აუცილებელია",
 	"or": "ან",
 	"Other": "სხვა",
+	"Output format": "",
+	"Overview": "",
 	"Password": "პაროლი",
 	"PDF document (.pdf)": "PDF დოკუმენტი (.pdf)",
 	"PDF Extract Images (OCR)": "PDF იდან ამოღებული სურათები (OCR)",
@@ -526,6 +538,7 @@
 	"Save": "შენახვა",
 	"Save & Create": "დამახსოვრება და შექმნა",
 	"Save & Update": "დამახსოვრება და განახლება",
+	"Save As Copy": "",
 	"Save Tag": "",
 	"Saving chat logs directly to your browser's storage is no longer supported. Please take a moment to download and delete your chat logs by clicking the button below. Don't worry, you can easily re-import your chat logs to the backend through": "ჩეთის ისტორიის შენახვა პირდაპირ თქვენი ბრაუზერის საცავში აღარ არის მხარდაჭერილი. გთხოვთ, დაუთმოთ და წაშალოთ თქვენი ჩატის ჟურნალები ქვემოთ მოცემულ ღილაკზე დაწკაპუნებით. არ ინერვიულოთ, თქვენ შეგიძლიათ მარტივად ხელახლა შემოიტანოთ თქვენი ჩეთის ისტორია ბექენდში",
 	"Scan": "სკანირება",
@@ -568,16 +581,20 @@
 	"Send": "გაგზავნა",
 	"Send a Message": "შეტყობინების გაგზავნა",
 	"Send message": "შეტყობინების გაგზავნა",
+	"Sends `stream_options: { include_usage: true }` in the request.\nSupported providers will return token usage information in the response when set.": "",
 	"September": "სექტემბერი",
 	"Serper API Key": "Serper API Key",
 	"Serply API Key": "",
 	"Serpstack API Key": "Serpstack API Key",
 	"Server connection verified": "სერვერთან კავშირი დადასტურებულია",
 	"Set as default": "დეფოლტად დაყენება",
+	"Set CFG Scale": "",
 	"Set Default Model": "დეფოლტ მოდელის დაყენება",
 	"Set embedding model (e.g. {{model}})": "ჩვენება მოდელის დაყენება (მაგ. {{model}})",
 	"Set Image Size": "სურათის ზომის დაყენება",
 	"Set reranking model (e.g. {{model}})": "რეტარირება მოდელის დაყენება (მაგ. {{model}})",
+	"Set Sampler": "",
+	"Set Scheduler": "",
 	"Set Steps": "ნაბიჯების დაყენება",
 	"Set Task Model": "დააყენეთ სამუშაო მოდელი",
 	"Set Voice": "ხმის დაყენება",
@@ -600,7 +617,9 @@
 	"Source": "წყარო",
 	"Speech recognition error: {{error}}": "მეტყველების ამოცნობის შეცდომა: {{error}}",
 	"Speech-to-Text Engine": "ხმოვან-ტექსტური ძრავი",
+	"Speed Rate": "",
 	"Stop Sequence": "შეჩერების თანმიმდევრობა",
+	"Stream Chat Response": "",
 	"STT Model": "",
 	"STT Settings": "მეტყველების ამოცნობის პარამეტრები",
 	"Submit": "გაგზავნა",

+ 19 - 0
src/lib/i18n/locales/ko-KR/translation.json

@@ -73,7 +73,10 @@
 	"AUTOMATIC1111 Api Auth String": "",
 	"AUTOMATIC1111 Base URL": "AUTOMATIC1111 기본 URL",
 	"AUTOMATIC1111 Base URL is required.": "AUTOMATIC1111 기본 URL 설정이 필요합니다.",
+	"Available list": "",
 	"available!": "사용 가능!",
+	"Azure AI Speech": "",
+	"Azure Region": "",
 	"Back": "뒤로가기",
 	"Bad Response": "잘못된 응답",
 	"Banners": "배너",
@@ -94,6 +97,7 @@
 	"Chat Bubble UI": "버블형 채팅 UI",
 	"Chat Controls": "",
 	"Chat direction": "채팅 방향",
+	"Chat Overview": "",
 	"Chats": "채팅",
 	"Check Again": "다시 확인",
 	"Check for updates": "업데이트 확인",
@@ -238,6 +242,7 @@
 	"Enter a detail about yourself for your LLMs to recall": "자신에 대한 세부사항을 입력하여 LLM들이 기억할 수 있도록 하세요.",
 	"Enter api auth string (e.g. username:password)": "",
 	"Enter Brave Search API Key": "Brave Search API Key 입력",
+	"Enter CFG Scale (e.g. 7.0)": "",
 	"Enter Chunk Overlap": "청크 오버랩 입력",
 	"Enter Chunk Size": "청크 크기 입력",
 	"Enter Github Raw URL": "Github Raw URL 입력",
@@ -248,6 +253,8 @@
 	"Enter Model ID": "",
 	"Enter model tag (e.g. {{modelTag}})": "모델 태그 입력(예: {{modelTag}})",
 	"Enter Number of Steps (e.g. 50)": "단계 수 입력(예: 50)",
+	"Enter Sampler (e.g. Euler a)": "",
+	"Enter Scheduler (e.g. Karras)": "",
 	"Enter Score": "점수 입력",
 	"Enter SearchApi API Key": "",
 	"Enter SearchApi Engine": "",
@@ -408,6 +415,8 @@
 	"Model {{modelId}} not found": "{{modelId}} 모델을 찾을 수 없습니다.",
 	"Model {{modelName}} is not vision capable": "{{modelName}} 모델은 비전을 사용할 수 없습니다.",
 	"Model {{name}} is now {{status}}": "{{name}} 모델은 이제 {{status}} 상태입니다.",
+	"Model {{name}} is now at the top": "",
+	"Model accepts image inputs": "",
 	"Model created successfully!": "",
 	"Model filesystem path detected. Model shortname is required for update, cannot continue.": "모델 파일 시스템 경로가 감지되었습니다. 업데이트하려면 모델 단축 이름이 필요하며 계속할 수 없습니다.",
 	"Model ID": "모델 ID",
@@ -419,6 +428,7 @@
 	"Modelfile Content": "Modelfile 내용",
 	"Models": "모델",
 	"More": "더보기",
+	"Move to Top": "",
 	"Name": "이름",
 	"Name Tag": "이름 태그",
 	"Name your model": "모델 이름 지정",
@@ -464,6 +474,8 @@
 	"OpenAI URL/Key required.": "OpenAI URL/키가 필요합니다.",
 	"or": "또는",
 	"Other": "기타",
+	"Output format": "",
+	"Overview": "",
 	"Password": "비밀번호",
 	"PDF document (.pdf)": "PDF 문서(.pdf)",
 	"PDF Extract Images (OCR)": "PDF 이미지 추출(OCR)",
@@ -526,6 +538,7 @@
 	"Save": "저장",
 	"Save & Create": "저장 및 생성",
 	"Save & Update": "저장 및 업데이트",
+	"Save As Copy": "",
 	"Save Tag": "",
 	"Saving chat logs directly to your browser's storage is no longer supported. Please take a moment to download and delete your chat logs by clicking the button below. Don't worry, you can easily re-import your chat logs to the backend through": "브라우저의 저장소에 채팅 로그를 직접 저장하는 것은 더 이상 지원되지 않습니다. 아래 버튼을 클릭하여 채팅 로그를 다운로드하고 삭제하세요. 걱정 마세요. 백엔드를 통해 채팅 로그를 쉽게 다시 가져올 수 있습니다.",
 	"Scan": "스캔",
@@ -568,16 +581,20 @@
 	"Send": "보내기",
 	"Send a Message": "메시지 보내기",
 	"Send message": "메시지 보내기",
+	"Sends `stream_options: { include_usage: true }` in the request.\nSupported providers will return token usage information in the response when set.": "",
 	"September": "9월",
 	"Serper API Key": "Serper API 키",
 	"Serply API Key": "Serply API 키",
 	"Serpstack API Key": "Serpstack API 키",
 	"Server connection verified": "서버 연결 확인됨",
 	"Set as default": "기본값으로 설정",
+	"Set CFG Scale": "",
 	"Set Default Model": "기본 모델 설정",
 	"Set embedding model (e.g. {{model}})": "임베딩 모델 설정 (예: {{model}})",
 	"Set Image Size": "이미지 크기 설정",
 	"Set reranking model (e.g. {{model}})": "reranking 모델 설정 (예: {{model}})",
+	"Set Sampler": "",
+	"Set Scheduler": "",
 	"Set Steps": "단계 설정",
 	"Set Task Model": "작업 모델 설정",
 	"Set Voice": "음성 설정",
@@ -600,7 +617,9 @@
 	"Source": "출처",
 	"Speech recognition error: {{error}}": "음성 인식 오류: {{error}}",
 	"Speech-to-Text Engine": "음성-텍스트 변환 엔진",
+	"Speed Rate": "",
 	"Stop Sequence": "중지 시퀀스",
+	"Stream Chat Response": "",
 	"STT Model": "STT 모델",
 	"STT Settings": "STT 설정",
 	"Submit": "제출",

+ 19 - 0
src/lib/i18n/locales/lt-LT/translation.json

@@ -73,7 +73,10 @@
 	"AUTOMATIC1111 Api Auth String": "AUTOMATIC1111 Api Auth String",
 	"AUTOMATIC1111 Base URL": "AUTOMATIC1111 bazės nuoroda",
 	"AUTOMATIC1111 Base URL is required.": "AUTOMATIC1111 bazės nuoroda reikalinga.",
+	"Available list": "",
 	"available!": "prieinama!",
+	"Azure AI Speech": "",
+	"Azure Region": "",
 	"Back": "Atgal",
 	"Bad Response": "Neteisingas atsakymas",
 	"Banners": "Baneriai",
@@ -94,6 +97,7 @@
 	"Chat Bubble UI": "Pokalbio burbulo sąsaja",
 	"Chat Controls": "Pokalbio valdymas",
 	"Chat direction": "Pokalbio linkmė",
+	"Chat Overview": "",
 	"Chats": "Pokalbiai",
 	"Check Again": "Patikrinti iš naujo",
 	"Check for updates": "Patikrinti atnaujinimus",
@@ -238,6 +242,7 @@
 	"Enter a detail about yourself for your LLMs to recall": "Įveskite informaciją apie save jūsų modelio atminčiai",
 	"Enter api auth string (e.g. username:password)": "Įveskite API autentifikacijos kodą (pvz. username:password)",
 	"Enter Brave Search API Key": "Įveskite Bravo Search API raktą",
+	"Enter CFG Scale (e.g. 7.0)": "",
 	"Enter Chunk Overlap": "Įveskite blokų persidengimą",
 	"Enter Chunk Size": "Įveskite blokų dydį",
 	"Enter Github Raw URL": "Įveskite GitHub Raw nuorodą",
@@ -248,6 +253,8 @@
 	"Enter Model ID": "",
 	"Enter model tag (e.g. {{modelTag}})": "Įveskite modelio žymą (pvz. {{modelTag}})",
 	"Enter Number of Steps (e.g. 50)": "Įveskite žingsnių kiekį (pvz. 50)",
+	"Enter Sampler (e.g. Euler a)": "",
+	"Enter Scheduler (e.g. Karras)": "",
 	"Enter Score": "Įveskite rezultatą",
 	"Enter SearchApi API Key": "",
 	"Enter SearchApi Engine": "",
@@ -408,6 +415,8 @@
 	"Model {{modelId}} not found": "Modelis {{modelId}} nerastas",
 	"Model {{modelName}} is not vision capable": "Modelis {{modelName}} neturi vaizdo gebėjimų",
 	"Model {{name}} is now {{status}}": "Modelis {{name}} dabar {{status}}",
+	"Model {{name}} is now at the top": "",
+	"Model accepts image inputs": "",
 	"Model created successfully!": "Modelis sukurtas sėkmingai",
 	"Model filesystem path detected. Model shortname is required for update, cannot continue.": "Modelio failų sistemos kelias aptiktas. Reikalingas trumpas modelio pavadinimas atnaujinimui.",
 	"Model ID": "Modelio ID",
@@ -419,6 +428,7 @@
 	"Modelfile Content": "Modelio failo turinys",
 	"Models": "Modeliai",
 	"More": "Daugiau",
+	"Move to Top": "",
 	"Name": "Pavadinimas",
 	"Name Tag": "Žymos pavadinimas",
 	"Name your model": "Pavadinkite savo modelį",
@@ -464,6 +474,8 @@
 	"OpenAI URL/Key required.": "OpenAI API nuoroda ir raktas būtini",
 	"or": "arba",
 	"Other": "Kita",
+	"Output format": "",
+	"Overview": "",
 	"Password": "Slaptažodis",
 	"PDF document (.pdf)": "PDF dokumentas (.pdf)",
 	"PDF Extract Images (OCR)": "PDF paveikslėlių skaitymas (OCR)",
@@ -526,6 +538,7 @@
 	"Save": "Išsaugoti",
 	"Save & Create": "Išsaugoti ir sukurti",
 	"Save & Update": "Išsaugoti ir atnaujinti",
+	"Save As Copy": "",
 	"Save Tag": "Išsaugoti žymą",
 	"Saving chat logs directly to your browser's storage is no longer supported. Please take a moment to download and delete your chat logs by clicking the button below. Don't worry, you can easily re-import your chat logs to the backend through": "Pokalbių saugojimas naršyklėje nebegalimas.",
 	"Scan": "Skenuoti",
@@ -570,16 +583,20 @@
 	"Send": "Siųsti",
 	"Send a Message": "Siųsti žinutę",
 	"Send message": "Siųsti žinutę",
+	"Sends `stream_options: { include_usage: true }` in the request.\nSupported providers will return token usage information in the response when set.": "",
 	"September": "rugsėjis",
 	"Serper API Key": "Serper API raktas",
 	"Serply API Key": "Serply API raktas",
 	"Serpstack API Key": "Serpstach API raktas",
 	"Server connection verified": "Serverio sujungimas patvirtintas",
 	"Set as default": "Nustatyti numatytąjį",
+	"Set CFG Scale": "",
 	"Set Default Model": "Nustatyti numatytąjį modelį",
 	"Set embedding model (e.g. {{model}})": "Nustatyti embedding modelį",
 	"Set Image Size": "Nustatyti paveikslėlių dydį",
 	"Set reranking model (e.g. {{model}})": "Nustatyti reranking modelį",
+	"Set Sampler": "",
+	"Set Scheduler": "",
 	"Set Steps": "Numatyti etapus",
 	"Set Task Model": "Numatyti užduočių modelį",
 	"Set Voice": "Numatyti balsą",
@@ -602,7 +619,9 @@
 	"Source": "Šaltinis",
 	"Speech recognition error: {{error}}": "Balso atpažinimo problema: {{error}}",
 	"Speech-to-Text Engine": "Balso atpažinimo modelis",
+	"Speed Rate": "",
 	"Stop Sequence": "Baigt sekvenciją",
+	"Stream Chat Response": "",
 	"STT Model": "STT modelis",
 	"STT Settings": "STT nustatymai",
 	"Submit": "Pateikti",

+ 19 - 0
src/lib/i18n/locales/ms-MY/translation.json

@@ -73,7 +73,10 @@
 	"AUTOMATIC1111 Api Auth String": "AUTOMATIC1111 Api Auth String",
 	"AUTOMATIC1111 Base URL": "URL Asas AUTOMATIC1111",
 	"AUTOMATIC1111 Base URL is required.": "URL Asas AUTOMATIC1111 diperlukan.",
+	"Available list": "",
 	"available!": "tersedia!",
+	"Azure AI Speech": "",
+	"Azure Region": "",
 	"Back": "Kembali",
 	"Bad Response": "Maklumbalas Salah",
 	"Banners": "Sepanduk",
@@ -94,6 +97,7 @@
 	"Chat Bubble UI": "Antaramuka Buih Perbualan",
 	"Chat Controls": "Kawalan Perbualan",
 	"Chat direction": "Arah Perbualan",
+	"Chat Overview": "",
 	"Chats": "Perbualan",
 	"Check Again": "Semak Kembali",
 	"Check for updates": "Semak kemas kini",
@@ -238,6 +242,7 @@
 	"Enter a detail about yourself for your LLMs to recall": "Masukkan butiran tentang diri anda untuk diingati oleh LLM anda",
 	"Enter api auth string (e.g. username:password)": "Masukkan kekunci auth api ( cth nama pengguna:kata laluan )",
 	"Enter Brave Search API Key": "Masukkan Kekunci API carian Brave",
+	"Enter CFG Scale (e.g. 7.0)": "",
 	"Enter Chunk Overlap": "Masukkan Tindihan 'Chunk'",
 	"Enter Chunk Size": "Masukkan Saiz 'Chunk'",
 	"Enter Github Raw URL": "Masukkan URL 'Github Raw'",
@@ -248,6 +253,8 @@
 	"Enter Model ID": "",
 	"Enter model tag (e.g. {{modelTag}})": "Masukkan tag model (cth {{ modelTag }})",
 	"Enter Number of Steps (e.g. 50)": "Masukkan Bilangan Langkah (cth 50)",
+	"Enter Sampler (e.g. Euler a)": "",
+	"Enter Scheduler (e.g. Karras)": "",
 	"Enter Score": "Masukkan Skor",
 	"Enter SearchApi API Key": "",
 	"Enter SearchApi Engine": "",
@@ -408,6 +415,8 @@
 	"Model {{modelId}} not found": "Model {{ modelId }} tidak dijumpai",
 	"Model {{modelName}} is not vision capable": "Model {{ modelName }} tidak mempunyai keupayaan penglihatan",
 	"Model {{name}} is now {{status}}": "Model {{name}} kini {{status}}",
+	"Model {{name}} is now at the top": "",
+	"Model accepts image inputs": "",
 	"Model created successfully!": "Model berjaya dibuat!",
 	"Model filesystem path detected. Model shortname is required for update, cannot continue.": "Laluan sistem fail model dikesan. Nama pendek model diperlukan untuk kemas kini, tidak boleh diteruskan.",
 	"Model ID": "ID Model",
@@ -419,6 +428,7 @@
 	"Modelfile Content": "Kandungan Modelfail",
 	"Models": "Model",
 	"More": "Lagi",
+	"Move to Top": "",
 	"Name": "Nama",
 	"Name Tag": "Nama Tag",
 	"Name your model": "Namakan Model Anda",
@@ -464,6 +474,8 @@
 	"OpenAI URL/Key required.": "URL/Kekunci OpenAI diperlukan",
 	"or": "atau",
 	"Other": "Lain-lain",
+	"Output format": "",
+	"Overview": "",
 	"Password": "Kata Laluan",
 	"PDF document (.pdf)": "Dokumen PDF (.pdf)",
 	"PDF Extract Images (OCR)": "Imej Ekstrak PDF (OCR)",
@@ -526,6 +538,7 @@
 	"Save": "Simpan",
 	"Save & Create": "Simpan & Cipta",
 	"Save & Update": "Simpan & Kemas Kini",
+	"Save As Copy": "",
 	"Save Tag": "Simpan Tag",
 	"Saving chat logs directly to your browser's storage is no longer supported. Please take a moment to download and delete your chat logs by clicking the button below. Don't worry, you can easily re-import your chat logs to the backend through": "Penyimpanan log perbualan terus ke storan pelayan web anda tidak lagi disokong. Sila luangkan sedikit masa untuk memuat turun dan memadam log perbualan anda dengan mengklik butang di bawah. Jangan risau, anda boleh mengimport semula log perbualan anda dengan mudah melalui 'backend'",
 	"Scan": "Imbas",
@@ -568,16 +581,20 @@
 	"Send": "Hantar",
 	"Send a Message": "Hantar Pesanan",
 	"Send message": "Hantar pesanan",
+	"Sends `stream_options: { include_usage: true }` in the request.\nSupported providers will return token usage information in the response when set.": "",
 	"September": "September",
 	"Serper API Key": "Kunci API Serper",
 	"Serply API Key": "Kunci API Serply",
 	"Serpstack API Key": "Kunci API Serpstack",
 	"Server connection verified": "Sambungan pelayan disahkan",
 	"Set as default": "Tetapkan sebagai lalai",
+	"Set CFG Scale": "",
 	"Set Default Model": "Tetapkan Model Lalai",
 	"Set embedding model (e.g. {{model}})": "Tetapkan model benamkan (cth {{model}})",
 	"Set Image Size": "Tetapkan saiz imej",
 	"Set reranking model (e.g. {{model}})": "Tetapkan model 'reranking' (cth {{model}})",
+	"Set Sampler": "",
+	"Set Scheduler": "",
 	"Set Steps": "tapkan Langkah",
 	"Set Task Model": "Tetapkan Model Tugasan",
 	"Set Voice": "Tetapan Suara",
@@ -600,7 +617,9 @@
 	"Source": "Sumber",
 	"Speech recognition error: {{error}}": "Ralat pengecaman pertuturan: {{error}}",
 	"Speech-to-Text Engine": "Enjin Ucapan-ke-Teks",
+	"Speed Rate": "",
 	"Stop Sequence": "Jujukan Henti",
+	"Stream Chat Response": "",
 	"STT Model": "Model STT",
 	"STT Settings": "Tetapan STT",
 	"Submit": "Hantar",

+ 19 - 0
src/lib/i18n/locales/nb-NO/translation.json

@@ -73,7 +73,10 @@
 	"AUTOMATIC1111 Api Auth String": "AUTOMATIC1111 Api Autentiseringsstreng",
 	"AUTOMATIC1111 Base URL": "AUTOMATIC1111 Grunn-URL",
 	"AUTOMATIC1111 Base URL is required.": "AUTOMATIC1111 Grunn-URL kreves.",
+	"Available list": "",
 	"available!": "tilgjengelig!",
+	"Azure AI Speech": "",
+	"Azure Region": "",
 	"Back": "Tilbake",
 	"Bad Response": "Dårlig svar",
 	"Banners": "Bannere",
@@ -94,6 +97,7 @@
 	"Chat Bubble UI": "Chat-boble UI",
 	"Chat Controls": "Chat-kontroller",
 	"Chat direction": "Chat-retning",
+	"Chat Overview": "",
 	"Chats": "Chatter",
 	"Check Again": "Sjekk igjen",
 	"Check for updates": "Sjekk for oppdateringer",
@@ -238,6 +242,7 @@
 	"Enter a detail about yourself for your LLMs to recall": "Skriv inn en detalj om deg selv som språkmodellene dine kan huske",
 	"Enter api auth string (e.g. username:password)": "Skriv inn api-autentiseringsstreng (f.eks. brukernavn:passord)",
 	"Enter Brave Search API Key": "Skriv inn Brave Search API-nøkkel",
+	"Enter CFG Scale (e.g. 7.0)": "",
 	"Enter Chunk Overlap": "Skriv inn Chunk Overlap",
 	"Enter Chunk Size": "Skriv inn Chunk-størrelse",
 	"Enter Github Raw URL": "Skriv inn Github Raw-URL",
@@ -248,6 +253,8 @@
 	"Enter Model ID": "",
 	"Enter model tag (e.g. {{modelTag}})": "Skriv inn modelltag (f.eks. {{modelTag}})",
 	"Enter Number of Steps (e.g. 50)": "Skriv inn antall steg (f.eks. 50)",
+	"Enter Sampler (e.g. Euler a)": "",
+	"Enter Scheduler (e.g. Karras)": "",
 	"Enter Score": "Skriv inn poengsum",
 	"Enter SearchApi API Key": "",
 	"Enter SearchApi Engine": "",
@@ -408,6 +415,8 @@
 	"Model {{modelId}} not found": "Modellen {{modelId}} ble ikke funnet",
 	"Model {{modelName}} is not vision capable": "Modellen {{modelName}} er ikke visjonsdyktig",
 	"Model {{name}} is now {{status}}": "Modellen {{name}} er nå {{status}}",
+	"Model {{name}} is now at the top": "",
+	"Model accepts image inputs": "",
 	"Model created successfully!": "Modellen ble opprettet!",
 	"Model filesystem path detected. Model shortname is required for update, cannot continue.": "Modellens filsystemsti oppdaget. Modellens kortnavn er påkrevd for oppdatering, kan ikke fortsette.",
 	"Model ID": "Modell-ID",
@@ -419,6 +428,7 @@
 	"Modelfile Content": "Modellfilinnhold",
 	"Models": "Modeller",
 	"More": "Mer",
+	"Move to Top": "",
 	"Name": "Navn",
 	"Name Tag": "Navnetag",
 	"Name your model": "Gi modellen din et navn",
@@ -464,6 +474,8 @@
 	"OpenAI URL/Key required.": "OpenAI URL/nøkkel kreves.",
 	"or": "eller",
 	"Other": "Annet",
+	"Output format": "",
+	"Overview": "",
 	"Password": "Passord",
 	"PDF document (.pdf)": "PDF-dokument (.pdf)",
 	"PDF Extract Images (OCR)": "PDF-ekstraktbilder (OCR)",
@@ -526,6 +538,7 @@
 	"Save": "Lagre",
 	"Save & Create": "Lagre og opprett",
 	"Save & Update": "Lagre og oppdater",
+	"Save As Copy": "",
 	"Save Tag": "Lagre tag",
 	"Saving chat logs directly to your browser's storage is no longer supported. Please take a moment to download and delete your chat logs by clicking the button below. Don't worry, you can easily re-import your chat logs to the backend through": "Lagring av chatlogger direkte til nettleserens lagring støttes ikke lenger. Vennligst ta et øyeblikk for å laste ned og slette chatloggene dine ved å klikke på knappen nedenfor. Ikke bekymre deg, du kan enkelt re-importere chatloggene dine til backend via",
 	"Scan": "Skann",
@@ -568,16 +581,20 @@
 	"Send": "Send",
 	"Send a Message": "Send en melding",
 	"Send message": "Send melding",
+	"Sends `stream_options: { include_usage: true }` in the request.\nSupported providers will return token usage information in the response when set.": "",
 	"September": "september",
 	"Serper API Key": "Serper API-nøkkel",
 	"Serply API Key": "Serply API-nøkkel",
 	"Serpstack API Key": "Serpstack API-nøkkel",
 	"Server connection verified": "Servertilkobling bekreftet",
 	"Set as default": "Sett som standard",
+	"Set CFG Scale": "",
 	"Set Default Model": "Sett standardmodell",
 	"Set embedding model (e.g. {{model}})": "Sett embedding-modell (f.eks. {{model}})",
 	"Set Image Size": "Sett bildestørrelse",
 	"Set reranking model (e.g. {{model}})": "Sett reranking-modell (f.eks. {{model}})",
+	"Set Sampler": "",
+	"Set Scheduler": "",
 	"Set Steps": "Sett steg",
 	"Set Task Model": "Sett oppgavemodell",
 	"Set Voice": "Sett stemme",
@@ -600,7 +617,9 @@
 	"Source": "Kilde",
 	"Speech recognition error: {{error}}": "Feil ved talegjenkjenning: {{error}}",
 	"Speech-to-Text Engine": "Tale-til-tekst-motor",
+	"Speed Rate": "",
 	"Stop Sequence": "Stoppsekvens",
+	"Stream Chat Response": "",
 	"STT Model": "STT-modell",
 	"STT Settings": "STT-innstillinger",
 	"Submit": "Send inn",

+ 19 - 0
src/lib/i18n/locales/nl-NL/translation.json

@@ -73,7 +73,10 @@
 	"AUTOMATIC1111 Api Auth String": "",
 	"AUTOMATIC1111 Base URL": "AUTOMATIC1111 Base URL",
 	"AUTOMATIC1111 Base URL is required.": "AUTOMATIC1111 Basis URL is verplicht",
+	"Available list": "",
 	"available!": "beschikbaar!",
+	"Azure AI Speech": "",
+	"Azure Region": "",
 	"Back": "Terug",
 	"Bad Response": "Ongeldig antwoord",
 	"Banners": "Banners",
@@ -94,6 +97,7 @@
 	"Chat Bubble UI": "Chat Bubble UI",
 	"Chat Controls": "",
 	"Chat direction": "Chat Richting",
+	"Chat Overview": "",
 	"Chats": "Chats",
 	"Check Again": "Controleer Opnieuw",
 	"Check for updates": "Controleer op updates",
@@ -238,6 +242,7 @@
 	"Enter a detail about yourself for your LLMs to recall": "Voer een detail over jezelf in voor je LLMs om het her te onthouden",
 	"Enter api auth string (e.g. username:password)": "",
 	"Enter Brave Search API Key": "Voer de Brave Search API-sleutel in",
+	"Enter CFG Scale (e.g. 7.0)": "",
 	"Enter Chunk Overlap": "Voeg Chunk Overlap toe",
 	"Enter Chunk Size": "Voeg Chunk Size toe",
 	"Enter Github Raw URL": "Voer de Github Raw-URL in",
@@ -248,6 +253,8 @@
 	"Enter Model ID": "",
 	"Enter model tag (e.g. {{modelTag}})": "Voeg model tag toe (Bijv. {{modelTag}})",
 	"Enter Number of Steps (e.g. 50)": "Voeg aantal stappen toe (Bijv. 50)",
+	"Enter Sampler (e.g. Euler a)": "",
+	"Enter Scheduler (e.g. Karras)": "",
 	"Enter Score": "Voeg score toe",
 	"Enter SearchApi API Key": "",
 	"Enter SearchApi Engine": "",
@@ -408,6 +415,8 @@
 	"Model {{modelId}} not found": "Model {{modelId}} niet gevonden",
 	"Model {{modelName}} is not vision capable": "Model {{modelName}} is niet geschikt voor visie",
 	"Model {{name}} is now {{status}}": "Model {{name}} is nu {{status}}",
+	"Model {{name}} is now at the top": "",
+	"Model accepts image inputs": "",
 	"Model created successfully!": "",
 	"Model filesystem path detected. Model shortname is required for update, cannot continue.": "Model filesystem path gedetecteerd. Model shortname is vereist voor update, kan niet doorgaan.",
 	"Model ID": "Model-ID",
@@ -419,6 +428,7 @@
 	"Modelfile Content": "Modelfile Inhoud",
 	"Models": "Modellen",
 	"More": "Meer",
+	"Move to Top": "",
 	"Name": "Naam",
 	"Name Tag": "Naam Tag",
 	"Name your model": "Geef uw model een naam",
@@ -464,6 +474,8 @@
 	"OpenAI URL/Key required.": "OpenAI URL/Sleutel vereist.",
 	"or": "of",
 	"Other": "Andere",
+	"Output format": "",
+	"Overview": "",
 	"Password": "Wachtwoord",
 	"PDF document (.pdf)": "PDF document (.pdf)",
 	"PDF Extract Images (OCR)": "PDF Extract Afbeeldingen (OCR)",
@@ -526,6 +538,7 @@
 	"Save": "Opslaan",
 	"Save & Create": "Opslaan & Creëren",
 	"Save & Update": "Opslaan & Bijwerken",
+	"Save As Copy": "",
 	"Save Tag": "",
 	"Saving chat logs directly to your browser's storage is no longer supported. Please take a moment to download and delete your chat logs by clicking the button below. Don't worry, you can easily re-import your chat logs to the backend through": "Chat logs direct opslaan in de opslag van je browser wordt niet langer ondersteund. Neem even de tijd om je chat logs te downloaden en te verwijderen door op de knop hieronder te klikken. Maak je geen zorgen, je kunt je chat logs eenvoudig opnieuw importeren naar de backend via",
 	"Scan": "Scan",
@@ -568,16 +581,20 @@
 	"Send": "Verzenden",
 	"Send a Message": "Stuur een Bericht",
 	"Send message": "Stuur bericht",
+	"Sends `stream_options: { include_usage: true }` in the request.\nSupported providers will return token usage information in the response when set.": "",
 	"September": "September",
 	"Serper API Key": "Serper API-sleutel",
 	"Serply API Key": "",
 	"Serpstack API Key": "Serpstack API-sleutel",
 	"Server connection verified": "Server verbinding geverifieerd",
 	"Set as default": "Stel in als standaard",
+	"Set CFG Scale": "",
 	"Set Default Model": "Stel Standaard Model in",
 	"Set embedding model (e.g. {{model}})": "Stel embedding model in (bv. {{model}})",
 	"Set Image Size": "Stel Afbeelding Grootte in",
 	"Set reranking model (e.g. {{model}})": "Stel reranking model in (bv. {{model}})",
+	"Set Sampler": "",
+	"Set Scheduler": "",
 	"Set Steps": "Stel Stappen in",
 	"Set Task Model": "Taakmodel instellen",
 	"Set Voice": "Stel Stem in",
@@ -600,7 +617,9 @@
 	"Source": "Bron",
 	"Speech recognition error: {{error}}": "Spraakherkenning fout: {{error}}",
 	"Speech-to-Text Engine": "Spraak-naar-tekst Engine",
+	"Speed Rate": "",
 	"Stop Sequence": "Stop Sequentie",
+	"Stream Chat Response": "",
 	"STT Model": "",
 	"STT Settings": "STT Instellingen",
 	"Submit": "Verzenden",

+ 19 - 0
src/lib/i18n/locales/pa-IN/translation.json

@@ -73,7 +73,10 @@
 	"AUTOMATIC1111 Api Auth String": "",
 	"AUTOMATIC1111 Base URL": "AUTOMATIC1111 ਬੇਸ URL",
 	"AUTOMATIC1111 Base URL is required.": "AUTOMATIC1111 ਬੇਸ URL ਦੀ ਲੋੜ ਹੈ।",
+	"Available list": "",
 	"available!": "ਉਪਲਬਧ ਹੈ!",
+	"Azure AI Speech": "",
+	"Azure Region": "",
 	"Back": "ਵਾਪਸ",
 	"Bad Response": "ਖਰਾਬ ਜਵਾਬ",
 	"Banners": "ਬੈਨਰ",
@@ -94,6 +97,7 @@
 	"Chat Bubble UI": "ਗੱਲਬਾਤ ਬਬਲ UI",
 	"Chat Controls": "",
 	"Chat direction": "ਗੱਲਬਾਤ ਡਿਰੈਕਟਨ",
+	"Chat Overview": "",
 	"Chats": "ਗੱਲਾਂ",
 	"Check Again": "ਮੁੜ ਜਾਂਚ ਕਰੋ",
 	"Check for updates": "ਅੱਪਡੇਟ ਲਈ ਜਾਂਚ ਕਰੋ",
@@ -238,6 +242,7 @@
 	"Enter a detail about yourself for your LLMs to recall": "ਤੁਹਾਡੇ LLMs ਨੂੰ ਸੁਨੇਹਾ ਕਰਨ ਲਈ ਸੁਨੇਹਾ ਇੱਥੇ ਦਰਜ ਕਰੋ",
 	"Enter api auth string (e.g. username:password)": "",
 	"Enter Brave Search API Key": "ਬਹਾਦਰ ਖੋਜ API ਕੁੰਜੀ ਦਾਖਲ ਕਰੋ",
+	"Enter CFG Scale (e.g. 7.0)": "",
 	"Enter Chunk Overlap": "ਚੰਕ ਓਵਰਲੈਪ ਦਰਜ ਕਰੋ",
 	"Enter Chunk Size": "ਚੰਕ ਆਕਾਰ ਦਰਜ ਕਰੋ",
 	"Enter Github Raw URL": "Github ਕੱਚਾ URL ਦਾਖਲ ਕਰੋ",
@@ -248,6 +253,8 @@
 	"Enter Model ID": "",
 	"Enter model tag (e.g. {{modelTag}})": "ਮਾਡਲ ਟੈਗ ਦਰਜ ਕਰੋ (ਉਦਾਹਰਣ ਲਈ {{modelTag}})",
 	"Enter Number of Steps (e.g. 50)": "ਕਦਮਾਂ ਦੀ ਗਿਣਤੀ ਦਰਜ ਕਰੋ (ਉਦਾਹਰਣ ਲਈ 50)",
+	"Enter Sampler (e.g. Euler a)": "",
+	"Enter Scheduler (e.g. Karras)": "",
 	"Enter Score": "ਸਕੋਰ ਦਰਜ ਕਰੋ",
 	"Enter SearchApi API Key": "",
 	"Enter SearchApi Engine": "",
@@ -408,6 +415,8 @@
 	"Model {{modelId}} not found": "ਮਾਡਲ {{modelId}} ਨਹੀਂ ਮਿਲਿਆ",
 	"Model {{modelName}} is not vision capable": "ਮਾਡਲ {{modelName}} ਦ੍ਰਿਸ਼ਟੀ ਸਮਰੱਥ ਨਹੀਂ ਹੈ",
 	"Model {{name}} is now {{status}}": "ਮਾਡਲ {{name}} ਹੁਣ {{status}} ਹੈ",
+	"Model {{name}} is now at the top": "",
+	"Model accepts image inputs": "",
 	"Model created successfully!": "",
 	"Model filesystem path detected. Model shortname is required for update, cannot continue.": "ਮਾਡਲ ਫਾਈਲਸਿਸਟਮ ਪੱਥ ਪਾਇਆ ਗਿਆ। ਅੱਪਡੇਟ ਲਈ ਮਾਡਲ ਸ਼ੌਰਟਨੇਮ ਦੀ ਲੋੜ ਹੈ, ਜਾਰੀ ਨਹੀਂ ਰੱਖ ਸਕਦੇ।",
 	"Model ID": "ਮਾਡਲ ID",
@@ -419,6 +428,7 @@
 	"Modelfile Content": "ਮਾਡਲਫਾਈਲ ਸਮੱਗਰੀ",
 	"Models": "ਮਾਡਲ",
 	"More": "ਹੋਰ",
+	"Move to Top": "",
 	"Name": "ਨਾਮ",
 	"Name Tag": "ਨਾਮ ਟੈਗ",
 	"Name your model": "ਆਪਣੇ ਮਾਡਲ ਦਾ ਨਾਮ ਦੱਸੋ",
@@ -464,6 +474,8 @@
 	"OpenAI URL/Key required.": "ਓਪਨਏਆਈ URL/ਕੁੰਜੀ ਦੀ ਲੋੜ ਹੈ।",
 	"or": "ਜਾਂ",
 	"Other": "ਹੋਰ",
+	"Output format": "",
+	"Overview": "",
 	"Password": "ਪਾਸਵਰਡ",
 	"PDF document (.pdf)": "PDF ਡਾਕੂਮੈਂਟ (.pdf)",
 	"PDF Extract Images (OCR)": "PDF ਚਿੱਤਰ ਕੱਢੋ (OCR)",
@@ -526,6 +538,7 @@
 	"Save": "ਸੰਭਾਲੋ",
 	"Save & Create": "ਸੰਭਾਲੋ ਅਤੇ ਬਣਾਓ",
 	"Save & Update": "ਸੰਭਾਲੋ ਅਤੇ ਅੱਪਡੇਟ ਕਰੋ",
+	"Save As Copy": "",
 	"Save Tag": "",
 	"Saving chat logs directly to your browser's storage is no longer supported. Please take a moment to download and delete your chat logs by clicking the button below. Don't worry, you can easily re-import your chat logs to the backend through": "ਤੁਹਾਡੇ ਬ੍ਰਾਊਜ਼ਰ ਦੇ ਸਟੋਰੇਜ ਵਿੱਚ ਸਿੱਧੇ ਗੱਲਬਾਤ ਲੌਗ ਸੰਭਾਲਣਾ ਹੁਣ ਸਮਰਥਿਤ ਨਹੀਂ ਹੈ। ਕਿਰਪਾ ਕਰਕੇ ਹੇਠਾਂ ਦਿੱਤੇ ਬਟਨ 'ਤੇ ਕਲਿੱਕ ਕਰਕੇ ਆਪਣੇ ਗੱਲਬਾਤ ਲੌਗ ਡਾਊਨਲੋਡ ਅਤੇ ਮਿਟਾਉਣ ਲਈ ਕੁਝ ਸਮਾਂ ਲਓ। ਚਿੰਤਾ ਨਾ ਕਰੋ, ਤੁਸੀਂ ਆਪਣੇ ਗੱਲਬਾਤ ਲੌਗ ਨੂੰ ਬੈਕਐਂਡ ਵਿੱਚ ਆਸਾਨੀ ਨਾਲ ਮੁੜ ਆਯਾਤ ਕਰ ਸਕਦੇ ਹੋ",
 	"Scan": "ਸਕੈਨ ਕਰੋ",
@@ -568,16 +581,20 @@
 	"Send": "ਭੇਜੋ",
 	"Send a Message": "ਇੱਕ ਸੁਨੇਹਾ ਭੇਜੋ",
 	"Send message": "ਸੁਨੇਹਾ ਭੇਜੋ",
+	"Sends `stream_options: { include_usage: true }` in the request.\nSupported providers will return token usage information in the response when set.": "",
 	"September": "ਸਤੰਬਰ",
 	"Serper API Key": "Serper API ਕੁੰਜੀ",
 	"Serply API Key": "",
 	"Serpstack API Key": "Serpstack API ਕੁੰਜੀ",
 	"Server connection verified": "ਸਰਵਰ ਕਨੈਕਸ਼ਨ ਦੀ ਪੁਸ਼ਟੀ ਕੀਤੀ ਗਈ",
 	"Set as default": "ਮੂਲ ਵਜੋਂ ਸੈੱਟ ਕਰੋ",
+	"Set CFG Scale": "",
 	"Set Default Model": "ਮੂਲ ਮਾਡਲ ਸੈੱਟ ਕਰੋ",
 	"Set embedding model (e.g. {{model}})": "ਐਮਬੈੱਡਿੰਗ ਮਾਡਲ ਸੈੱਟ ਕਰੋ (ਉਦਾਹਰਣ ਲਈ {{model}})",
 	"Set Image Size": "ਚਿੱਤਰ ਆਕਾਰ ਸੈੱਟ ਕਰੋ",
 	"Set reranking model (e.g. {{model}})": "ਮੁੜ ਰੈਂਕਿੰਗ ਮਾਡਲ ਸੈੱਟ ਕਰੋ (ਉਦਾਹਰਣ ਲਈ {{model}})",
+	"Set Sampler": "",
+	"Set Scheduler": "",
 	"Set Steps": "ਕਦਮ ਸੈੱਟ ਕਰੋ",
 	"Set Task Model": "ਟਾਸਕ ਮਾਡਲ ਸੈੱਟ ਕਰੋ",
 	"Set Voice": "ਆਵਾਜ਼ ਸੈੱਟ ਕਰੋ",
@@ -600,7 +617,9 @@
 	"Source": "ਸਰੋਤ",
 	"Speech recognition error: {{error}}": "ਬੋਲ ਪਛਾਣ ਗਲਤੀ: {{error}}",
 	"Speech-to-Text Engine": "ਬੋਲ-ਤੋਂ-ਪਾਠ ਇੰਜਣ",
+	"Speed Rate": "",
 	"Stop Sequence": "ਰੋਕੋ ਕ੍ਰਮ",
+	"Stream Chat Response": "",
 	"STT Model": "",
 	"STT Settings": "STT ਸੈਟਿੰਗਾਂ",
 	"Submit": "ਜਮ੍ਹਾਂ ਕਰੋ",

+ 19 - 0
src/lib/i18n/locales/pl-PL/translation.json

@@ -73,7 +73,10 @@
 	"AUTOMATIC1111 Api Auth String": "",
 	"AUTOMATIC1111 Base URL": "Podstawowy adres URL AUTOMATIC1111",
 	"AUTOMATIC1111 Base URL is required.": "Podstawowy adres URL AUTOMATIC1111 jest wymagany.",
+	"Available list": "",
 	"available!": "dostępny!",
+	"Azure AI Speech": "",
+	"Azure Region": "",
 	"Back": "Wstecz",
 	"Bad Response": "Zła odpowiedź",
 	"Banners": "Banery",
@@ -94,6 +97,7 @@
 	"Chat Bubble UI": "Bąbelki czatu",
 	"Chat Controls": "",
 	"Chat direction": "Kierunek czatu",
+	"Chat Overview": "",
 	"Chats": "Czaty",
 	"Check Again": "Sprawdź ponownie",
 	"Check for updates": "Sprawdź aktualizacje",
@@ -238,6 +242,7 @@
 	"Enter a detail about yourself for your LLMs to recall": "Wprowadź szczegóły o sobie, aby LLMs mogli pamiętać",
 	"Enter api auth string (e.g. username:password)": "",
 	"Enter Brave Search API Key": "Wprowadź klucz API Brave Search",
+	"Enter CFG Scale (e.g. 7.0)": "",
 	"Enter Chunk Overlap": "Wprowadź zakchodzenie bloku",
 	"Enter Chunk Size": "Wprowadź rozmiar bloku",
 	"Enter Github Raw URL": "Wprowadź nieprzetworzony adres URL usługi Github",
@@ -248,6 +253,8 @@
 	"Enter Model ID": "",
 	"Enter model tag (e.g. {{modelTag}})": "Wprowadź tag modelu (np. {{modelTag}})",
 	"Enter Number of Steps (e.g. 50)": "Wprowadź liczbę kroków (np. 50)",
+	"Enter Sampler (e.g. Euler a)": "",
+	"Enter Scheduler (e.g. Karras)": "",
 	"Enter Score": "Wprowadź wynik",
 	"Enter SearchApi API Key": "",
 	"Enter SearchApi Engine": "",
@@ -408,6 +415,8 @@
 	"Model {{modelId}} not found": "Model {{modelId}} nie został znaleziony",
 	"Model {{modelName}} is not vision capable": "Model {{modelName}} nie jest w stanie zobaczyć",
 	"Model {{name}} is now {{status}}": "Model {{name}} to teraz {{status}}",
+	"Model {{name}} is now at the top": "",
+	"Model accepts image inputs": "",
 	"Model created successfully!": "",
 	"Model filesystem path detected. Model shortname is required for update, cannot continue.": "Wykryto ścieżkę systemu plików modelu. Wymagana jest krótka nazwa modelu do aktualizacji, nie można kontynuować.",
 	"Model ID": "Identyfikator modelu",
@@ -419,6 +428,7 @@
 	"Modelfile Content": "Zawartość pliku modelu",
 	"Models": "Modele",
 	"More": "Więcej",
+	"Move to Top": "",
 	"Name": "Nazwa",
 	"Name Tag": "Etykieta nazwy",
 	"Name your model": "Nazwij swój model",
@@ -464,6 +474,8 @@
 	"OpenAI URL/Key required.": "URL/Klucz OpenAI jest wymagany.",
 	"or": "lub",
 	"Other": "Inne",
+	"Output format": "",
+	"Overview": "",
 	"Password": "Hasło",
 	"PDF document (.pdf)": "Dokument PDF (.pdf)",
 	"PDF Extract Images (OCR)": "PDF Wyodrębnij obrazy (OCR)",
@@ -526,6 +538,7 @@
 	"Save": "Zapisz",
 	"Save & Create": "Zapisz i utwórz",
 	"Save & Update": "Zapisz i zaktualizuj",
+	"Save As Copy": "",
 	"Save Tag": "",
 	"Saving chat logs directly to your browser's storage is no longer supported. Please take a moment to download and delete your chat logs by clicking the button below. Don't worry, you can easily re-import your chat logs to the backend through": "Bezpośrednie zapisywanie dzienników czatu w pamięci przeglądarki nie jest już obsługiwane. Prosimy o pobranie i usunięcie dzienników czatu, klikając poniższy przycisk. Nie martw się, możesz łatwo ponownie zaimportować dzienniki czatu do backendu za pomocą",
 	"Scan": "Skanuj",
@@ -570,16 +583,20 @@
 	"Send": "Wyślij",
 	"Send a Message": "Wyślij Wiadomość",
 	"Send message": "Wyślij wiadomość",
+	"Sends `stream_options: { include_usage: true }` in the request.\nSupported providers will return token usage information in the response when set.": "",
 	"September": "Wrzesień",
 	"Serper API Key": "Klucz API Serper",
 	"Serply API Key": "",
 	"Serpstack API Key": "Klucz API Serpstack",
 	"Server connection verified": "Połączenie z serwerem zweryfikowane",
 	"Set as default": "Ustaw jako domyślne",
+	"Set CFG Scale": "",
 	"Set Default Model": "Ustaw domyślny model",
 	"Set embedding model (e.g. {{model}})": "Ustaw model osadzania (e.g. {{model}})",
 	"Set Image Size": "Ustaw rozmiar obrazu",
 	"Set reranking model (e.g. {{model}})": "Ustaw zmianę rankingu modelu (e.g. {{model}})",
+	"Set Sampler": "",
+	"Set Scheduler": "",
 	"Set Steps": "Ustaw kroki",
 	"Set Task Model": "Ustawianie modelu zadań",
 	"Set Voice": "Ustaw głos",
@@ -602,7 +619,9 @@
 	"Source": "Źródło",
 	"Speech recognition error: {{error}}": "Błąd rozpoznawania mowy: {{error}}",
 	"Speech-to-Text Engine": "Silnik mowy na tekst",
+	"Speed Rate": "",
 	"Stop Sequence": "Zatrzymaj sekwencję",
+	"Stream Chat Response": "",
 	"STT Model": "",
 	"STT Settings": "Ustawienia STT",
 	"Submit": "Zatwierdź",

Because a large number of files were changed in this diff, some files are not shown.