Ver Fonte

Merge pull request #4811 from open-webui/dev

0.3.16
Timothy Jaeryang Baek há 8 meses atrás
pai
commit
693dc3107a
100 ficheiros alterados com 2691 adições e 3116 exclusões
  1. 44 28
      .github/workflows/docker-build.yaml
  2. 26 0
      CHANGELOG.md
  3. 6 0
      backend/apps/audio/main.py
  4. 4 2
      backend/apps/images/main.py
  5. 26 3
      backend/apps/ollama/main.py
  6. 26 3
      backend/apps/rag/main.py
  7. 14 10
      backend/apps/rag/utils.py
  8. 8 21
      backend/apps/webui/internal/db.py
  9. 4 10
      backend/apps/webui/internal/wrappers.py
  10. 35 29
      backend/apps/webui/main.py
  11. 2 2
      backend/apps/webui/models/auths.py
  12. 12 9
      backend/apps/webui/models/chats.py
  13. 1 1
      backend/apps/webui/models/documents.py
  14. 8 1
      backend/apps/webui/models/files.py
  15. 1 1
      backend/apps/webui/models/functions.py
  16. 1 1
      backend/apps/webui/models/models.py
  17. 1 1
      backend/apps/webui/models/tags.py
  18. 1 1
      backend/apps/webui/models/tools.py
  19. 5 2
      backend/apps/webui/routers/auths.py
  20. 8 6
      backend/apps/webui/routers/files.py
  21. 35 30
      backend/apps/webui/routers/memories.py
  22. 20 0
      backend/apps/webui/utils.py
  23. 163 217
      backend/config.py
  24. 0 36
      backend/data/config.json
  25. 252 0
      backend/env.py
  26. 48 42
      backend/main.py
  27. 1 1
      backend/migrations/env.py
  28. 43 0
      backend/migrations/versions/ca81bd47c050_add_config_table.py
  29. 6 5
      backend/requirements.txt
  30. 1 1
      backend/utils/misc.py
  31. 4 4
      backend/utils/utils.py
  32. 6 6
      package-lock.json
  33. 1 1
      package.json
  34. 6 5
      pyproject.toml
  35. 0 750
      requirements-dev.lock
  36. 0 750
      requirements.lock
  37. 5 1
      src/lib/apis/audio/index.ts
  38. 1 1
      src/lib/apis/memories/index.ts
  39. 2 2
      src/lib/apis/rag/index.ts
  40. 4 1
      src/lib/components/admin/Settings.svelte
  41. 49 16
      src/lib/components/admin/Settings/Audio.svelte
  42. 75 8
      src/lib/components/admin/Settings/Documents.svelte
  43. 196 100
      src/lib/components/chat/Chat.svelte
  44. 45 8
      src/lib/components/chat/ChatControls.svelte
  45. 1 3
      src/lib/components/chat/Controls/Controls.svelte
  46. 91 170
      src/lib/components/chat/MessageInput.svelte
  47. 327 281
      src/lib/components/chat/MessageInput/CallOverlay.svelte
  48. 131 0
      src/lib/components/chat/MessageInput/Commands.svelte
  49. 14 10
      src/lib/components/chat/MessageInput/Commands/Documents.svelte
  50. 90 0
      src/lib/components/chat/MessageInput/Commands/Models.svelte
  51. 23 18
      src/lib/components/chat/MessageInput/Commands/Prompts.svelte
  52. 0 181
      src/lib/components/chat/MessageInput/Models.svelte
  53. 1 0
      src/lib/components/chat/Messages/Markdown/KatexRenderer.svelte
  54. 18 5
      src/lib/components/chat/Messages/Markdown/MarkdownInlineTokens.svelte
  55. 1 1
      src/lib/components/chat/Messages/Markdown/MarkdownTokens.svelte
  56. 217 129
      src/lib/components/chat/Messages/ResponseMessage.svelte
  57. 1 0
      src/lib/components/workspace/Documents.svelte
  58. 11 2
      src/lib/i18n/locales/ar-BH/translation.json
  59. 11 2
      src/lib/i18n/locales/bg-BG/translation.json
  60. 11 2
      src/lib/i18n/locales/bn-BD/translation.json
  61. 27 18
      src/lib/i18n/locales/ca-ES/translation.json
  62. 11 2
      src/lib/i18n/locales/ceb-PH/translation.json
  63. 11 2
      src/lib/i18n/locales/de-DE/translation.json
  64. 11 2
      src/lib/i18n/locales/dg-DG/translation.json
  65. 11 2
      src/lib/i18n/locales/en-GB/translation.json
  66. 11 2
      src/lib/i18n/locales/en-US/translation.json
  67. 11 2
      src/lib/i18n/locales/es-ES/translation.json
  68. 11 2
      src/lib/i18n/locales/fa-IR/translation.json
  69. 11 2
      src/lib/i18n/locales/fi-FI/translation.json
  70. 12 3
      src/lib/i18n/locales/fr-CA/translation.json
  71. 81 72
      src/lib/i18n/locales/fr-FR/translation.json
  72. 11 2
      src/lib/i18n/locales/he-IL/translation.json
  73. 11 2
      src/lib/i18n/locales/hi-IN/translation.json
  74. 11 2
      src/lib/i18n/locales/hr-HR/translation.json
  75. 11 2
      src/lib/i18n/locales/id-ID/translation.json
  76. 11 2
      src/lib/i18n/locales/it-IT/translation.json
  77. 11 2
      src/lib/i18n/locales/ja-JP/translation.json
  78. 11 2
      src/lib/i18n/locales/ka-GE/translation.json
  79. 11 2
      src/lib/i18n/locales/ko-KR/translation.json
  80. 11 2
      src/lib/i18n/locales/lt-LT/translation.json
  81. 11 2
      src/lib/i18n/locales/ms-MY/translation.json
  82. 11 2
      src/lib/i18n/locales/nb-NO/translation.json
  83. 11 2
      src/lib/i18n/locales/nl-NL/translation.json
  84. 11 2
      src/lib/i18n/locales/pa-IN/translation.json
  85. 11 2
      src/lib/i18n/locales/pl-PL/translation.json
  86. 11 2
      src/lib/i18n/locales/pt-BR/translation.json
  87. 11 2
      src/lib/i18n/locales/pt-PT/translation.json
  88. 11 2
      src/lib/i18n/locales/ro-RO/translation.json
  89. 11 2
      src/lib/i18n/locales/ru-RU/translation.json
  90. 11 2
      src/lib/i18n/locales/sr-RS/translation.json
  91. 11 2
      src/lib/i18n/locales/sv-SE/translation.json
  92. 11 2
      src/lib/i18n/locales/th-TH/translation.json
  93. 3 0
      src/lib/i18n/locales/tk-TM/transaltion.json
  94. 11 2
      src/lib/i18n/locales/tk-TW/translation.json
  95. 11 2
      src/lib/i18n/locales/tr-TR/translation.json
  96. 11 2
      src/lib/i18n/locales/uk-UA/translation.json
  97. 28 19
      src/lib/i18n/locales/vi-VN/translation.json
  98. 27 18
      src/lib/i18n/locales/zh-CN/translation.json
  99. 11 2
      src/lib/i18n/locales/zh-TW/translation.json
  100. 6 0
      src/lib/types/index.ts

+ 44 - 28
.github/workflows/docker-build.yaml

@@ -56,19 +56,25 @@ jobs:
           username: ${{ github.actor }}
           password: ${{ secrets.GITHUB_TOKEN }}
 
-      - name: Extract metadata for Docker images (default latest tag)
+      - name: Get version number from package.json
+        id: get_version
+        run: |
+          VERSION=$(jq -r '.version' package.json)
+          echo "version=$VERSION" >> $GITHUB_OUTPUT
+
+      - name: Extract metadata for Docker images
         id: meta
         uses: docker/metadata-action@v5
         with:
           images: ${{ env.FULL_IMAGE_NAME }}
           tags: |
+            type=raw,value=latest,enable=${{ github.ref == 'refs/heads/main' }}
+            type=raw,value=${{ steps.get_version.outputs.version }},enable=${{ github.ref == 'refs/heads/main' }}
             type=ref,event=branch
             type=ref,event=tag
             type=sha,prefix=git-
             type=semver,pattern={{version}}
             type=semver,pattern={{major}}.{{minor}}
-          flavor: |
-            latest=${{ github.ref == 'refs/heads/main' }}
 
       - name: Extract metadata for Docker cache
         id: cache-meta
@@ -82,7 +88,7 @@ jobs:
             prefix=cache-${{ matrix.platform }}-
             latest=false
 
-      - name: Build Docker image (latest)
+      - name: Build Docker image
         uses: docker/build-push-action@v5
         id: build
         with:
@@ -90,7 +96,8 @@ jobs:
           push: true
           platforms: ${{ matrix.platform }}
           labels: ${{ steps.meta.outputs.labels }}
-          outputs: type=image,name=${{ env.FULL_IMAGE_NAME }},push-by-digest=true,name-canonical=true,push=true
+          tags: ${{ steps.meta.outputs.tags }}
+          outputs: type=image,name=${{ env.FULL_IMAGE_NAME }},push=true
           cache-from: type=registry,ref=${{ steps.cache-meta.outputs.tags }}
           cache-to: type=registry,ref=${{ steps.cache-meta.outputs.tags }},mode=max
           build-args: |
@@ -153,21 +160,25 @@ jobs:
           username: ${{ github.actor }}
           password: ${{ secrets.GITHUB_TOKEN }}
 
-      - name: Extract metadata for Docker images (cuda tag)
+      - name: Get version number from package.json
+        id: get_version
+        run: |
+          VERSION=$(jq -r '.version' package.json)
+          echo "version=$VERSION" >> $GITHUB_OUTPUT
+
+      - name: Extract metadata for Docker images
         id: meta
         uses: docker/metadata-action@v5
         with:
           images: ${{ env.FULL_IMAGE_NAME }}
           tags: |
-            type=ref,event=branch
-            type=ref,event=tag
-            type=sha,prefix=git-
-            type=semver,pattern={{version}}
-            type=semver,pattern={{major}}.{{minor}}
-            type=raw,enable=${{ github.ref == 'refs/heads/main' }},prefix=,suffix=,value=cuda
-          flavor: |
-            latest=${{ github.ref == 'refs/heads/main' }}
-            suffix=-cuda,onlatest=true
+            type=raw,value=latest-cuda,enable=${{ github.ref == 'refs/heads/main' }}
+            type=raw,value=${{ steps.get_version.outputs.version }}-cuda,enable=${{ github.ref == 'refs/heads/main' }}
+            type=ref,event=branch,suffix=-cuda
+            type=ref,event=tag,suffix=-cuda
+            type=sha,prefix=git-,suffix=-cuda
+            type=semver,pattern={{version}},suffix=-cuda
+            type=semver,pattern={{major}}.{{minor}},suffix=-cuda
 
       - name: Extract metadata for Docker cache
         id: cache-meta
@@ -189,7 +200,8 @@ jobs:
           push: true
           platforms: ${{ matrix.platform }}
           labels: ${{ steps.meta.outputs.labels }}
-          outputs: type=image,name=${{ env.FULL_IMAGE_NAME }},push-by-digest=true,name-canonical=true,push=true
+          tags: ${{ steps.meta.outputs.tags }}
+          outputs: type=image,name=${{ env.FULL_IMAGE_NAME }},push=true
           cache-from: type=registry,ref=${{ steps.cache-meta.outputs.tags }}
           cache-to: type=registry,ref=${{ steps.cache-meta.outputs.tags }},mode=max
           build-args: |
@@ -253,21 +265,25 @@ jobs:
           username: ${{ github.actor }}
           password: ${{ secrets.GITHUB_TOKEN }}
 
-      - name: Extract metadata for Docker images (ollama tag)
+      - name: Get version number from package.json
+        id: get_version
+        run: |
+          VERSION=$(jq -r '.version' package.json)
+          echo "version=$VERSION" >> $GITHUB_OUTPUT
+
+      - name: Extract metadata for Docker images
         id: meta
         uses: docker/metadata-action@v5
         with:
           images: ${{ env.FULL_IMAGE_NAME }}
           tags: |
-            type=ref,event=branch
-            type=ref,event=tag
-            type=sha,prefix=git-
-            type=semver,pattern={{version}}
-            type=semver,pattern={{major}}.{{minor}}
-            type=raw,enable=${{ github.ref == 'refs/heads/main' }},prefix=,suffix=,value=ollama
-          flavor: |
-            latest=${{ github.ref == 'refs/heads/main' }}
-            suffix=-ollama,onlatest=true
+            type=raw,value=latest-ollama,enable=${{ github.ref == 'refs/heads/main' }}
+            type=raw,value=${{ steps.get_version.outputs.version }}-ollama,enable=${{ github.ref == 'refs/heads/main' }}
+            type=ref,event=branch,suffix=-ollama
+            type=ref,event=tag,suffix=-ollama
+            type=sha,prefix=git-,suffix=-ollama
+            type=semver,pattern={{version}},suffix=-ollama
+            type=semver,pattern={{major}}.{{minor}},suffix=-ollama
 
       - name: Extract metadata for Docker cache
         id: cache-meta
@@ -289,7 +305,8 @@ jobs:
           push: true
           platforms: ${{ matrix.platform }}
           labels: ${{ steps.meta.outputs.labels }}
-          outputs: type=image,name=${{ env.FULL_IMAGE_NAME }},push-by-digest=true,name-canonical=true,push=true
+          tags: ${{ steps.meta.outputs.tags }}
+          outputs: type=image,name=${{ env.FULL_IMAGE_NAME }},push=true
           cache-from: type=registry,ref=${{ steps.cache-meta.outputs.tags }}
           cache-to: type=registry,ref=${{ steps.cache-meta.outputs.tags }},mode=max
           build-args: |
@@ -309,7 +326,6 @@ jobs:
           path: /tmp/digests/*
           if-no-files-found: error
           retention-days: 1
-
   merge-main-images:
     runs-on: ubuntu-latest
     needs: [ build-main-image ]

+ 26 - 0
CHANGELOG.md

@@ -5,6 +5,32 @@ All notable changes to this project will be documented in this file.
 The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/),
 and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
 
+## [0.3.16] - 2024-08-27
+
+### Added
+
+- **🚀 Config DB Migration**: Migrated configuration handling from config.json to the database, enabling high-availability setups and load balancing across multiple Open WebUI instances.
+- **🔗 Call Mode Activation via URL**: Added a 'call=true' URL search parameter enabling direct shortcuts to activate call mode, enhancing user interaction on mobile devices.
+- **✨ TTS Content Control**: Added functionality to control how message content is segmented for Text-to-Speech (TTS) generation requests, allowing for more flexible speech output options.
+- **😄 Show Knowledge Search Status**: Enhanced model usage transparency by displaying status when working with knowledge-augmented models, helping users understand the system's state during queries.
+- **👆 Click-to-Copy for Codespan**: Enhanced interactive experience in the WebUI by allowing users to click to copy content from code spans directly.
+- **🚫 API User Blocking via Model Filter**: Introduced the ability to block API users based on customized model filters, enhancing security and control over API access.
+- **🎬 Call Overlay Styling**: Adjusted call overlay styling on large screens to not cover the entire interface, but only the chat control area, for a more unobtrusive interaction experience.
+
+### Fixed
+
+- **🔧 LaTeX Rendering Issue**: Addressed an issue that affected the correct rendering of LaTeX.
+- **📁 File Leak Prevention**: Resolved the issue of uploaded files mistakenly being accessible across user chats.
+- **🔧 Pipe Functions with '**files**' Param**: Fixed issues with '**files**' parameter not functioning correctly in pipe functions.
+- **📝 Markdown Processing for RAG**: Fixed issues with processing Markdown in files.
+- **🚫 Duplicate System Prompts**: Fixed bugs causing system prompts to duplicate.
+
+### Changed
+
+- **🔋 Wakelock Permission**: Optimized the activation of wakelock to only engage during call mode, conserving device resources and improving battery performance during idle periods.
+- **🔍 Content-Type for Ollama Chats**: Added 'application/x-ndjson' content-type to '/api/chat' endpoint responses to match raw Ollama responses.
+- **✋ Disable Signups Conditionally**: Implemented conditional logic to disable sign-ups when 'ENABLE_LOGIN_FORM' is set to false.
+
 ## [0.3.15] - 2024-08-21
 
 ### Added

+ 6 - 0
backend/apps/audio/main.py

@@ -37,6 +37,7 @@ from config import (
     AUDIO_TTS_ENGINE,
     AUDIO_TTS_MODEL,
     AUDIO_TTS_VOICE,
+    AUDIO_TTS_SPLIT_ON,
     AppConfig,
     CORS_ALLOW_ORIGIN,
 )
@@ -72,6 +73,7 @@ app.state.config.TTS_ENGINE = AUDIO_TTS_ENGINE
 app.state.config.TTS_MODEL = AUDIO_TTS_MODEL
 app.state.config.TTS_VOICE = AUDIO_TTS_VOICE
 app.state.config.TTS_API_KEY = AUDIO_TTS_API_KEY
+app.state.config.TTS_SPLIT_ON = AUDIO_TTS_SPLIT_ON
 
 # setting device type for whisper model
 whisper_device_type = DEVICE_TYPE if DEVICE_TYPE and DEVICE_TYPE == "cuda" else "cpu"
@@ -88,6 +90,7 @@ class TTSConfigForm(BaseModel):
     ENGINE: str
     MODEL: str
     VOICE: str
+    SPLIT_ON: str
 
 
 class STTConfigForm(BaseModel):
@@ -139,6 +142,7 @@ async def get_audio_config(user=Depends(get_admin_user)):
             "ENGINE": app.state.config.TTS_ENGINE,
             "MODEL": app.state.config.TTS_MODEL,
             "VOICE": app.state.config.TTS_VOICE,
+            "SPLIT_ON": app.state.config.TTS_SPLIT_ON,
         },
         "stt": {
             "OPENAI_API_BASE_URL": app.state.config.STT_OPENAI_API_BASE_URL,
@@ -159,6 +163,7 @@ async def update_audio_config(
     app.state.config.TTS_ENGINE = form_data.tts.ENGINE
     app.state.config.TTS_MODEL = form_data.tts.MODEL
     app.state.config.TTS_VOICE = form_data.tts.VOICE
+    app.state.config.TTS_SPLIT_ON = form_data.tts.SPLIT_ON
 
     app.state.config.STT_OPENAI_API_BASE_URL = form_data.stt.OPENAI_API_BASE_URL
     app.state.config.STT_OPENAI_API_KEY = form_data.stt.OPENAI_API_KEY
@@ -173,6 +178,7 @@ async def update_audio_config(
             "ENGINE": app.state.config.TTS_ENGINE,
             "MODEL": app.state.config.TTS_MODEL,
             "VOICE": app.state.config.TTS_VOICE,
+            "SPLIT_ON": app.state.config.TTS_SPLIT_ON,
         },
         "stt": {
             "OPENAI_API_BASE_URL": app.state.config.STT_OPENAI_API_BASE_URL,

+ 4 - 2
backend/apps/images/main.py

@@ -15,6 +15,7 @@ import json
 import logging
 import re
 import requests
+import asyncio
 
 from utils.utils import (
     get_verified_user,
@@ -533,7 +534,9 @@ async def image_generations(
             if form_data.negative_prompt is not None:
                 data["negative_prompt"] = form_data.negative_prompt
 
-            r = requests.post(
+            # Use asyncio.to_thread for the requests.post call
+            r = await asyncio.to_thread(
+                requests.post,
                 url=f"{app.state.config.AUTOMATIC1111_BASE_URL}/sdapi/v1/txt2img",
                 json=data,
                 headers={"authorization": get_automatic1111_api_auth()},
@@ -553,7 +556,6 @@ async def image_generations(
                     json.dump({**data, "info": res["info"]}, f)
 
             return images
-
     except Exception as e:
         error = e
         if r != None:

+ 26 - 3
backend/apps/ollama/main.py

@@ -148,7 +148,9 @@ async def cleanup_response(
         await session.close()
 
 
-async def post_streaming_url(url: str, payload: Union[str, bytes], stream: bool = True):
+async def post_streaming_url(
+    url: str, payload: Union[str, bytes], stream: bool = True, content_type=None
+):
     r = None
     try:
         session = aiohttp.ClientSession(
@@ -162,10 +164,13 @@ async def post_streaming_url(url: str, payload: Union[str, bytes], stream: bool
         r.raise_for_status()
 
         if stream:
+            headers = dict(r.headers)
+            if content_type:
+                headers["Content-Type"] = content_type
             return StreamingResponse(
                 r.content,
                 status_code=r.status,
-                headers=dict(r.headers),
+                headers=headers,
                 background=BackgroundTask(
                     cleanup_response, response=r, session=session
                 ),
@@ -737,6 +742,14 @@ async def generate_chat_completion(
         del payload["metadata"]
 
     model_id = form_data.model
+
+    if app.state.config.ENABLE_MODEL_FILTER:
+        if user.role == "user" and model_id not in app.state.config.MODEL_FILTER_LIST:
+            raise HTTPException(
+                status_code=403,
+                detail="Model not found",
+            )
+
     model_info = Models.get_model_by_id(model_id)
 
     if model_info:
@@ -761,7 +774,9 @@ async def generate_chat_completion(
     log.info(f"url: {url}")
     log.debug(payload)
 
-    return await post_streaming_url(f"{url}/api/chat", json.dumps(payload))
+    return await post_streaming_url(
+        f"{url}/api/chat", json.dumps(payload), content_type="application/x-ndjson"
+    )
 
 
 # TODO: we should update this part once Ollama supports other types
@@ -797,6 +812,14 @@ async def generate_openai_chat_completion(
         del payload["metadata"]
 
     model_id = completion_form.model
+
+    if app.state.config.ENABLE_MODEL_FILTER:
+        if user.role == "user" and model_id not in app.state.config.MODEL_FILTER_LIST:
+            raise HTTPException(
+                status_code=403,
+                detail="Model not found",
+            )
+
     model_info = Models.get_model_by_id(model_id)
 
     if model_info:

+ 26 - 3
backend/apps/rag/main.py

@@ -95,6 +95,8 @@ from config import (
     TIKA_SERVER_URL,
     RAG_TOP_K,
     RAG_RELEVANCE_THRESHOLD,
+    RAG_FILE_MAX_SIZE,
+    RAG_FILE_MAX_COUNT,
     RAG_EMBEDDING_ENGINE,
     RAG_EMBEDDING_MODEL,
     RAG_EMBEDDING_MODEL_AUTO_UPDATE,
@@ -143,6 +145,8 @@ app.state.config = AppConfig()
 
 app.state.config.TOP_K = RAG_TOP_K
 app.state.config.RELEVANCE_THRESHOLD = RAG_RELEVANCE_THRESHOLD
+app.state.config.FILE_MAX_SIZE = RAG_FILE_MAX_SIZE
+app.state.config.FILE_MAX_COUNT = RAG_FILE_MAX_COUNT
 
 app.state.config.ENABLE_RAG_HYBRID_SEARCH = ENABLE_RAG_HYBRID_SEARCH
 app.state.config.ENABLE_RAG_WEB_LOADER_SSL_VERIFICATION = (
@@ -393,6 +397,10 @@ async def get_rag_config(user=Depends(get_admin_user)):
     return {
         "status": True,
         "pdf_extract_images": app.state.config.PDF_EXTRACT_IMAGES,
+        "file": {
+            "max_size": app.state.config.FILE_MAX_SIZE,
+            "max_count": app.state.config.FILE_MAX_COUNT,
+        },
         "content_extraction": {
             "engine": app.state.config.CONTENT_EXTRACTION_ENGINE,
             "tika_server_url": app.state.config.TIKA_SERVER_URL,
@@ -426,6 +434,11 @@ async def get_rag_config(user=Depends(get_admin_user)):
     }
 
 
+class FileConfig(BaseModel):
+    max_size: Optional[int] = None
+    max_count: Optional[int] = None
+
+
 class ContentExtractionConfig(BaseModel):
     engine: str = ""
     tika_server_url: Optional[str] = None
@@ -464,6 +477,7 @@ class WebConfig(BaseModel):
 
 class ConfigUpdateForm(BaseModel):
     pdf_extract_images: Optional[bool] = None
+    file: Optional[FileConfig] = None
     content_extraction: Optional[ContentExtractionConfig] = None
     chunk: Optional[ChunkParamUpdateForm] = None
     youtube: Optional[YoutubeLoaderConfig] = None
@@ -478,6 +492,10 @@ async def update_rag_config(form_data: ConfigUpdateForm, user=Depends(get_admin_
         else app.state.config.PDF_EXTRACT_IMAGES
     )
 
+    if form_data.file is not None:
+        app.state.config.FILE_MAX_SIZE = form_data.file.max_size
+        app.state.config.FILE_MAX_COUNT = form_data.file.max_count
+
     if form_data.content_extraction is not None:
         log.info(f"Updating text settings: {form_data.content_extraction}")
         app.state.config.CONTENT_EXTRACTION_ENGINE = form_data.content_extraction.engine
@@ -519,6 +537,10 @@ async def update_rag_config(form_data: ConfigUpdateForm, user=Depends(get_admin_
     return {
         "status": True,
         "pdf_extract_images": app.state.config.PDF_EXTRACT_IMAGES,
+        "file": {
+            "max_size": app.state.config.FILE_MAX_SIZE,
+            "max_count": app.state.config.FILE_MAX_COUNT,
+        },
         "content_extraction": {
             "engine": app.state.config.CONTENT_EXTRACTION_ENGINE,
             "tika_server_url": app.state.config.TIKA_SERVER_URL,
@@ -590,6 +612,7 @@ async def update_query_settings(
     app.state.config.ENABLE_RAG_HYBRID_SEARCH = (
         form_data.hybrid if form_data.hybrid else False
     )
+
     return {
         "status": True,
         "template": app.state.config.RAG_TEMPLATE,
@@ -1373,12 +1396,12 @@ def scan_docs_dir(user=Depends(get_admin_user)):
     return True
 
 
-@app.get("/reset/db")
+@app.post("/reset/db")
 def reset_vector_db(user=Depends(get_admin_user)):
     CHROMA_CLIENT.reset()
 
 
-@app.get("/reset/uploads")
+@app.post("/reset/uploads")
 def reset_upload_dir(user=Depends(get_admin_user)) -> bool:
     folder = f"{UPLOAD_DIR}"
     try:
@@ -1402,7 +1425,7 @@ def reset_upload_dir(user=Depends(get_admin_user)) -> bool:
     return True
 
 
-@app.get("/reset")
+@app.post("/reset")
 def reset(user=Depends(get_admin_user)) -> bool:
     folder = f"{UPLOAD_DIR}"
     for filename in os.listdir(folder):

+ 14 - 10
backend/apps/rag/utils.py

@@ -149,16 +149,20 @@ def query_collection(
 ):
     results = []
     for collection_name in collection_names:
-        try:
-            result = query_doc(
-                collection_name=collection_name,
-                query=query,
-                k=k,
-                embedding_function=embedding_function,
-            )
-            results.append(result)
-        except Exception:
+        if collection_name:
+            try:
+                result = query_doc(
+                    collection_name=collection_name,
+                    query=query,
+                    k=k,
+                    embedding_function=embedding_function,
+                )
+                results.append(result)
+            except Exception:
+                pass
+        else:
             pass
+
     return merge_and_sort_query_results(results, k=k)
 
 
@@ -257,7 +261,7 @@ def get_rag_context(
         collection_names = (
             file["collection_names"]
             if file["type"] == "collection"
-            else [file["collection_name"]]
+            else [file["collection_name"]] if file["collection_name"] else []
         )
 
         collection_names = set(collection_names).difference(extracted_collections)

+ 8 - 21
backend/apps/webui/internal/db.py

@@ -3,18 +3,19 @@ import logging
 import json
 from contextlib import contextmanager
 
-from peewee_migrate import Router
-from apps.webui.internal.wrappers import register_connection
 
 from typing import Optional, Any
 from typing_extensions import Self
 
 from sqlalchemy import create_engine, types, Dialect
+from sqlalchemy.sql.type_api import _T
 from sqlalchemy.ext.declarative import declarative_base
 from sqlalchemy.orm import sessionmaker, scoped_session
-from sqlalchemy.sql.type_api import _T
 
-from config import SRC_LOG_LEVELS, DATA_DIR, DATABASE_URL, BACKEND_DIR
+
+from peewee_migrate import Router
+from apps.webui.internal.wrappers import register_connection
+from env import SRC_LOG_LEVELS, BACKEND_DIR, DATABASE_URL
 
 log = logging.getLogger(__name__)
 log.setLevel(SRC_LOG_LEVELS["DB"])
@@ -42,34 +43,21 @@ class JSONField(types.TypeDecorator):
             return json.loads(value)
 
 
-# Check if the file exists
-if os.path.exists(f"{DATA_DIR}/ollama.db"):
-    # Rename the file
-    os.rename(f"{DATA_DIR}/ollama.db", f"{DATA_DIR}/webui.db")
-    log.info("Database migrated from Ollama-WebUI successfully.")
-else:
-    pass
-
-
 # Workaround to handle the peewee migration
 # This is required to ensure the peewee migration is handled before the alembic migration
 def handle_peewee_migration(DATABASE_URL):
+    # db = None
     try:
-        # Replace the postgresql:// with postgres:// and %40 with @ in the DATABASE_URL
-        db = register_connection(
-            DATABASE_URL.replace("postgresql://", "postgres://").replace("%40", "@")
-        )
+        # Replace the postgresql:// with postgres:// to handle the peewee migration
+        db = register_connection(DATABASE_URL.replace("postgresql://", "postgres://"))
         migrate_dir = BACKEND_DIR / "apps" / "webui" / "internal" / "migrations"
         router = Router(db, logger=log, migrate_dir=migrate_dir)
         router.run()
         db.close()
 
-        # check if db connection has been closed
-
     except Exception as e:
         log.error(f"Failed to initialize the database connection: {e}")
         raise
-
     finally:
         # Properly closing the database connection
         if db and not db.is_closed():
@@ -98,7 +86,6 @@ Base = declarative_base()
 Session = scoped_session(SessionLocal)
 
 
-# Dependency
 def get_session():
     db = SessionLocal()
     try:

+ 4 - 10
backend/apps/webui/internal/wrappers.py

@@ -6,7 +6,7 @@ import logging
 from playhouse.db_url import connect, parse
 from playhouse.shortcuts import ReconnectMixin
 
-from config import SRC_LOG_LEVELS
+from env import SRC_LOG_LEVELS
 
 log = logging.getLogger(__name__)
 log.setLevel(SRC_LOG_LEVELS["DB"])
@@ -43,7 +43,7 @@ class ReconnectingPostgresqlDatabase(CustomReconnectMixin, PostgresqlDatabase):
 
 
 def register_connection(db_url):
-    db = connect(db_url)
+    db = connect(db_url, unquote_password=True)
     if isinstance(db, PostgresqlDatabase):
         # Enable autoconnect for SQLite databases, managed by Peewee
         db.autoconnect = True
@@ -51,16 +51,10 @@ def register_connection(db_url):
         log.info("Connected to PostgreSQL database")
 
         # Get the connection details
-        connection = parse(db_url)
+        connection = parse(db_url, unquote_password=True)
 
         # Use our custom database class that supports reconnection
-        db = ReconnectingPostgresqlDatabase(
-            connection["database"],
-            user=connection["user"],
-            password=connection["password"],
-            host=connection["host"],
-            port=connection["port"],
-        )
+        db = ReconnectingPostgresqlDatabase(**connection)
         db.connect(reuse_if_open=True)
     elif isinstance(db, SqliteDatabase):
         # Enable autoconnect for SQLite databases, managed by Peewee

+ 35 - 29
backend/apps/webui/main.py

@@ -56,12 +56,15 @@ from apps.socket.main import get_event_call, get_event_emitter
 
 import inspect
 import json
+import logging
 
 from typing import Iterator, Generator, AsyncGenerator
 from pydantic import BaseModel
 
 app = FastAPI()
 
+log = logging.getLogger(__name__)
+
 app.state.config = AppConfig()
 
 app.state.config.ENABLE_SIGNUP = ENABLE_SIGNUP
@@ -243,43 +246,37 @@ def get_pipe_id(form_data: dict) -> str:
     return pipe_id
 
 
-def get_function_params(function_module, form_data, user, extra_params={}):
+def get_function_params(function_module, form_data, user, extra_params=None):
+    if extra_params is None:
+        extra_params = {}
+
     pipe_id = get_pipe_id(form_data)
+
     # Get the signature of the function
     sig = inspect.signature(function_module.pipe)
-    params = {"body": form_data}
-
-    for key, value in extra_params.items():
-        if key in sig.parameters:
-            params[key] = value
-
-    if "__user__" in sig.parameters:
-        __user__ = {
-            "id": user.id,
-            "email": user.email,
-            "name": user.name,
-            "role": user.role,
-        }
+    params = {"body": form_data} | {
+        k: v for k, v in extra_params.items() if k in sig.parameters
+    }
 
+    if "__user__" in params and hasattr(function_module, "UserValves"):
+        user_valves = Functions.get_user_valves_by_id_and_user_id(pipe_id, user.id)
         try:
-            if hasattr(function_module, "UserValves"):
-                __user__["valves"] = function_module.UserValves(
-                    **Functions.get_user_valves_by_id_and_user_id(pipe_id, user.id)
-                )
+            params["__user__"]["valves"] = function_module.UserValves(**user_valves)
         except Exception as e:
-            print(e)
+            log.exception(e)
+            params["__user__"]["valves"] = function_module.UserValves()
 
-        params["__user__"] = __user__
     return params
 
 
 async def generate_function_chat_completion(form_data, user):
     model_id = form_data.get("model")
     model_info = Models.get_model_by_id(model_id)
+
     metadata = form_data.pop("metadata", {})
+
     files = metadata.get("files", [])
     tool_ids = metadata.get("tool_ids", [])
-
     # Check if tool_ids is None
     if tool_ids is None:
         tool_ids = []
@@ -298,16 +295,25 @@ async def generate_function_chat_completion(form_data, user):
         "__event_emitter__": __event_emitter__,
         "__event_call__": __event_call__,
         "__task__": __task__,
-    }
-    tools_params = {
-        **extra_params,
-        "__model__": app.state.MODELS[form_data["model"]],
-        "__messages__": form_data["messages"],
         "__files__": files,
+        "__user__": {
+            "id": user.id,
+            "email": user.email,
+            "name": user.name,
+            "role": user.role,
+        },
     }
-
-    tools = get_tools(app, tool_ids, user, tools_params)
-    extra_params["__tools__"] = tools
+    extra_params["__tools__"] = get_tools(
+        app,
+        tool_ids,
+        user,
+        {
+            **extra_params,
+            "__model__": app.state.MODELS[form_data["model"]],
+            "__messages__": form_data["messages"],
+            "__files__": files,
+        },
+    )
 
     if model_info:
         if model_info.base_model_id:

+ 2 - 2
backend/apps/webui/models/auths.py

@@ -4,12 +4,12 @@ import uuid
 import logging
 from sqlalchemy import String, Column, Boolean, Text
 
-from apps.webui.models.users import UserModel, Users
 from utils.utils import verify_password
 
+from apps.webui.models.users import UserModel, Users
 from apps.webui.internal.db import Base, get_db
 
-from config import SRC_LOG_LEVELS
+from env import SRC_LOG_LEVELS
 
 log = logging.getLogger(__name__)
 log.setLevel(SRC_LOG_LEVELS["MODELS"])

+ 12 - 9
backend/apps/webui/models/chats.py

@@ -249,22 +249,25 @@ class ChatTable:
         self,
         user_id: str,
         include_archived: bool = False,
-        skip: int = 0,
-        limit: int = -1,
+        skip: Optional[int] = None,
+        limit: Optional[int] = None,
     ) -> list[ChatTitleIdResponse]:
         with get_db() as db:
             query = db.query(Chat).filter_by(user_id=user_id)
             if not include_archived:
                 query = query.filter_by(archived=False)
 
-            all_chats = (
-                query.order_by(Chat.updated_at.desc())
-                # limit cols
-                .with_entities(Chat.id, Chat.title, Chat.updated_at, Chat.created_at)
-                .limit(limit)
-                .offset(skip)
-                .all()
+            query = query.order_by(Chat.updated_at.desc()).with_entities(
+                Chat.id, Chat.title, Chat.updated_at, Chat.created_at
             )
+
+            if limit:
+                query = query.limit(limit)
+            if skip:
+                query = query.offset(skip)
+
+            all_chats = query.all()
+
             # result has to be destructured from sqlalchemy `row` and mapped to a dict since the `ChatModel` is not the returned dataclass.
             return [
                 ChatTitleIdResponse.model_validate(

+ 1 - 1
backend/apps/webui/models/documents.py

@@ -9,7 +9,7 @@ from apps.webui.internal.db import Base, get_db
 
 import json
 
-from config import SRC_LOG_LEVELS
+from env import SRC_LOG_LEVELS
 
 log = logging.getLogger(__name__)
 log.setLevel(SRC_LOG_LEVELS["MODELS"])

+ 8 - 1
backend/apps/webui/models/files.py

@@ -9,7 +9,7 @@ from apps.webui.internal.db import JSONField, Base, get_db
 
 import json
 
-from config import SRC_LOG_LEVELS
+from env import SRC_LOG_LEVELS
 
 log = logging.getLogger(__name__)
 log.setLevel(SRC_LOG_LEVELS["MODELS"])
@@ -98,6 +98,13 @@ class FilesTable:
 
             return [FileModel.model_validate(file) for file in db.query(File).all()]
 
+    def get_files_by_user_id(self, user_id: str) -> list[FileModel]:
+        with get_db() as db:
+            return [
+                FileModel.model_validate(file)
+                for file in db.query(File).filter_by(user_id=user_id).all()
+            ]
+
     def delete_file_by_id(self, id: str) -> bool:
 
         with get_db() as db:

+ 1 - 1
backend/apps/webui/models/functions.py

@@ -12,7 +12,7 @@ import json
 import copy
 
 
-from config import SRC_LOG_LEVELS
+from env import SRC_LOG_LEVELS
 
 log = logging.getLogger(__name__)
 log.setLevel(SRC_LOG_LEVELS["MODELS"])

+ 1 - 1
backend/apps/webui/models/models.py

@@ -6,7 +6,7 @@ from sqlalchemy import Column, BigInteger, Text
 
 from apps.webui.internal.db import Base, JSONField, get_db
 
-from config import SRC_LOG_LEVELS
+from env import SRC_LOG_LEVELS
 
 import time
 

+ 1 - 1
backend/apps/webui/models/tags.py

@@ -10,7 +10,7 @@ from sqlalchemy import String, Column, BigInteger, Text
 
 from apps.webui.internal.db import Base, get_db
 
-from config import SRC_LOG_LEVELS
+from env import SRC_LOG_LEVELS
 
 log = logging.getLogger(__name__)
 log.setLevel(SRC_LOG_LEVELS["MODELS"])

+ 1 - 1
backend/apps/webui/models/tools.py

@@ -11,7 +11,7 @@ import json
 import copy
 
 
-from config import SRC_LOG_LEVELS
+from env import SRC_LOG_LEVELS
 
 log = logging.getLogger(__name__)
 log.setLevel(SRC_LOG_LEVELS["MODELS"])

+ 5 - 2
backend/apps/webui/routers/auths.py

@@ -195,7 +195,11 @@ async def signin(request: Request, response: Response, form_data: SigninForm):
 
 @router.post("/signup", response_model=SigninResponse)
 async def signup(request: Request, response: Response, form_data: SignupForm):
-    if not request.app.state.config.ENABLE_SIGNUP and WEBUI_AUTH:
+    if (
+        not request.app.state.config.ENABLE_SIGNUP
+        and request.app.state.config.ENABLE_LOGIN_FORM
+        and WEBUI_AUTH
+    ):
         raise HTTPException(
             status.HTTP_403_FORBIDDEN, detail=ERROR_MESSAGES.ACCESS_PROHIBITED
         )
@@ -228,7 +232,6 @@ async def signup(request: Request, response: Response, form_data: SignupForm):
                 data={"id": user.id},
                 expires_delta=parse_duration(request.app.state.config.JWT_EXPIRES_IN),
             )
-            # response.set_cookie(key='token', value=token, httponly=True)
 
             # Set the cookie token
             response.set_cookie(

+ 8 - 6
backend/apps/webui/routers/files.py

@@ -106,7 +106,10 @@ def upload_file(file: UploadFile = File(...), user=Depends(get_verified_user)):
 
 @router.get("/", response_model=list[FileModel])
 async def list_files(user=Depends(get_verified_user)):
-    files = Files.get_files()
+    if user.role == "admin":
+        files = Files.get_files()
+    else:
+        files = Files.get_files_by_user_id(user.id)
     return files
 
 
@@ -156,7 +159,7 @@ async def delete_all_files(user=Depends(get_admin_user)):
 async def get_file_by_id(id: str, user=Depends(get_verified_user)):
     file = Files.get_file_by_id(id)
 
-    if file:
+    if file and (file.user_id == user.id or user.role == "admin"):
         return file
     else:
         raise HTTPException(
@@ -174,7 +177,7 @@ async def get_file_by_id(id: str, user=Depends(get_verified_user)):
 async def get_file_content_by_id(id: str, user=Depends(get_verified_user)):
     file = Files.get_file_by_id(id)
 
-    if file:
+    if file and (file.user_id == user.id or user.role == "admin"):
         file_path = Path(file.meta["path"])
 
         # Check if the file already exists in the cache
@@ -197,7 +200,7 @@ async def get_file_content_by_id(id: str, user=Depends(get_verified_user)):
 async def get_file_content_by_id(id: str, user=Depends(get_verified_user)):
     file = Files.get_file_by_id(id)
 
-    if file:
+    if file and (file.user_id == user.id or user.role == "admin"):
         file_path = Path(file.meta["path"])
 
         # Check if the file already exists in the cache
@@ -224,8 +227,7 @@ async def get_file_content_by_id(id: str, user=Depends(get_verified_user)):
 @router.delete("/{id}")
 async def delete_file_by_id(id: str, user=Depends(get_verified_user)):
     file = Files.get_file_by_id(id)
-
-    if file:
+    if file and (file.user_id == user.id or user.role == "admin"):
         result = Files.delete_file_by_id(id)
         if result:
             return {"message": "File deleted successfully"}

+ 35 - 30
backend/apps/webui/routers/memories.py

@@ -68,34 +68,6 @@ async def add_memory(
     return memory
 
 
-@router.post("/{memory_id}/update", response_model=Optional[MemoryModel])
-async def update_memory_by_id(
-    memory_id: str,
-    request: Request,
-    form_data: MemoryUpdateModel,
-    user=Depends(get_verified_user),
-):
-    memory = Memories.update_memory_by_id(memory_id, form_data.content)
-    if memory is None:
-        raise HTTPException(status_code=404, detail="Memory not found")
-
-    if form_data.content is not None:
-        memory_embedding = request.app.state.EMBEDDING_FUNCTION(form_data.content)
-        collection = CHROMA_CLIENT.get_or_create_collection(
-            name=f"user-memory-{user.id}"
-        )
-        collection.upsert(
-            documents=[form_data.content],
-            ids=[memory.id],
-            embeddings=[memory_embedding],
-            metadatas=[
-                {"created_at": memory.created_at, "updated_at": memory.updated_at}
-            ],
-        )
-
-    return memory
-
-
 ############################
 # QueryMemory
 ############################
@@ -124,7 +96,7 @@ async def query_memory(
 ############################
 # ResetMemoryFromVectorDB
 ############################
-@router.get("/reset", response_model=bool)
+@router.post("/reset", response_model=bool)
 async def reset_memory_from_vector_db(
     request: Request, user=Depends(get_verified_user)
 ):
@@ -147,7 +119,7 @@ async def reset_memory_from_vector_db(
 ############################
 
 
-@router.delete("/user", response_model=bool)
+@router.delete("/delete/user", response_model=bool)
 async def delete_memory_by_user_id(user=Depends(get_verified_user)):
     result = Memories.delete_memories_by_user_id(user.id)
 
@@ -161,6 +133,39 @@ async def delete_memory_by_user_id(user=Depends(get_verified_user)):
     return False
 
 
+############################
+# UpdateMemoryById
+############################
+
+
+@router.post("/{memory_id}/update", response_model=Optional[MemoryModel])
+async def update_memory_by_id(
+    memory_id: str,
+    request: Request,
+    form_data: MemoryUpdateModel,
+    user=Depends(get_verified_user),
+):
+    memory = Memories.update_memory_by_id(memory_id, form_data.content)
+    if memory is None:
+        raise HTTPException(status_code=404, detail="Memory not found")
+
+    if form_data.content is not None:
+        memory_embedding = request.app.state.EMBEDDING_FUNCTION(form_data.content)
+        collection = CHROMA_CLIENT.get_or_create_collection(
+            name=f"user-memory-{user.id}"
+        )
+        collection.upsert(
+            documents=[form_data.content],
+            ids=[memory.id],
+            embeddings=[memory_embedding],
+            metadatas=[
+                {"created_at": memory.created_at, "updated_at": memory.updated_at}
+            ],
+        )
+
+    return memory
+
+
 ############################
 # DeleteMemoryById
 ############################

+ 20 - 0
backend/apps/webui/utils.py

@@ -4,6 +4,9 @@ import re
 import sys
 import subprocess
 
+
+from apps.webui.models.tools import Tools
+from apps.webui.models.functions import Functions
 from config import TOOLS_DIR, FUNCTIONS_DIR
 
 
@@ -49,6 +52,15 @@ def extract_frontmatter(file_path):
 
 def load_toolkit_module_by_id(toolkit_id):
     toolkit_path = os.path.join(TOOLS_DIR, f"{toolkit_id}.py")
+
+    if not os.path.exists(toolkit_path):
+        tool = Tools.get_tool_by_id(toolkit_id)
+        if tool:
+            with open(toolkit_path, "w") as file:
+                file.write(tool.content)
+        else:
+            raise Exception(f"Toolkit not found: {toolkit_id}")
+
     spec = util.spec_from_file_location(toolkit_id, toolkit_path)
     module = util.module_from_spec(spec)
     frontmatter = extract_frontmatter(toolkit_path)
@@ -71,6 +83,14 @@ def load_toolkit_module_by_id(toolkit_id):
 def load_function_module_by_id(function_id):
     function_path = os.path.join(FUNCTIONS_DIR, f"{function_id}.py")
 
+    if not os.path.exists(function_path):
+        function = Functions.get_function_by_id(function_id)
+        if function:
+            with open(function_path, "w") as file:
+                file.write(function.content)
+        else:
+            raise Exception(f"Function not found: {function_id}")
+
     spec = util.spec_from_file_location(function_id, function_path)
     module = util.module_from_spec(spec)
     frontmatter = extract_frontmatter(function_path)

+ 163 - 217
backend/config.py

@@ -1,13 +1,17 @@
+from sqlalchemy import create_engine, Column, Integer, DateTime, JSON, func
+from contextlib import contextmanager
+
+
 import os
 import sys
 import logging
 import importlib.metadata
 import pkgutil
 from urllib.parse import urlparse
+from datetime import datetime
 
 import chromadb
 from chromadb import Settings
-from bs4 import BeautifulSoup
 from typing import TypeVar, Generic
 from pydantic import BaseModel
 from typing import Optional
@@ -16,68 +20,39 @@ from pathlib import Path
 import json
 import yaml
 
-import markdown
 import requests
 import shutil
 
-from constants import ERROR_MESSAGES
-
-####################################
-# Load .env file
-####################################
-
-BACKEND_DIR = Path(__file__).parent  # the path containing this file
-BASE_DIR = BACKEND_DIR.parent  # the path containing the backend/
 
-print(BASE_DIR)
+from apps.webui.internal.db import Base, get_db
 
-try:
-    from dotenv import load_dotenv, find_dotenv
-
-    load_dotenv(find_dotenv(str(BASE_DIR / ".env")))
-except ImportError:
-    print("dotenv not installed, skipping...")
-
-
-####################################
-# LOGGING
-####################################
-
-log_levels = ["CRITICAL", "ERROR", "WARNING", "INFO", "DEBUG"]
-
-GLOBAL_LOG_LEVEL = os.environ.get("GLOBAL_LOG_LEVEL", "").upper()
-if GLOBAL_LOG_LEVEL in log_levels:
-    logging.basicConfig(stream=sys.stdout, level=GLOBAL_LOG_LEVEL, force=True)
-else:
-    GLOBAL_LOG_LEVEL = "INFO"
-
-log = logging.getLogger(__name__)
-log.info(f"GLOBAL_LOG_LEVEL: {GLOBAL_LOG_LEVEL}")
-
-log_sources = [
-    "AUDIO",
-    "COMFYUI",
-    "CONFIG",
-    "DB",
-    "IMAGES",
-    "MAIN",
-    "MODELS",
-    "OLLAMA",
-    "OPENAI",
-    "RAG",
-    "WEBHOOK",
-]
-
-SRC_LOG_LEVELS = {}
-
-for source in log_sources:
-    log_env_var = source + "_LOG_LEVEL"
-    SRC_LOG_LEVELS[source] = os.environ.get(log_env_var, "").upper()
-    if SRC_LOG_LEVELS[source] not in log_levels:
-        SRC_LOG_LEVELS[source] = GLOBAL_LOG_LEVEL
-    log.info(f"{log_env_var}: {SRC_LOG_LEVELS[source]}")
+from constants import ERROR_MESSAGES
 
-log.setLevel(SRC_LOG_LEVELS["CONFIG"])
+from env import (
+    ENV,
+    VERSION,
+    SAFE_MODE,
+    GLOBAL_LOG_LEVEL,
+    SRC_LOG_LEVELS,
+    BASE_DIR,
+    DATA_DIR,
+    BACKEND_DIR,
+    FRONTEND_BUILD_DIR,
+    WEBUI_NAME,
+    WEBUI_URL,
+    WEBUI_FAVICON_URL,
+    WEBUI_BUILD_HASH,
+    CONFIG_DATA,
+    DATABASE_URL,
+    CHANGELOG,
+    WEBUI_AUTH,
+    WEBUI_AUTH_TRUSTED_EMAIL_HEADER,
+    WEBUI_AUTH_TRUSTED_NAME_HEADER,
+    WEBUI_SECRET_KEY,
+    WEBUI_SESSION_COOKIE_SAME_SITE,
+    WEBUI_SESSION_COOKIE_SECURE,
+    log,
+)
 
 
 class EndpointFilter(logging.Filter):
@@ -88,141 +63,130 @@ class EndpointFilter(logging.Filter):
 # Filter out /endpoint
 logging.getLogger("uvicorn.access").addFilter(EndpointFilter())
 
-
-WEBUI_NAME = os.environ.get("WEBUI_NAME", "Open WebUI")
-if WEBUI_NAME != "Open WebUI":
-    WEBUI_NAME += " (Open WebUI)"
-
-WEBUI_URL = os.environ.get("WEBUI_URL", "http://localhost:3000")
-
-WEBUI_FAVICON_URL = "https://openwebui.com/favicon.png"
-
-
 ####################################
-# ENV (dev,test,prod)
+# Config helpers
 ####################################
 
-ENV = os.environ.get("ENV", "dev")
 
-try:
-    PACKAGE_DATA = json.loads((BASE_DIR / "package.json").read_text())
-except Exception:
+# Function to run the alembic migrations
+def run_migrations():
+    print("Running migrations")
     try:
-        PACKAGE_DATA = {"version": importlib.metadata.version("open-webui")}
-    except importlib.metadata.PackageNotFoundError:
-        PACKAGE_DATA = {"version": "0.0.0"}
-
-VERSION = PACKAGE_DATA["version"]
-
-
-# Function to parse each section
-def parse_section(section):
-    items = []
-    for li in section.find_all("li"):
-        # Extract raw HTML string
-        raw_html = str(li)
-
-        # Extract text without HTML tags
-        text = li.get_text(separator=" ", strip=True)
-
-        # Split into title and content
-        parts = text.split(": ", 1)
-        title = parts[0].strip() if len(parts) > 1 else ""
-        content = parts[1].strip() if len(parts) > 1 else text
-
-        items.append({"title": title, "content": content, "raw": raw_html})
-    return items
-
-
-try:
-    changelog_path = BASE_DIR / "CHANGELOG.md"
-    with open(str(changelog_path.absolute()), "r", encoding="utf8") as file:
-        changelog_content = file.read()
+        from alembic.config import Config
+        from alembic import command
 
-except Exception:
-    changelog_content = (pkgutil.get_data("open_webui", "CHANGELOG.md") or b"").decode()
-
-
-# Convert markdown content to HTML
-html_content = markdown.markdown(changelog_content)
-
-# Parse the HTML content
-soup = BeautifulSoup(html_content, "html.parser")
-
-# Initialize JSON structure
-changelog_json = {}
-
-# Iterate over each version
-for version in soup.find_all("h2"):
-    version_number = version.get_text().strip().split(" - ")[0][1:-1]  # Remove brackets
-    date = version.get_text().strip().split(" - ")[1]
-
-    version_data = {"date": date}
+        alembic_cfg = Config("alembic.ini")
+        command.upgrade(alembic_cfg, "head")
+    except Exception as e:
+        print(f"Error: {e}")
 
-    # Find the next sibling that is a h3 tag (section title)
-    current = version.find_next_sibling()
 
-    while current and current.name != "h2":
-        if current.name == "h3":
-            section_title = current.get_text().lower()  # e.g., "added", "fixed"
-            section_items = parse_section(current.find_next_sibling("ul"))
-            version_data[section_title] = section_items
+run_migrations()
 
-        # Move to the next element
-        current = current.find_next_sibling()
 
-    changelog_json[version_number] = version_data
+class Config(Base):
+    __tablename__ = "config"
 
+    id = Column(Integer, primary_key=True)
+    data = Column(JSON, nullable=False)
+    version = Column(Integer, nullable=False, default=0)
+    created_at = Column(DateTime, nullable=False, server_default=func.now())
+    updated_at = Column(DateTime, nullable=True, onupdate=func.now())
 
-CHANGELOG = changelog_json
 
-####################################
-# SAFE_MODE
-####################################
+def load_json_config():
+    with open(f"{DATA_DIR}/config.json", "r") as file:
+        return json.load(file)
 
-SAFE_MODE = os.environ.get("SAFE_MODE", "false").lower() == "true"
 
-####################################
-# WEBUI_BUILD_HASH
-####################################
+def save_to_db(data):
+    with get_db() as db:
+        existing_config = db.query(Config).first()
+        if not existing_config:
+            new_config = Config(data=data, version=0)
+            db.add(new_config)
+        else:
+            existing_config.data = data
+            existing_config.updated_at = datetime.now()
+            db.add(existing_config)
+        db.commit()
 
-WEBUI_BUILD_HASH = os.environ.get("WEBUI_BUILD_HASH", "dev-build")
 
-####################################
-# DATA/FRONTEND BUILD DIR
-####################################
+# When initializing, check if config.json exists and migrate it to the database
+if os.path.exists(f"{DATA_DIR}/config.json"):
+    data = load_json_config()
+    save_to_db(data)
+    os.rename(f"{DATA_DIR}/config.json", f"{DATA_DIR}/old_config.json")
 
-DATA_DIR = Path(os.getenv("DATA_DIR", BACKEND_DIR / "data")).resolve()
-FRONTEND_BUILD_DIR = Path(os.getenv("FRONTEND_BUILD_DIR", BASE_DIR / "build")).resolve()
 
-RESET_CONFIG_ON_START = (
-    os.environ.get("RESET_CONFIG_ON_START", "False").lower() == "true"
-)
-if RESET_CONFIG_ON_START:
+def save_config():
     try:
-        os.remove(f"{DATA_DIR}/config.json")
         with open(f"{DATA_DIR}/config.json", "w") as f:
-            f.write("{}")
-    except Exception:
-        pass
+            json.dump(CONFIG_DATA, f, indent="\t")
+    except Exception as e:
+        log.exception(e)
 
-try:
-    CONFIG_DATA = json.loads((DATA_DIR / "config.json").read_text())
-except Exception:
-    CONFIG_DATA = {}
 
+DEFAULT_CONFIG = {
+    "version": 0,
+    "ui": {
+        "default_locale": "",
+        "prompt_suggestions": [
+            {
+                "title": [
+                    "Help me study",
+                    "vocabulary for a college entrance exam",
+                ],
+                "content": "Help me study vocabulary: write a sentence for me to fill in the blank, and I'll try to pick the correct option.",
+            },
+            {
+                "title": [
+                    "Give me ideas",
+                    "for what to do with my kids' art",
+                ],
+                "content": "What are 5 creative things I could do with my kids' art? I don't want to throw them away, but it's also so much clutter.",
+            },
+            {
+                "title": ["Tell me a fun fact", "about the Roman Empire"],
+                "content": "Tell me a random fun fact about the Roman Empire",
+            },
+            {
+                "title": [
+                    "Show me a code snippet",
+                    "of a website's sticky header",
+                ],
+                "content": "Show me a code snippet of a website's sticky header in CSS and JavaScript.",
+            },
+            {
+                "title": [
+                    "Explain options trading",
+                    "if I'm familiar with buying and selling stocks",
+                ],
+                "content": "Explain options trading in simple terms if I'm familiar with buying and selling stocks.",
+            },
+            {
+                "title": ["Overcome procrastination", "give me tips"],
+                "content": "Could you start by asking me about instances when I procrastinate the most and then give me some suggestions to overcome it?",
+            },
+            {
+                "title": [
+                    "Grammar check",
+                    "rewrite it for better readability ",
+                ],
+                "content": 'Check the following sentence for grammar and clarity: "[sentence]". Rewrite it for better readability while maintaining its original meaning.',
+            },
+        ],
+    },
+}
 
-####################################
-# Config helpers
-####################################
 
+def get_config():
+    with get_db() as db:
+        config_entry = db.query(Config).order_by(Config.id.desc()).first()
+        return config_entry.data if config_entry else DEFAULT_CONFIG
 
-def save_config():
-    try:
-        with open(f"{DATA_DIR}/config.json", "w") as f:
-            json.dump(CONFIG_DATA, f, indent="\t")
-    except Exception as e:
-        log.exception(e)
+
+CONFIG_DATA = get_config()
 
 
 def get_config_value(config_path: str):
@@ -246,7 +210,7 @@ class PersistentConfig(Generic[T]):
         self.env_value = env_value
         self.config_value = get_config_value(config_path)
         if self.config_value is not None:
-            log.info(f"'{env_name}' loaded from config.json")
+            log.info(f"'{env_name}' loaded from the latest database entry")
             self.value = self.config_value
         else:
             self.value = env_value
@@ -268,19 +232,15 @@ class PersistentConfig(Generic[T]):
         return super().__getattribute__(item)
 
     def save(self):
-        # Don't save if the value is the same as the env value and the config value
-        if self.env_value == self.value:
-            if self.config_value == self.value:
-                return
-        log.info(f"Saving '{self.env_name}' to config.json")
+        log.info(f"Saving '{self.env_name}' to the database")
         path_parts = self.config_path.split(".")
-        config = CONFIG_DATA
+        sub_config = CONFIG_DATA
         for key in path_parts[:-1]:
-            if key not in config:
-                config[key] = {}
-            config = config[key]
-        config[path_parts[-1]] = self.value
-        save_config()
+            if key not in sub_config:
+                sub_config[key] = {}
+            sub_config = sub_config[key]
+        sub_config[path_parts[-1]] = self.value
+        save_to_db(CONFIG_DATA)
         self.config_value = self.value
 
 
@@ -305,11 +265,6 @@ class AppConfig:
 # WEBUI_AUTH (Required for security)
 ####################################
 
-WEBUI_AUTH = os.environ.get("WEBUI_AUTH", "True").lower() == "true"
-WEBUI_AUTH_TRUSTED_EMAIL_HEADER = os.environ.get(
-    "WEBUI_AUTH_TRUSTED_EMAIL_HEADER", None
-)
-WEBUI_AUTH_TRUSTED_NAME_HEADER = os.environ.get("WEBUI_AUTH_TRUSTED_NAME_HEADER", None)
 JWT_EXPIRES_IN = PersistentConfig(
     "JWT_EXPIRES_IN", "auth.jwt_expiry", os.environ.get("JWT_EXPIRES_IN", "-1")
 )
@@ -999,30 +954,6 @@ TOOLS_FUNCTION_CALLING_PROMPT_TEMPLATE = PersistentConfig(
 )
 
 
-####################################
-# WEBUI_SECRET_KEY
-####################################
-
-WEBUI_SECRET_KEY = os.environ.get(
-    "WEBUI_SECRET_KEY",
-    os.environ.get(
-        "WEBUI_JWT_SECRET_KEY", "t0p-s3cr3t"
-    ),  # DEPRECATED: remove at next major version
-)
-
-WEBUI_SESSION_COOKIE_SAME_SITE = os.environ.get(
-    "WEBUI_SESSION_COOKIE_SAME_SITE",
-    os.environ.get("WEBUI_SESSION_COOKIE_SAME_SITE", "lax"),
-)
-
-WEBUI_SESSION_COOKIE_SECURE = os.environ.get(
-    "WEBUI_SESSION_COOKIE_SECURE",
-    os.environ.get("WEBUI_SESSION_COOKIE_SECURE", "false").lower() == "true",
-)
-
-if WEBUI_AUTH and WEBUI_SECRET_KEY == "":
-    raise ValueError(ERROR_MESSAGES.ENV_VAR_NOT_FOUND)
-
 ####################################
 # RAG document content extraction
 ####################################
@@ -1074,6 +1005,26 @@ ENABLE_RAG_HYBRID_SEARCH = PersistentConfig(
     os.environ.get("ENABLE_RAG_HYBRID_SEARCH", "").lower() == "true",
 )
 
+RAG_FILE_MAX_COUNT = PersistentConfig(
+    "RAG_FILE_MAX_COUNT",
+    "rag.file.max_count",
+    (
+        int(os.environ.get("RAG_FILE_MAX_COUNT"))
+        if os.environ.get("RAG_FILE_MAX_COUNT")
+        else None
+    ),
+)
+
+RAG_FILE_MAX_SIZE = PersistentConfig(
+    "RAG_FILE_MAX_SIZE",
+    "rag.file.max_size",
+    (
+        int(os.environ.get("RAG_FILE_MAX_SIZE"))
+        if os.environ.get("RAG_FILE_MAX_SIZE")
+        else None
+    ),
+)
+
 ENABLE_RAG_WEB_LOADER_SSL_VERIFICATION = PersistentConfig(
     "ENABLE_RAG_WEB_LOADER_SSL_VERIFICATION",
     "rag.enable_web_loader_ssl_verification",
@@ -1554,13 +1505,8 @@ AUDIO_TTS_VOICE = PersistentConfig(
     os.getenv("AUDIO_TTS_VOICE", "alloy"),  # OpenAI default voice
 )
 
-
-####################################
-# Database
-####################################
-
-DATABASE_URL = os.environ.get("DATABASE_URL", f"sqlite:///{DATA_DIR}/webui.db")
-
-# Replace the postgres:// with postgresql://
-if "postgres://" in DATABASE_URL:
-    DATABASE_URL = DATABASE_URL.replace("postgres://", "postgresql://")
+AUDIO_TTS_SPLIT_ON = PersistentConfig(
+    "AUDIO_TTS_SPLIT_ON",
+    "audio.tts.split_on",
+    os.getenv("AUDIO_TTS_SPLIT_ON", "punctuation"),
+)

+ 0 - 36
backend/data/config.json

@@ -1,36 +0,0 @@
-{
-	"version": 0,
-	"ui": {
-		"default_locale": "",
-		"prompt_suggestions": [
-			{
-				"title": ["Help me study", "vocabulary for a college entrance exam"],
-				"content": "Help me study vocabulary: write a sentence for me to fill in the blank, and I'll try to pick the correct option."
-			},
-			{
-				"title": ["Give me ideas", "for what to do with my kids' art"],
-				"content": "What are 5 creative things I could do with my kids' art? I don't want to throw them away, but it's also so much clutter."
-			},
-			{
-				"title": ["Tell me a fun fact", "about the Roman Empire"],
-				"content": "Tell me a random fun fact about the Roman Empire"
-			},
-			{
-				"title": ["Show me a code snippet", "of a website's sticky header"],
-				"content": "Show me a code snippet of a website's sticky header in CSS and JavaScript."
-			},
-			{
-				"title": ["Explain options trading", "if I'm familiar with buying and selling stocks"],
-				"content": "Explain options trading in simple terms if I'm familiar with buying and selling stocks."
-			},
-			{
-				"title": ["Overcome procrastination", "give me tips"],
-				"content": "Could you start by asking me about instances when I procrastinate the most and then give me some suggestions to overcome it?"
-			},
-			{
-				"title": ["Grammar check", "rewrite it for better readability "],
-				"content": "Check the following sentence for grammar and clarity: \"[sentence]\". Rewrite it for better readability while maintaining its original meaning."
-			}
-		]
-	}
-}

+ 252 - 0
backend/env.py

@@ -0,0 +1,252 @@
+from pathlib import Path
+import os
+import logging
+import sys
+import json
+
+
+import importlib.metadata
+import pkgutil
+from urllib.parse import urlparse
+from datetime import datetime
+
+
+import markdown
+from bs4 import BeautifulSoup
+
+from constants import ERROR_MESSAGES
+
+####################################
+# Load .env file
+####################################
+
+BACKEND_DIR = Path(__file__).parent  # the path containing this file
+BASE_DIR = BACKEND_DIR.parent  # the path containing the backend/
+
+print(BASE_DIR)
+
+try:
+    from dotenv import load_dotenv, find_dotenv
+
+    load_dotenv(find_dotenv(str(BASE_DIR / ".env")))
+except ImportError:
+    print("dotenv not installed, skipping...")
+
+
+####################################
+# LOGGING
+####################################
+
+log_levels = ["CRITICAL", "ERROR", "WARNING", "INFO", "DEBUG"]
+
+GLOBAL_LOG_LEVEL = os.environ.get("GLOBAL_LOG_LEVEL", "").upper()
+if GLOBAL_LOG_LEVEL in log_levels:
+    logging.basicConfig(stream=sys.stdout, level=GLOBAL_LOG_LEVEL, force=True)
+else:
+    GLOBAL_LOG_LEVEL = "INFO"
+
+log = logging.getLogger(__name__)
+log.info(f"GLOBAL_LOG_LEVEL: {GLOBAL_LOG_LEVEL}")
+
+log_sources = [
+    "AUDIO",
+    "COMFYUI",
+    "CONFIG",
+    "DB",
+    "IMAGES",
+    "MAIN",
+    "MODELS",
+    "OLLAMA",
+    "OPENAI",
+    "RAG",
+    "WEBHOOK",
+]
+
+SRC_LOG_LEVELS = {}
+
+for source in log_sources:
+    log_env_var = source + "_LOG_LEVEL"
+    SRC_LOG_LEVELS[source] = os.environ.get(log_env_var, "").upper()
+    if SRC_LOG_LEVELS[source] not in log_levels:
+        SRC_LOG_LEVELS[source] = GLOBAL_LOG_LEVEL
+    log.info(f"{log_env_var}: {SRC_LOG_LEVELS[source]}")
+
+log.setLevel(SRC_LOG_LEVELS["CONFIG"])
+
+
+WEBUI_NAME = os.environ.get("WEBUI_NAME", "Open WebUI")
+if WEBUI_NAME != "Open WebUI":
+    WEBUI_NAME += " (Open WebUI)"
+
+WEBUI_URL = os.environ.get("WEBUI_URL", "http://localhost:3000")
+
+WEBUI_FAVICON_URL = "https://openwebui.com/favicon.png"
+
+
+####################################
+# ENV (dev,test,prod)
+####################################
+
+ENV = os.environ.get("ENV", "dev")
+
+try:
+    PACKAGE_DATA = json.loads((BASE_DIR / "package.json").read_text())
+except Exception:
+    try:
+        PACKAGE_DATA = {"version": importlib.metadata.version("open-webui")}
+    except importlib.metadata.PackageNotFoundError:
+        PACKAGE_DATA = {"version": "0.0.0"}
+
+VERSION = PACKAGE_DATA["version"]
+
+
+# Function to parse each section
+def parse_section(section):
+    items = []
+    for li in section.find_all("li"):
+        # Extract raw HTML string
+        raw_html = str(li)
+
+        # Extract text without HTML tags
+        text = li.get_text(separator=" ", strip=True)
+
+        # Split into title and content
+        parts = text.split(": ", 1)
+        title = parts[0].strip() if len(parts) > 1 else ""
+        content = parts[1].strip() if len(parts) > 1 else text
+
+        items.append({"title": title, "content": content, "raw": raw_html})
+    return items
+
+
+try:
+    changelog_path = BASE_DIR / "CHANGELOG.md"
+    with open(str(changelog_path.absolute()), "r", encoding="utf8") as file:
+        changelog_content = file.read()
+
+except Exception:
+    changelog_content = (pkgutil.get_data("open_webui", "CHANGELOG.md") or b"").decode()
+
+
+# Convert markdown content to HTML
+html_content = markdown.markdown(changelog_content)
+
+# Parse the HTML content
+soup = BeautifulSoup(html_content, "html.parser")
+
+# Initialize JSON structure
+changelog_json = {}
+
+# Iterate over each version
+for version in soup.find_all("h2"):
+    version_number = version.get_text().strip().split(" - ")[0][1:-1]  # Remove brackets
+    date = version.get_text().strip().split(" - ")[1]
+
+    version_data = {"date": date}
+
+    # Find the next sibling that is a h3 tag (section title)
+    current = version.find_next_sibling()
+
+    while current and current.name != "h2":
+        if current.name == "h3":
+            section_title = current.get_text().lower()  # e.g., "added", "fixed"
+            section_items = parse_section(current.find_next_sibling("ul"))
+            version_data[section_title] = section_items
+
+        # Move to the next element
+        current = current.find_next_sibling()
+
+    changelog_json[version_number] = version_data
+
+
+CHANGELOG = changelog_json
+
+####################################
+# SAFE_MODE
+####################################
+
+SAFE_MODE = os.environ.get("SAFE_MODE", "false").lower() == "true"
+
+####################################
+# WEBUI_BUILD_HASH
+####################################
+
+WEBUI_BUILD_HASH = os.environ.get("WEBUI_BUILD_HASH", "dev-build")
+
+####################################
+# DATA/FRONTEND BUILD DIR
+####################################
+
+DATA_DIR = Path(os.getenv("DATA_DIR", BACKEND_DIR / "data")).resolve()
+FRONTEND_BUILD_DIR = Path(os.getenv("FRONTEND_BUILD_DIR", BASE_DIR / "build")).resolve()
+
+RESET_CONFIG_ON_START = (
+    os.environ.get("RESET_CONFIG_ON_START", "False").lower() == "true"
+)
+if RESET_CONFIG_ON_START:
+    try:
+        os.remove(f"{DATA_DIR}/config.json")
+        with open(f"{DATA_DIR}/config.json", "w") as f:
+            f.write("{}")
+    except Exception:
+        pass
+
+try:
+    CONFIG_DATA = json.loads((DATA_DIR / "config.json").read_text())
+except Exception:
+    CONFIG_DATA = {}
+
+
+####################################
+# Database
+####################################
+
+# Check if the file exists
+if os.path.exists(f"{DATA_DIR}/ollama.db"):
+    # Rename the file
+    os.rename(f"{DATA_DIR}/ollama.db", f"{DATA_DIR}/webui.db")
+    log.info("Database migrated from Ollama-WebUI successfully.")
+else:
+    pass
+
+DATABASE_URL = os.environ.get("DATABASE_URL", f"sqlite:///{DATA_DIR}/webui.db")
+
+# Replace the postgres:// with postgresql://
+if "postgres://" in DATABASE_URL:
+    DATABASE_URL = DATABASE_URL.replace("postgres://", "postgresql://")
+
+
+####################################
+# WEBUI_AUTH (Required for security)
+####################################
+
+WEBUI_AUTH = os.environ.get("WEBUI_AUTH", "True").lower() == "true"
+WEBUI_AUTH_TRUSTED_EMAIL_HEADER = os.environ.get(
+    "WEBUI_AUTH_TRUSTED_EMAIL_HEADER", None
+)
+WEBUI_AUTH_TRUSTED_NAME_HEADER = os.environ.get("WEBUI_AUTH_TRUSTED_NAME_HEADER", None)
+
+
+####################################
+# WEBUI_SECRET_KEY
+####################################
+
+WEBUI_SECRET_KEY = os.environ.get(
+    "WEBUI_SECRET_KEY",
+    os.environ.get(
+        "WEBUI_JWT_SECRET_KEY", "t0p-s3cr3t"
+    ),  # DEPRECATED: remove at next major version
+)
+
+WEBUI_SESSION_COOKIE_SAME_SITE = os.environ.get(
+    "WEBUI_SESSION_COOKIE_SAME_SITE",
+    os.environ.get("WEBUI_SESSION_COOKIE_SAME_SITE", "lax"),
+)
+
+WEBUI_SESSION_COOKIE_SECURE = os.environ.get(
+    "WEBUI_SESSION_COOKIE_SECURE",
+    os.environ.get("WEBUI_SESSION_COOKIE_SECURE", "false").lower() == "true",
+)
+
+if WEBUI_AUTH and WEBUI_SECRET_KEY == "":
+    raise ValueError(ERROR_MESSAGES.ENV_VAR_NOT_FOUND)

+ 48 - 42
backend/main.py

@@ -1,7 +1,6 @@
 import base64
 import uuid
 from contextlib import asynccontextmanager
-
 from authlib.integrations.starlette_client import OAuth
 from authlib.oidc.core import UserInfo
 import json
@@ -87,6 +86,7 @@ from utils.misc import (
 from apps.rag.utils import get_rag_context, rag_template
 
 from config import (
+    run_migrations,
     WEBUI_NAME,
     WEBUI_URL,
     WEBUI_AUTH,
@@ -165,17 +165,6 @@ https://github.com/open-webui/open-webui
 )
 
 
-def run_migrations():
-    try:
-        from alembic.config import Config
-        from alembic import command
-
-        alembic_cfg = Config("alembic.ini")
-        command.upgrade(alembic_cfg, "head")
-    except Exception as e:
-        print(f"Error: {e}")
-
-
 @asynccontextmanager
 async def lifespan(app: FastAPI):
     run_migrations()
@@ -299,24 +288,26 @@ async def chat_completion_filter_functions_handler(body, model, extra_params):
 
             # Get the signature of the function
             sig = inspect.signature(inlet)
-            params = {"body": body}
+            params = {"body": body} | {
+                k: v
+                for k, v in {
+                    **extra_params,
+                    "__model__": model,
+                    "__id__": filter_id,
+                }.items()
+                if k in sig.parameters
+            }
 
-            # Extra parameters to be passed to the function
-            custom_params = {**extra_params, "__model__": model, "__id__": filter_id}
-            if hasattr(function_module, "UserValves") and "__user__" in sig.parameters:
+            if "__user__" in params and hasattr(function_module, "UserValves"):
                 try:
-                    uid = custom_params["__user__"]["id"]
-                    custom_params["__user__"]["valves"] = function_module.UserValves(
-                        **Functions.get_user_valves_by_id_and_user_id(filter_id, uid)
+                    params["__user__"]["valves"] = function_module.UserValves(
+                        **Functions.get_user_valves_by_id_and_user_id(
+                            filter_id, params["__user__"]["id"]
+                        )
                     )
                 except Exception as e:
                     print(e)
 
-            # Add extra params in contained in function signature
-            for key, value in custom_params.items():
-                if key in sig.parameters:
-                    params[key] = value
-
             if inspect.iscoroutinefunction(inlet):
                 body = await inlet(**params)
             else:
@@ -372,7 +363,9 @@ async def chat_completion_tools_handler(
 ) -> tuple[dict, dict]:
     # If tool_ids field is present, call the functions
     metadata = body.get("metadata", {})
+
     tool_ids = metadata.get("tool_ids", None)
+    log.debug(f"{tool_ids=}")
     if not tool_ids:
         return body, {}
 
@@ -381,16 +374,17 @@ async def chat_completion_tools_handler(
     citations = []
 
     task_model_id = get_task_model_id(body["model"])
-
-    log.debug(f"{tool_ids=}")
-
-    custom_params = {
-        **extra_params,
-        "__model__": app.state.MODELS[task_model_id],
-        "__messages__": body["messages"],
-        "__files__": metadata.get("files", []),
-    }
-    tools = get_tools(webui_app, tool_ids, user, custom_params)
+    tools = get_tools(
+        webui_app,
+        tool_ids,
+        user,
+        {
+            **extra_params,
+            "__model__": app.state.MODELS[task_model_id],
+            "__messages__": body["messages"],
+            "__files__": metadata.get("files", []),
+        },
+    )
     log.info(f"{tools=}")
 
     specs = [tool["spec"] for tool in tools.values()]
@@ -530,17 +524,15 @@ class ChatCompletionMiddleware(BaseHTTPMiddleware):
         }
         body["metadata"] = metadata
 
-        __user__ = {
-            "id": user.id,
-            "email": user.email,
-            "name": user.name,
-            "role": user.role,
-        }
-
         extra_params = {
-            "__user__": __user__,
             "__event_emitter__": get_event_emitter(metadata),
             "__event_call__": get_event_call(metadata),
+            "__user__": {
+                "id": user.id,
+                "email": user.email,
+                "name": user.name,
+                "role": user.role,
+            },
         }
 
         # Initialize data_items to store additional data to be sent to the client
@@ -989,11 +981,20 @@ async def get_models(user=Depends(get_verified_user)):
 @app.post("/api/chat/completions")
 async def generate_chat_completions(form_data: dict, user=Depends(get_verified_user)):
     model_id = form_data["model"]
+
     if model_id not in app.state.MODELS:
         raise HTTPException(
             status_code=status.HTTP_404_NOT_FOUND,
             detail="Model not found",
         )
+
+    if app.state.config.ENABLE_MODEL_FILTER:
+        if user.role == "user" and model_id not in app.state.config.MODEL_FILTER_LIST:
+            raise HTTPException(
+                status_code=status.HTTP_403_FORBIDDEN,
+                detail="Model not found",
+            )
+
     model = app.state.MODELS[model_id]
     if model.get("pipe"):
         return await generate_function_chat_completion(form_data, user=user)
@@ -1932,11 +1933,16 @@ async def get_app_config(request: Request):
                     "tts": {
                         "engine": audio_app.state.config.TTS_ENGINE,
                         "voice": audio_app.state.config.TTS_VOICE,
+                        "split_on": audio_app.state.config.TTS_SPLIT_ON,
                     },
                     "stt": {
                         "engine": audio_app.state.config.STT_ENGINE,
                     },
                 },
+                "file": {
+                    "max_size": rag_app.state.config.FILE_MAX_SIZE,
+                    "max_count": rag_app.state.config.FILE_MAX_COUNT,
+                },
                 "permissions": {**webui_app.state.config.USER_PERMISSIONS},
             }
             if user is not None

+ 1 - 1
backend/migrations/env.py

@@ -18,7 +18,7 @@ from apps.webui.models.users import User
 from apps.webui.models.files import File
 from apps.webui.models.functions import Function
 
-from config import DATABASE_URL
+from env import DATABASE_URL
 
 # this is the Alembic Config object, which provides
 # access to the values within the .ini file in use.

+ 43 - 0
backend/migrations/versions/ca81bd47c050_add_config_table.py

@@ -0,0 +1,43 @@
+"""Add config table
+
+Revision ID: ca81bd47c050
+Revises: 7e5b5dc7342b
+Create Date: 2024-08-25 15:26:35.241684
+
+"""
+
+from typing import Sequence, Union
+
+from alembic import op
+import sqlalchemy as sa
+import apps.webui.internal.db
+
+
+# revision identifiers, used by Alembic.
+revision: str = "ca81bd47c050"
+down_revision: Union[str, None] = "7e5b5dc7342b"
+branch_labels: Union[str, Sequence[str], None] = None
+depends_on: Union[str, Sequence[str], None] = None
+
+
+def upgrade():
+    op.create_table(
+        "config",
+        sa.Column("id", sa.Integer, primary_key=True),
+        sa.Column("data", sa.JSON(), nullable=False),
+        sa.Column("version", sa.Integer, nullable=False),
+        sa.Column(
+            "created_at", sa.DateTime(), nullable=False, server_default=sa.func.now()
+        ),
+        sa.Column(
+            "updated_at",
+            sa.DateTime(),
+            nullable=True,
+            server_default=sa.func.now(),
+            onupdate=sa.func.now(),
+        ),
+    )
+
+
+def downgrade():
+    op.drop_table("config")

+ 6 - 5
backend/requirements.txt

@@ -34,8 +34,8 @@ anthropic
 google-generativeai==0.7.2
 tiktoken
 
-langchain==0.2.12
-langchain-community==0.2.10
+langchain==0.2.14
+langchain-community==0.2.12
 langchain-chroma==0.1.2
 
 fake-useragent==1.5.1
@@ -44,8 +44,9 @@ sentence-transformers==3.0.1
 pypdf==4.3.1
 docx2txt==0.8
 python-pptx==1.0.0
-unstructured==0.15.5
-Markdown==3.6
+unstructured==0.15.7
+nltk==3.9.1
+Markdown==3.7
 pypandoc==1.13
 pandas==2.2.2
 openpyxl==3.1.5
@@ -66,7 +67,7 @@ PyJWT[crypto]==2.9.0
 authlib==1.3.1
 
 black==24.8.0
-langfuse==2.43.3
+langfuse==2.44.0
 youtube-transcript-api==0.6.2
 pytube==15.0.0
 

+ 1 - 1
backend/utils/misc.py

@@ -82,7 +82,7 @@ def add_or_update_system_message(content: str, messages: list[dict]):
     """
 
     if messages and messages[0].get("role") == "system":
-        messages[0]["content"] += f"{content}\n{messages[0]['content']}"
+        messages[0]["content"] = f"{content}\n{messages[0]['content']}"
     else:
         # Insert at the beginning
         messages.insert(0, {"role": "system", "content": content})

+ 4 - 4
backend/utils/utils.py

@@ -6,16 +6,16 @@ from apps.webui.models.users import Users
 from typing import Union, Optional
 from constants import ERROR_MESSAGES
 from passlib.context import CryptContext
-from datetime import datetime, timedelta
+from datetime import datetime, timedelta, UTC
 import jwt
 import uuid
 import logging
-import config
+from env import WEBUI_SECRET_KEY
 
 logging.getLogger("passlib").setLevel(logging.ERROR)
 
 
-SESSION_SECRET = config.WEBUI_SECRET_KEY
+SESSION_SECRET = WEBUI_SECRET_KEY
 ALGORITHM = "HS256"
 
 ##############
@@ -40,7 +40,7 @@ def create_token(data: dict, expires_delta: Union[timedelta, None] = None) -> st
     payload = data.copy()
 
     if expires_delta:
-        expire = datetime.utcnow() + expires_delta
+        expire = datetime.now(UTC) + expires_delta
         payload.update({"exp": expire})
 
     encoded_jwt = jwt.encode(payload, SESSION_SECRET, algorithm=ALGORITHM)

+ 6 - 6
package-lock.json

@@ -1,12 +1,12 @@
 {
 	"name": "open-webui",
-	"version": "0.3.15",
+	"version": "0.3.16",
 	"lockfileVersion": 3,
 	"requires": true,
 	"packages": {
 		"": {
 			"name": "open-webui",
-			"version": "0.3.15",
+			"version": "0.3.16",
 			"dependencies": {
 				"@codemirror/lang-javascript": "^6.2.2",
 				"@codemirror/lang-python": "^6.1.6",
@@ -6576,12 +6576,12 @@
 			]
 		},
 		"node_modules/micromatch": {
-			"version": "4.0.5",
-			"resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.5.tgz",
-			"integrity": "sha512-DMy+ERcEW2q8Z2Po+WNXuw3c5YaUSFjAO5GsJqfEl7UjvtIuFKO6ZrKvcItdy98dwFI2N1tg3zNIdKaQT+aNdA==",
+			"version": "4.0.8",
+			"resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.8.tgz",
+			"integrity": "sha512-PXwfBhYu0hBCPw8Dn0E+WDYb7af3dSLVWKi3HGv84IdF4TyFoC0ysxFd0Goxw7nSv4T/PzEJQxsYsEiFCKo2BA==",
 			"dev": true,
 			"dependencies": {
-				"braces": "^3.0.2",
+				"braces": "^3.0.3",
 				"picomatch": "^2.3.1"
 			},
 			"engines": {

+ 1 - 1
package.json

@@ -1,6 +1,6 @@
 {
 	"name": "open-webui",
-	"version": "0.3.15",
+	"version": "0.3.16",
 	"private": true,
 	"scripts": {
 		"dev": "npm run pyodide:fetch && vite dev --host",

+ 6 - 5
pyproject.toml

@@ -41,8 +41,8 @@ dependencies = [
     "google-generativeai==0.7.2",
     "tiktoken",
 
-    "langchain==0.2.12",
-    "langchain-community==0.2.10",
+    "langchain==0.2.14",
+    "langchain-community==0.2.12",
     "langchain-chroma==0.1.2",
 
     "fake-useragent==1.5.1",
@@ -51,8 +51,9 @@ dependencies = [
     "pypdf==4.3.1",
     "docx2txt==0.8",
     "python-pptx==1.0.0",
-    "unstructured==0.15.5",
-    "Markdown==3.6",
+    "unstructured==0.15.7",
+    "nltk==3.9.1",
+    "Markdown==3.7",
     "pypandoc==1.13",
     "pandas==2.2.2",
     "openpyxl==3.1.5",
@@ -73,7 +74,7 @@ dependencies = [
     "authlib==1.3.1",
 
     "black==24.8.0",
-    "langfuse==2.43.3",
+    "langfuse==2.44.0",
     "youtube-transcript-api==0.6.2",
     "pytube==15.0.0",
 

+ 0 - 750
requirements-dev.lock

@@ -1,750 +0,0 @@
-# generated by rye
-# use `rye lock` or `rye sync` to update this lockfile
-#
-# last locked with the following flags:
-#   pre: false
-#   features: []
-#   all-features: false
-#   with-sources: false
-#   generate-hashes: false
-#   universal: false
-
--e file:.
-aiohappyeyeballs==2.3.5
-    # via aiohttp
-aiohttp==3.10.2
-    # via langchain
-    # via langchain-community
-    # via open-webui
-aiosignal==1.3.1
-    # via aiohttp
-alembic==1.13.2
-    # via open-webui
-annotated-types==0.6.0
-    # via pydantic
-anthropic==0.32.0
-    # via open-webui
-anyio==4.4.0
-    # via anthropic
-    # via httpx
-    # via langfuse
-    # via openai
-    # via starlette
-    # via watchfiles
-apscheduler==3.10.4
-    # via open-webui
-argon2-cffi==23.1.0
-    # via open-webui
-argon2-cffi-bindings==21.2.0
-    # via argon2-cffi
-asgiref==3.8.1
-    # via opentelemetry-instrumentation-asgi
-attrs==23.2.0
-    # via aiohttp
-    # via pytest-docker
-authlib==1.3.1
-    # via open-webui
-av==11.0.0
-    # via faster-whisper
-backoff==2.2.1
-    # via langfuse
-    # via posthog
-    # via unstructured
-bcrypt==4.2.0
-    # via chromadb
-    # via open-webui
-    # via passlib
-beautifulsoup4==4.12.3
-    # via extract-msg
-    # via unstructured
-bidict==0.23.1
-    # via python-socketio
-black==24.8.0
-    # via open-webui
-blinker==1.8.2
-    # via flask
-boto3==1.35.0
-    # via open-webui
-botocore==1.35.2
-    # via boto3
-    # via s3transfer
-build==1.2.1
-    # via chromadb
-cachetools==5.3.3
-    # via google-auth
-certifi==2024.2.2
-    # via httpcore
-    # via httpx
-    # via kubernetes
-    # via requests
-    # via unstructured-client
-cffi==1.16.0
-    # via argon2-cffi-bindings
-    # via cryptography
-chardet==5.2.0
-    # via unstructured
-charset-normalizer==3.3.2
-    # via requests
-    # via unstructured-client
-chroma-hnswlib==0.7.6
-    # via chromadb
-chromadb==0.5.5
-    # via langchain-chroma
-    # via open-webui
-click==8.1.7
-    # via black
-    # via duckduckgo-search
-    # via flask
-    # via nltk
-    # via peewee-migrate
-    # via typer
-    # via uvicorn
-colorclass==2.2.2
-    # via oletools
-coloredlogs==15.0.1
-    # via onnxruntime
-compressed-rtf==1.0.6
-    # via extract-msg
-cryptography==42.0.7
-    # via authlib
-    # via msoffcrypto-tool
-    # via pyjwt
-ctranslate2==4.2.1
-    # via faster-whisper
-dataclasses-json==0.6.6
-    # via langchain-community
-    # via unstructured
-    # via unstructured-client
-deepdiff==7.0.1
-    # via unstructured-client
-defusedxml==0.7.1
-    # via fpdf2
-deprecated==1.2.14
-    # via opentelemetry-api
-    # via opentelemetry-exporter-otlp-proto-grpc
-distro==1.9.0
-    # via anthropic
-    # via openai
-dnspython==2.6.1
-    # via email-validator
-    # via pymongo
-docker==7.1.0
-    # via open-webui
-docx2txt==0.8
-    # via open-webui
-duckduckgo-search==6.2.6
-    # via open-webui
-easygui==0.98.3
-    # via oletools
-ebcdic==1.1.1
-    # via extract-msg
-ecdsa==0.19.0
-    # via python-jose
-email-validator==2.1.1
-    # via fastapi
-emoji==2.11.1
-    # via unstructured
-et-xmlfile==1.1.0
-    # via openpyxl
-extract-msg==0.48.5
-    # via open-webui
-fake-useragent==1.5.1
-    # via open-webui
-fastapi==0.111.0
-    # via chromadb
-    # via langchain-chroma
-    # via open-webui
-fastapi-cli==0.0.4
-    # via fastapi
-faster-whisper==1.0.3
-    # via open-webui
-filelock==3.14.0
-    # via huggingface-hub
-    # via torch
-    # via transformers
-filetype==1.2.0
-    # via unstructured
-flask==3.0.3
-    # via flask-cors
-    # via open-webui
-flask-cors==4.0.1
-    # via open-webui
-flatbuffers==24.3.25
-    # via onnxruntime
-fonttools==4.51.0
-    # via fpdf2
-fpdf2==2.7.9
-    # via open-webui
-frozenlist==1.4.1
-    # via aiohttp
-    # via aiosignal
-fsspec==2024.3.1
-    # via huggingface-hub
-    # via torch
-google-ai-generativelanguage==0.6.6
-    # via google-generativeai
-google-api-core==2.19.0
-    # via google-ai-generativelanguage
-    # via google-api-python-client
-    # via google-generativeai
-google-api-python-client==2.129.0
-    # via google-generativeai
-google-auth==2.29.0
-    # via google-ai-generativelanguage
-    # via google-api-core
-    # via google-api-python-client
-    # via google-auth-httplib2
-    # via google-generativeai
-    # via kubernetes
-google-auth-httplib2==0.2.0
-    # via google-api-python-client
-google-generativeai==0.7.2
-    # via open-webui
-googleapis-common-protos==1.63.0
-    # via google-api-core
-    # via grpcio-status
-    # via opentelemetry-exporter-otlp-proto-grpc
-grpcio==1.63.0
-    # via chromadb
-    # via google-api-core
-    # via grpcio-status
-    # via opentelemetry-exporter-otlp-proto-grpc
-grpcio-status==1.62.2
-    # via google-api-core
-h11==0.14.0
-    # via httpcore
-    # via uvicorn
-    # via wsproto
-httpcore==1.0.5
-    # via httpx
-httplib2==0.22.0
-    # via google-api-python-client
-    # via google-auth-httplib2
-httptools==0.6.1
-    # via uvicorn
-httpx==0.27.0
-    # via anthropic
-    # via chromadb
-    # via fastapi
-    # via langfuse
-    # via openai
-huggingface-hub==0.23.0
-    # via faster-whisper
-    # via sentence-transformers
-    # via tokenizers
-    # via transformers
-humanfriendly==10.0
-    # via coloredlogs
-idna==3.7
-    # via anyio
-    # via email-validator
-    # via httpx
-    # via langfuse
-    # via requests
-    # via unstructured-client
-    # via yarl
-importlib-metadata==7.0.0
-    # via opentelemetry-api
-importlib-resources==6.4.0
-    # via chromadb
-iniconfig==2.0.0
-    # via pytest
-itsdangerous==2.2.0
-    # via flask
-jinja2==3.1.4
-    # via fastapi
-    # via flask
-    # via torch
-jiter==0.5.0
-    # via anthropic
-jmespath==1.0.1
-    # via boto3
-    # via botocore
-joblib==1.4.2
-    # via nltk
-    # via scikit-learn
-jsonpatch==1.33
-    # via langchain-core
-jsonpath-python==1.0.6
-    # via unstructured-client
-jsonpointer==2.4
-    # via jsonpatch
-kubernetes==29.0.0
-    # via chromadb
-langchain==0.2.12
-    # via langchain-community
-    # via open-webui
-langchain-chroma==0.1.2
-    # via open-webui
-langchain-community==0.2.10
-    # via open-webui
-langchain-core==0.2.28
-    # via langchain
-    # via langchain-chroma
-    # via langchain-community
-    # via langchain-text-splitters
-langchain-text-splitters==0.2.0
-    # via langchain
-langdetect==1.0.9
-    # via unstructured
-langfuse==2.43.3
-    # via open-webui
-langsmith==0.1.96
-    # via langchain
-    # via langchain-community
-    # via langchain-core
-lark==1.1.8
-    # via rtfde
-lxml==5.2.2
-    # via python-pptx
-    # via unstructured
-mako==1.3.5
-    # via alembic
-markdown==3.6
-    # via open-webui
-markdown-it-py==3.0.0
-    # via rich
-markupsafe==2.1.5
-    # via jinja2
-    # via mako
-    # via werkzeug
-marshmallow==3.21.2
-    # via dataclasses-json
-    # via unstructured-client
-mdurl==0.1.2
-    # via markdown-it-py
-mmh3==4.1.0
-    # via chromadb
-monotonic==1.6
-    # via posthog
-mpmath==1.3.0
-    # via sympy
-msoffcrypto-tool==5.4.1
-    # via oletools
-multidict==6.0.5
-    # via aiohttp
-    # via yarl
-mypy-extensions==1.0.0
-    # via black
-    # via typing-inspect
-    # via unstructured-client
-networkx==3.3
-    # via torch
-nltk==3.8.1
-    # via unstructured
-numpy==1.26.4
-    # via chroma-hnswlib
-    # via chromadb
-    # via ctranslate2
-    # via langchain
-    # via langchain-chroma
-    # via langchain-community
-    # via onnxruntime
-    # via opencv-python
-    # via opencv-python-headless
-    # via pandas
-    # via rank-bm25
-    # via rapidocr-onnxruntime
-    # via scikit-learn
-    # via scipy
-    # via sentence-transformers
-    # via shapely
-    # via transformers
-    # via unstructured
-oauthlib==3.2.2
-    # via kubernetes
-    # via requests-oauthlib
-olefile==0.47
-    # via extract-msg
-    # via msoffcrypto-tool
-    # via oletools
-oletools==0.60.1
-    # via pcodedmp
-    # via rtfde
-onnxruntime==1.17.3
-    # via chromadb
-    # via faster-whisper
-    # via rapidocr-onnxruntime
-openai==1.38.0
-    # via open-webui
-opencv-python==4.9.0.80
-    # via rapidocr-onnxruntime
-opencv-python-headless==4.10.0.84
-    # via open-webui
-openpyxl==3.1.5
-    # via open-webui
-opentelemetry-api==1.24.0
-    # via chromadb
-    # via opentelemetry-exporter-otlp-proto-grpc
-    # via opentelemetry-instrumentation
-    # via opentelemetry-instrumentation-asgi
-    # via opentelemetry-instrumentation-fastapi
-    # via opentelemetry-sdk
-opentelemetry-exporter-otlp-proto-common==1.24.0
-    # via opentelemetry-exporter-otlp-proto-grpc
-opentelemetry-exporter-otlp-proto-grpc==1.24.0
-    # via chromadb
-opentelemetry-instrumentation==0.45b0
-    # via opentelemetry-instrumentation-asgi
-    # via opentelemetry-instrumentation-fastapi
-opentelemetry-instrumentation-asgi==0.45b0
-    # via opentelemetry-instrumentation-fastapi
-opentelemetry-instrumentation-fastapi==0.45b0
-    # via chromadb
-opentelemetry-proto==1.24.0
-    # via opentelemetry-exporter-otlp-proto-common
-    # via opentelemetry-exporter-otlp-proto-grpc
-opentelemetry-sdk==1.24.0
-    # via chromadb
-    # via opentelemetry-exporter-otlp-proto-grpc
-opentelemetry-semantic-conventions==0.45b0
-    # via opentelemetry-instrumentation-asgi
-    # via opentelemetry-instrumentation-fastapi
-    # via opentelemetry-sdk
-opentelemetry-util-http==0.45b0
-    # via opentelemetry-instrumentation-asgi
-    # via opentelemetry-instrumentation-fastapi
-ordered-set==4.1.0
-    # via deepdiff
-orjson==3.10.3
-    # via chromadb
-    # via fastapi
-    # via langsmith
-overrides==7.7.0
-    # via chromadb
-packaging==23.2
-    # via black
-    # via build
-    # via huggingface-hub
-    # via langchain-core
-    # via langfuse
-    # via marshmallow
-    # via onnxruntime
-    # via pytest
-    # via transformers
-    # via unstructured-client
-pandas==2.2.2
-    # via open-webui
-passlib==1.7.4
-    # via open-webui
-pathspec==0.12.1
-    # via black
-pcodedmp==1.2.6
-    # via oletools
-peewee==3.17.6
-    # via open-webui
-    # via peewee-migrate
-peewee-migrate==1.12.2
-    # via open-webui
-pillow==10.3.0
-    # via fpdf2
-    # via python-pptx
-    # via rapidocr-onnxruntime
-    # via sentence-transformers
-platformdirs==4.2.1
-    # via black
-pluggy==1.5.0
-    # via pytest
-posthog==3.5.0
-    # via chromadb
-primp==0.5.5
-    # via duckduckgo-search
-proto-plus==1.23.0
-    # via google-ai-generativelanguage
-    # via google-api-core
-protobuf==4.25.3
-    # via google-ai-generativelanguage
-    # via google-api-core
-    # via google-generativeai
-    # via googleapis-common-protos
-    # via grpcio-status
-    # via onnxruntime
-    # via opentelemetry-proto
-    # via proto-plus
-psutil==6.0.0
-    # via open-webui
-    # via unstructured
-psycopg2-binary==2.9.9
-    # via open-webui
-pyasn1==0.6.0
-    # via pyasn1-modules
-    # via python-jose
-    # via rsa
-pyasn1-modules==0.4.0
-    # via google-auth
-pyclipper==1.3.0.post5
-    # via rapidocr-onnxruntime
-pycparser==2.22
-    # via cffi
-pydantic==2.8.2
-    # via anthropic
-    # via chromadb
-    # via fastapi
-    # via google-generativeai
-    # via langchain
-    # via langchain-core
-    # via langfuse
-    # via langsmith
-    # via open-webui
-    # via openai
-pydantic-core==2.20.1
-    # via pydantic
-pydub==0.25.1
-    # via open-webui
-pygments==2.18.0
-    # via rich
-pyjwt==2.9.0
-    # via open-webui
-pymongo==4.8.0
-    # via open-webui
-pymysql==1.1.1
-    # via open-webui
-pypandoc==1.13
-    # via open-webui
-pyparsing==2.4.7
-    # via httplib2
-    # via oletools
-pypdf==4.3.1
-    # via open-webui
-    # via unstructured-client
-pypika==0.48.9
-    # via chromadb
-pyproject-hooks==1.1.0
-    # via build
-pytest==8.2.2
-    # via open-webui
-    # via pytest-docker
-pytest-docker==3.1.1
-    # via open-webui
-python-dateutil==2.9.0.post0
-    # via botocore
-    # via kubernetes
-    # via pandas
-    # via posthog
-    # via unstructured-client
-python-dotenv==1.0.1
-    # via uvicorn
-python-engineio==4.9.0
-    # via python-socketio
-python-iso639==2024.4.27
-    # via unstructured
-python-jose==3.3.0
-    # via open-webui
-python-magic==0.4.27
-    # via unstructured
-python-multipart==0.0.9
-    # via fastapi
-    # via open-webui
-python-pptx==1.0.0
-    # via open-webui
-python-socketio==5.11.3
-    # via open-webui
-pytube==15.0.0
-    # via open-webui
-pytz==2024.1
-    # via apscheduler
-    # via pandas
-pyxlsb==1.0.10
-    # via open-webui
-pyyaml==6.0.1
-    # via chromadb
-    # via ctranslate2
-    # via huggingface-hub
-    # via kubernetes
-    # via langchain
-    # via langchain-community
-    # via langchain-core
-    # via rapidocr-onnxruntime
-    # via transformers
-    # via uvicorn
-rank-bm25==0.2.2
-    # via open-webui
-rapidfuzz==3.9.0
-    # via unstructured
-rapidocr-onnxruntime==1.3.24
-    # via open-webui
-red-black-tree-mod==1.20
-    # via extract-msg
-redis==5.0.8
-    # via open-webui
-regex==2024.5.10
-    # via nltk
-    # via tiktoken
-    # via transformers
-requests==2.32.3
-    # via docker
-    # via google-api-core
-    # via huggingface-hub
-    # via kubernetes
-    # via langchain
-    # via langchain-community
-    # via langsmith
-    # via open-webui
-    # via posthog
-    # via requests-oauthlib
-    # via tiktoken
-    # via transformers
-    # via unstructured
-    # via unstructured-client
-    # via youtube-transcript-api
-requests-oauthlib==2.0.0
-    # via kubernetes
-rich==13.7.1
-    # via typer
-rsa==4.9
-    # via google-auth
-    # via python-jose
-rtfde==0.1.1
-    # via extract-msg
-s3transfer==0.10.1
-    # via boto3
-safetensors==0.4.3
-    # via transformers
-scikit-learn==1.4.2
-    # via sentence-transformers
-scipy==1.13.0
-    # via scikit-learn
-    # via sentence-transformers
-sentence-transformers==3.0.1
-    # via open-webui
-setuptools==69.5.1
-    # via ctranslate2
-    # via opentelemetry-instrumentation
-shapely==2.0.5
-    # via rapidocr-onnxruntime
-shellingham==1.5.4
-    # via typer
-simple-websocket==1.0.0
-    # via python-engineio
-six==1.16.0
-    # via apscheduler
-    # via ecdsa
-    # via kubernetes
-    # via langdetect
-    # via posthog
-    # via python-dateutil
-    # via rapidocr-onnxruntime
-    # via unstructured-client
-sniffio==1.3.1
-    # via anthropic
-    # via anyio
-    # via httpx
-    # via openai
-soupsieve==2.5
-    # via beautifulsoup4
-sqlalchemy==2.0.32
-    # via alembic
-    # via langchain
-    # via langchain-community
-    # via open-webui
-starlette==0.37.2
-    # via fastapi
-sympy==1.12
-    # via onnxruntime
-    # via torch
-tabulate==0.9.0
-    # via unstructured
-tenacity==8.3.0
-    # via chromadb
-    # via langchain
-    # via langchain-community
-    # via langchain-core
-threadpoolctl==3.5.0
-    # via scikit-learn
-tiktoken==0.7.0
-    # via open-webui
-tokenizers==0.15.2
-    # via anthropic
-    # via chromadb
-    # via faster-whisper
-    # via transformers
-torch==2.3.0
-    # via sentence-transformers
-tqdm==4.66.4
-    # via chromadb
-    # via google-generativeai
-    # via huggingface-hub
-    # via nltk
-    # via openai
-    # via sentence-transformers
-    # via transformers
-    # via unstructured
-transformers==4.39.3
-    # via sentence-transformers
-typer==0.12.3
-    # via chromadb
-    # via fastapi-cli
-typing-extensions==4.11.0
-    # via alembic
-    # via anthropic
-    # via chromadb
-    # via fastapi
-    # via google-generativeai
-    # via huggingface-hub
-    # via langchain-core
-    # via openai
-    # via opentelemetry-sdk
-    # via pydantic
-    # via pydantic-core
-    # via python-pptx
-    # via sqlalchemy
-    # via torch
-    # via typer
-    # via typing-inspect
-    # via unstructured
-    # via unstructured-client
-typing-inspect==0.9.0
-    # via dataclasses-json
-    # via unstructured-client
-tzdata==2024.1
-    # via pandas
-tzlocal==5.2
-    # via apscheduler
-    # via extract-msg
-ujson==5.10.0
-    # via fastapi
-unstructured==0.15.5
-    # via open-webui
-unstructured-client==0.22.0
-    # via unstructured
-uritemplate==4.1.1
-    # via google-api-python-client
-urllib3==2.2.1
-    # via botocore
-    # via docker
-    # via kubernetes
-    # via requests
-    # via unstructured-client
-uvicorn==0.30.6
-    # via chromadb
-    # via fastapi
-    # via open-webui
-uvloop==0.19.0
-    # via uvicorn
-validators==0.33.0
-    # via open-webui
-watchfiles==0.21.0
-    # via uvicorn
-websocket-client==1.8.0
-    # via kubernetes
-websockets==12.0
-    # via uvicorn
-werkzeug==3.0.3
-    # via flask
-wrapt==1.16.0
-    # via deprecated
-    # via langfuse
-    # via opentelemetry-instrumentation
-    # via unstructured
-wsproto==1.2.0
-    # via simple-websocket
-xlrd==2.0.1
-    # via open-webui
-xlsxwriter==3.2.0
-    # via python-pptx
-yarl==1.9.4
-    # via aiohttp
-youtube-transcript-api==0.6.2
-    # via open-webui
-zipp==3.18.1
-    # via importlib-metadata

+ 0 - 750
requirements.lock

@@ -1,750 +0,0 @@
-# generated by rye
-# use `rye lock` or `rye sync` to update this lockfile
-#
-# last locked with the following flags:
-#   pre: false
-#   features: []
-#   all-features: false
-#   with-sources: false
-#   generate-hashes: false
-#   universal: false
-
--e file:.
-aiohappyeyeballs==2.3.5
-    # via aiohttp
-aiohttp==3.10.2
-    # via langchain
-    # via langchain-community
-    # via open-webui
-aiosignal==1.3.1
-    # via aiohttp
-alembic==1.13.2
-    # via open-webui
-annotated-types==0.6.0
-    # via pydantic
-anthropic==0.32.0
-    # via open-webui
-anyio==4.4.0
-    # via anthropic
-    # via httpx
-    # via langfuse
-    # via openai
-    # via starlette
-    # via watchfiles
-apscheduler==3.10.4
-    # via open-webui
-argon2-cffi==23.1.0
-    # via open-webui
-argon2-cffi-bindings==21.2.0
-    # via argon2-cffi
-asgiref==3.8.1
-    # via opentelemetry-instrumentation-asgi
-attrs==23.2.0
-    # via aiohttp
-    # via pytest-docker
-authlib==1.3.1
-    # via open-webui
-av==11.0.0
-    # via faster-whisper
-backoff==2.2.1
-    # via langfuse
-    # via posthog
-    # via unstructured
-bcrypt==4.2.0
-    # via chromadb
-    # via open-webui
-    # via passlib
-beautifulsoup4==4.12.3
-    # via extract-msg
-    # via unstructured
-bidict==0.23.1
-    # via python-socketio
-black==24.8.0
-    # via open-webui
-blinker==1.8.2
-    # via flask
-boto3==1.35.0
-    # via open-webui
-botocore==1.35.2
-    # via boto3
-    # via s3transfer
-build==1.2.1
-    # via chromadb
-cachetools==5.3.3
-    # via google-auth
-certifi==2024.2.2
-    # via httpcore
-    # via httpx
-    # via kubernetes
-    # via requests
-    # via unstructured-client
-cffi==1.16.0
-    # via argon2-cffi-bindings
-    # via cryptography
-chardet==5.2.0
-    # via unstructured
-charset-normalizer==3.3.2
-    # via requests
-    # via unstructured-client
-chroma-hnswlib==0.7.6
-    # via chromadb
-chromadb==0.5.5
-    # via langchain-chroma
-    # via open-webui
-click==8.1.7
-    # via black
-    # via duckduckgo-search
-    # via flask
-    # via nltk
-    # via peewee-migrate
-    # via typer
-    # via uvicorn
-colorclass==2.2.2
-    # via oletools
-coloredlogs==15.0.1
-    # via onnxruntime
-compressed-rtf==1.0.6
-    # via extract-msg
-cryptography==42.0.7
-    # via authlib
-    # via msoffcrypto-tool
-    # via pyjwt
-ctranslate2==4.2.1
-    # via faster-whisper
-dataclasses-json==0.6.6
-    # via langchain-community
-    # via unstructured
-    # via unstructured-client
-deepdiff==7.0.1
-    # via unstructured-client
-defusedxml==0.7.1
-    # via fpdf2
-deprecated==1.2.14
-    # via opentelemetry-api
-    # via opentelemetry-exporter-otlp-proto-grpc
-distro==1.9.0
-    # via anthropic
-    # via openai
-dnspython==2.6.1
-    # via email-validator
-    # via pymongo
-docker==7.1.0
-    # via open-webui
-docx2txt==0.8
-    # via open-webui
-duckduckgo-search==6.2.6
-    # via open-webui
-easygui==0.98.3
-    # via oletools
-ebcdic==1.1.1
-    # via extract-msg
-ecdsa==0.19.0
-    # via python-jose
-email-validator==2.1.1
-    # via fastapi
-emoji==2.11.1
-    # via unstructured
-et-xmlfile==1.1.0
-    # via openpyxl
-extract-msg==0.48.5
-    # via open-webui
-fake-useragent==1.5.1
-    # via open-webui
-fastapi==0.111.0
-    # via chromadb
-    # via langchain-chroma
-    # via open-webui
-fastapi-cli==0.0.4
-    # via fastapi
-faster-whisper==1.0.3
-    # via open-webui
-filelock==3.14.0
-    # via huggingface-hub
-    # via torch
-    # via transformers
-filetype==1.2.0
-    # via unstructured
-flask==3.0.3
-    # via flask-cors
-    # via open-webui
-flask-cors==4.0.1
-    # via open-webui
-flatbuffers==24.3.25
-    # via onnxruntime
-fonttools==4.51.0
-    # via fpdf2
-fpdf2==2.7.9
-    # via open-webui
-frozenlist==1.4.1
-    # via aiohttp
-    # via aiosignal
-fsspec==2024.3.1
-    # via huggingface-hub
-    # via torch
-google-ai-generativelanguage==0.6.6
-    # via google-generativeai
-google-api-core==2.19.0
-    # via google-ai-generativelanguage
-    # via google-api-python-client
-    # via google-generativeai
-google-api-python-client==2.129.0
-    # via google-generativeai
-google-auth==2.29.0
-    # via google-ai-generativelanguage
-    # via google-api-core
-    # via google-api-python-client
-    # via google-auth-httplib2
-    # via google-generativeai
-    # via kubernetes
-google-auth-httplib2==0.2.0
-    # via google-api-python-client
-google-generativeai==0.7.2
-    # via open-webui
-googleapis-common-protos==1.63.0
-    # via google-api-core
-    # via grpcio-status
-    # via opentelemetry-exporter-otlp-proto-grpc
-grpcio==1.63.0
-    # via chromadb
-    # via google-api-core
-    # via grpcio-status
-    # via opentelemetry-exporter-otlp-proto-grpc
-grpcio-status==1.62.2
-    # via google-api-core
-h11==0.14.0
-    # via httpcore
-    # via uvicorn
-    # via wsproto
-httpcore==1.0.5
-    # via httpx
-httplib2==0.22.0
-    # via google-api-python-client
-    # via google-auth-httplib2
-httptools==0.6.1
-    # via uvicorn
-httpx==0.27.0
-    # via anthropic
-    # via chromadb
-    # via fastapi
-    # via langfuse
-    # via openai
-huggingface-hub==0.23.0
-    # via faster-whisper
-    # via sentence-transformers
-    # via tokenizers
-    # via transformers
-humanfriendly==10.0
-    # via coloredlogs
-idna==3.7
-    # via anyio
-    # via email-validator
-    # via httpx
-    # via langfuse
-    # via requests
-    # via unstructured-client
-    # via yarl
-importlib-metadata==7.0.0
-    # via opentelemetry-api
-importlib-resources==6.4.0
-    # via chromadb
-iniconfig==2.0.0
-    # via pytest
-itsdangerous==2.2.0
-    # via flask
-jinja2==3.1.4
-    # via fastapi
-    # via flask
-    # via torch
-jiter==0.5.0
-    # via anthropic
-jmespath==1.0.1
-    # via boto3
-    # via botocore
-joblib==1.4.2
-    # via nltk
-    # via scikit-learn
-jsonpatch==1.33
-    # via langchain-core
-jsonpath-python==1.0.6
-    # via unstructured-client
-jsonpointer==2.4
-    # via jsonpatch
-kubernetes==29.0.0
-    # via chromadb
-langchain==0.2.12
-    # via langchain-community
-    # via open-webui
-langchain-chroma==0.1.2
-    # via open-webui
-langchain-community==0.2.10
-    # via open-webui
-langchain-core==0.2.28
-    # via langchain
-    # via langchain-chroma
-    # via langchain-community
-    # via langchain-text-splitters
-langchain-text-splitters==0.2.0
-    # via langchain
-langdetect==1.0.9
-    # via unstructured
-langfuse==2.43.3
-    # via open-webui
-langsmith==0.1.96
-    # via langchain
-    # via langchain-community
-    # via langchain-core
-lark==1.1.8
-    # via rtfde
-lxml==5.2.2
-    # via python-pptx
-    # via unstructured
-mako==1.3.5
-    # via alembic
-markdown==3.6
-    # via open-webui
-markdown-it-py==3.0.0
-    # via rich
-markupsafe==2.1.5
-    # via jinja2
-    # via mako
-    # via werkzeug
-marshmallow==3.21.2
-    # via dataclasses-json
-    # via unstructured-client
-mdurl==0.1.2
-    # via markdown-it-py
-mmh3==4.1.0
-    # via chromadb
-monotonic==1.6
-    # via posthog
-mpmath==1.3.0
-    # via sympy
-msoffcrypto-tool==5.4.1
-    # via oletools
-multidict==6.0.5
-    # via aiohttp
-    # via yarl
-mypy-extensions==1.0.0
-    # via black
-    # via typing-inspect
-    # via unstructured-client
-networkx==3.3
-    # via torch
-nltk==3.8.1
-    # via unstructured
-numpy==1.26.4
-    # via chroma-hnswlib
-    # via chromadb
-    # via ctranslate2
-    # via langchain
-    # via langchain-chroma
-    # via langchain-community
-    # via onnxruntime
-    # via opencv-python
-    # via opencv-python-headless
-    # via pandas
-    # via rank-bm25
-    # via rapidocr-onnxruntime
-    # via scikit-learn
-    # via scipy
-    # via sentence-transformers
-    # via shapely
-    # via transformers
-    # via unstructured
-oauthlib==3.2.2
-    # via kubernetes
-    # via requests-oauthlib
-olefile==0.47
-    # via extract-msg
-    # via msoffcrypto-tool
-    # via oletools
-oletools==0.60.1
-    # via pcodedmp
-    # via rtfde
-onnxruntime==1.17.3
-    # via chromadb
-    # via faster-whisper
-    # via rapidocr-onnxruntime
-openai==1.38.0
-    # via open-webui
-opencv-python==4.9.0.80
-    # via rapidocr-onnxruntime
-opencv-python-headless==4.10.0.84
-    # via open-webui
-openpyxl==3.1.5
-    # via open-webui
-opentelemetry-api==1.24.0
-    # via chromadb
-    # via opentelemetry-exporter-otlp-proto-grpc
-    # via opentelemetry-instrumentation
-    # via opentelemetry-instrumentation-asgi
-    # via opentelemetry-instrumentation-fastapi
-    # via opentelemetry-sdk
-opentelemetry-exporter-otlp-proto-common==1.24.0
-    # via opentelemetry-exporter-otlp-proto-grpc
-opentelemetry-exporter-otlp-proto-grpc==1.24.0
-    # via chromadb
-opentelemetry-instrumentation==0.45b0
-    # via opentelemetry-instrumentation-asgi
-    # via opentelemetry-instrumentation-fastapi
-opentelemetry-instrumentation-asgi==0.45b0
-    # via opentelemetry-instrumentation-fastapi
-opentelemetry-instrumentation-fastapi==0.45b0
-    # via chromadb
-opentelemetry-proto==1.24.0
-    # via opentelemetry-exporter-otlp-proto-common
-    # via opentelemetry-exporter-otlp-proto-grpc
-opentelemetry-sdk==1.24.0
-    # via chromadb
-    # via opentelemetry-exporter-otlp-proto-grpc
-opentelemetry-semantic-conventions==0.45b0
-    # via opentelemetry-instrumentation-asgi
-    # via opentelemetry-instrumentation-fastapi
-    # via opentelemetry-sdk
-opentelemetry-util-http==0.45b0
-    # via opentelemetry-instrumentation-asgi
-    # via opentelemetry-instrumentation-fastapi
-ordered-set==4.1.0
-    # via deepdiff
-orjson==3.10.3
-    # via chromadb
-    # via fastapi
-    # via langsmith
-overrides==7.7.0
-    # via chromadb
-packaging==23.2
-    # via black
-    # via build
-    # via huggingface-hub
-    # via langchain-core
-    # via langfuse
-    # via marshmallow
-    # via onnxruntime
-    # via pytest
-    # via transformers
-    # via unstructured-client
-pandas==2.2.2
-    # via open-webui
-passlib==1.7.4
-    # via open-webui
-pathspec==0.12.1
-    # via black
-pcodedmp==1.2.6
-    # via oletools
-peewee==3.17.6
-    # via open-webui
-    # via peewee-migrate
-peewee-migrate==1.12.2
-    # via open-webui
-pillow==10.3.0
-    # via fpdf2
-    # via python-pptx
-    # via rapidocr-onnxruntime
-    # via sentence-transformers
-platformdirs==4.2.1
-    # via black
-pluggy==1.5.0
-    # via pytest
-posthog==3.5.0
-    # via chromadb
-primp==0.5.5
-    # via duckduckgo-search
-proto-plus==1.23.0
-    # via google-ai-generativelanguage
-    # via google-api-core
-protobuf==4.25.3
-    # via google-ai-generativelanguage
-    # via google-api-core
-    # via google-generativeai
-    # via googleapis-common-protos
-    # via grpcio-status
-    # via onnxruntime
-    # via opentelemetry-proto
-    # via proto-plus
-psutil==6.0.0
-    # via open-webui
-    # via unstructured
-psycopg2-binary==2.9.9
-    # via open-webui
-pyasn1==0.6.0
-    # via pyasn1-modules
-    # via python-jose
-    # via rsa
-pyasn1-modules==0.4.0
-    # via google-auth
-pyclipper==1.3.0.post5
-    # via rapidocr-onnxruntime
-pycparser==2.22
-    # via cffi
-pydantic==2.8.2
-    # via anthropic
-    # via chromadb
-    # via fastapi
-    # via google-generativeai
-    # via langchain
-    # via langchain-core
-    # via langfuse
-    # via langsmith
-    # via open-webui
-    # via openai
-pydantic-core==2.20.1
-    # via pydantic
-pydub==0.25.1
-    # via open-webui
-pygments==2.18.0
-    # via rich
-pyjwt==2.9.0
-    # via open-webui
-pymongo==4.8.0
-    # via open-webui
-pymysql==1.1.1
-    # via open-webui
-pypandoc==1.13
-    # via open-webui
-pyparsing==2.4.7
-    # via httplib2
-    # via oletools
-pypdf==4.3.1
-    # via open-webui
-    # via unstructured-client
-pypika==0.48.9
-    # via chromadb
-pyproject-hooks==1.1.0
-    # via build
-pytest==8.2.2
-    # via open-webui
-    # via pytest-docker
-pytest-docker==3.1.1
-    # via open-webui
-python-dateutil==2.9.0.post0
-    # via botocore
-    # via kubernetes
-    # via pandas
-    # via posthog
-    # via unstructured-client
-python-dotenv==1.0.1
-    # via uvicorn
-python-engineio==4.9.0
-    # via python-socketio
-python-iso639==2024.4.27
-    # via unstructured
-python-jose==3.3.0
-    # via open-webui
-python-magic==0.4.27
-    # via unstructured
-python-multipart==0.0.9
-    # via fastapi
-    # via open-webui
-python-pptx==1.0.0
-    # via open-webui
-python-socketio==5.11.3
-    # via open-webui
-pytube==15.0.0
-    # via open-webui
-pytz==2024.1
-    # via apscheduler
-    # via pandas
-pyxlsb==1.0.10
-    # via open-webui
-pyyaml==6.0.1
-    # via chromadb
-    # via ctranslate2
-    # via huggingface-hub
-    # via kubernetes
-    # via langchain
-    # via langchain-community
-    # via langchain-core
-    # via rapidocr-onnxruntime
-    # via transformers
-    # via uvicorn
-rank-bm25==0.2.2
-    # via open-webui
-rapidfuzz==3.9.0
-    # via unstructured
-rapidocr-onnxruntime==1.3.24
-    # via open-webui
-red-black-tree-mod==1.20
-    # via extract-msg
-redis==5.0.8
-    # via open-webui
-regex==2024.5.10
-    # via nltk
-    # via tiktoken
-    # via transformers
-requests==2.32.3
-    # via docker
-    # via google-api-core
-    # via huggingface-hub
-    # via kubernetes
-    # via langchain
-    # via langchain-community
-    # via langsmith
-    # via open-webui
-    # via posthog
-    # via requests-oauthlib
-    # via tiktoken
-    # via transformers
-    # via unstructured
-    # via unstructured-client
-    # via youtube-transcript-api
-requests-oauthlib==2.0.0
-    # via kubernetes
-rich==13.7.1
-    # via typer
-rsa==4.9
-    # via google-auth
-    # via python-jose
-rtfde==0.1.1
-    # via extract-msg
-s3transfer==0.10.1
-    # via boto3
-safetensors==0.4.3
-    # via transformers
-scikit-learn==1.4.2
-    # via sentence-transformers
-scipy==1.13.0
-    # via scikit-learn
-    # via sentence-transformers
-sentence-transformers==3.0.1
-    # via open-webui
-setuptools==69.5.1
-    # via ctranslate2
-    # via opentelemetry-instrumentation
-shapely==2.0.5
-    # via rapidocr-onnxruntime
-shellingham==1.5.4
-    # via typer
-simple-websocket==1.0.0
-    # via python-engineio
-six==1.16.0
-    # via apscheduler
-    # via ecdsa
-    # via kubernetes
-    # via langdetect
-    # via posthog
-    # via python-dateutil
-    # via rapidocr-onnxruntime
-    # via unstructured-client
-sniffio==1.3.1
-    # via anthropic
-    # via anyio
-    # via httpx
-    # via openai
-soupsieve==2.5
-    # via beautifulsoup4
-sqlalchemy==2.0.32
-    # via alembic
-    # via langchain
-    # via langchain-community
-    # via open-webui
-starlette==0.37.2
-    # via fastapi
-sympy==1.12
-    # via onnxruntime
-    # via torch
-tabulate==0.9.0
-    # via unstructured
-tenacity==8.3.0
-    # via chromadb
-    # via langchain
-    # via langchain-community
-    # via langchain-core
-threadpoolctl==3.5.0
-    # via scikit-learn
-tiktoken==0.7.0
-    # via open-webui
-tokenizers==0.15.2
-    # via anthropic
-    # via chromadb
-    # via faster-whisper
-    # via transformers
-torch==2.3.0
-    # via sentence-transformers
-tqdm==4.66.4
-    # via chromadb
-    # via google-generativeai
-    # via huggingface-hub
-    # via nltk
-    # via openai
-    # via sentence-transformers
-    # via transformers
-    # via unstructured
-transformers==4.39.3
-    # via sentence-transformers
-typer==0.12.3
-    # via chromadb
-    # via fastapi-cli
-typing-extensions==4.11.0
-    # via alembic
-    # via anthropic
-    # via chromadb
-    # via fastapi
-    # via google-generativeai
-    # via huggingface-hub
-    # via langchain-core
-    # via openai
-    # via opentelemetry-sdk
-    # via pydantic
-    # via pydantic-core
-    # via python-pptx
-    # via sqlalchemy
-    # via torch
-    # via typer
-    # via typing-inspect
-    # via unstructured
-    # via unstructured-client
-typing-inspect==0.9.0
-    # via dataclasses-json
-    # via unstructured-client
-tzdata==2024.1
-    # via pandas
-tzlocal==5.2
-    # via apscheduler
-    # via extract-msg
-ujson==5.10.0
-    # via fastapi
-unstructured==0.15.5
-    # via open-webui
-unstructured-client==0.22.0
-    # via unstructured
-uritemplate==4.1.1
-    # via google-api-python-client
-urllib3==2.2.1
-    # via botocore
-    # via docker
-    # via kubernetes
-    # via requests
-    # via unstructured-client
-uvicorn==0.30.6
-    # via chromadb
-    # via fastapi
-    # via open-webui
-uvloop==0.19.0
-    # via uvicorn
-validators==0.33.0
-    # via open-webui
-watchfiles==0.21.0
-    # via uvicorn
-websocket-client==1.8.0
-    # via kubernetes
-websockets==12.0
-    # via uvicorn
-werkzeug==3.0.3
-    # via flask
-wrapt==1.16.0
-    # via deprecated
-    # via langfuse
-    # via opentelemetry-instrumentation
-    # via unstructured
-wsproto==1.2.0
-    # via simple-websocket
-xlrd==2.0.1
-    # via open-webui
-xlsxwriter==3.2.0
-    # via python-pptx
-yarl==1.9.4
-    # via aiohttp
-youtube-transcript-api==0.6.2
-    # via open-webui
-zipp==3.18.1
-    # via importlib-metadata

+ 5 - 1
src/lib/apis/audio/index.ts

@@ -132,7 +132,11 @@ export const synthesizeOpenAISpeech = async (
 	return res;
 };
 
-export const getModels = async (token: string = '') => {
+interface AvailableModelsResponse {
+	models: { name: string; id: string }[] | { id: string }[];
+}
+
+export const getModels = async (token: string = ''): Promise<AvailableModelsResponse> => {
 	let error = null;
 
 	const res = await fetch(`${AUDIO_API_BASE_URL}/models`, {

+ 1 - 1
src/lib/apis/memories/index.ts

@@ -156,7 +156,7 @@ export const deleteMemoryById = async (token: string, id: string) => {
 export const deleteMemoriesByUserId = async (token: string) => {
 	let error = null;
 
-	const res = await fetch(`${WEBUI_API_BASE_URL}/memories/user`, {
+	const res = await fetch(`${WEBUI_API_BASE_URL}/memories/delete/user`, {
 		method: 'DELETE',
 		headers: {
 			Accept: 'application/json',

+ 2 - 2
src/lib/apis/rag/index.ts

@@ -400,7 +400,7 @@ export const resetUploadDir = async (token: string) => {
 	let error = null;
 
 	const res = await fetch(`${RAG_API_BASE_URL}/reset/uploads`, {
-		method: 'GET',
+		method: 'POST',
 		headers: {
 			Accept: 'application/json',
 			authorization: `Bearer ${token}`
@@ -426,7 +426,7 @@ export const resetVectorDB = async (token: string) => {
 	let error = null;
 
 	const res = await fetch(`${RAG_API_BASE_URL}/reset/db`, {
-		method: 'GET',
+		method: 'POST',
 		headers: {
 			Accept: 'application/json',
 			authorization: `Bearer ${token}`

+ 4 - 1
src/lib/components/admin/Settings.svelte

@@ -359,8 +359,11 @@
 			<Models />
 		{:else if selectedTab === 'documents'}
 			<Documents
-				saveHandler={() => {
+				on:save={async () => {
 					toast.success($i18n.t('Settings saved successfully!'));
+
+					await tick();
+					await config.set(await getBackendConfig());
 				}}
 			/>
 		{:else if selectedTab === 'web'}

+ 49 - 16
src/lib/components/admin/Settings/Audio.svelte

@@ -10,31 +10,36 @@
 		getModels as _getModels,
 		getVoices as _getVoices
 	} from '$lib/apis/audio';
-	import { user, settings, config } from '$lib/stores';
+	import { config } from '$lib/stores';
 
 	import SensitiveInput from '$lib/components/common/SensitiveInput.svelte';
 
-	const i18n = getContext('i18n');
+	import { TTS_RESPONSE_SPLIT } from '$lib/types';
 
-	export let saveHandler: Function;
+	import type { Writable } from 'svelte/store';
+	import type { i18n as i18nType } from 'i18next';
 
-	// Audio
+	const i18n = getContext<Writable<i18nType>>('i18n');
+
+	export let saveHandler: () => void;
 
+	// Audio
 	let TTS_OPENAI_API_BASE_URL = '';
 	let TTS_OPENAI_API_KEY = '';
 	let TTS_API_KEY = '';
 	let TTS_ENGINE = '';
 	let TTS_MODEL = '';
 	let TTS_VOICE = '';
+	let TTS_SPLIT_ON: TTS_RESPONSE_SPLIT = TTS_RESPONSE_SPLIT.PUNCTUATION;
 
 	let STT_OPENAI_API_BASE_URL = '';
 	let STT_OPENAI_API_KEY = '';
 	let STT_ENGINE = '';
 	let STT_MODEL = '';
 
-	let voices = [];
-	let models = [];
-	let nonLocalVoices = false;
+	// eslint-disable-next-line no-undef
+	let voices: SpeechSynthesisVoice[] = [];
+	let models: Awaited<ReturnType<typeof _getModels>>['models'] = [];
 
 	const getModels = async () => {
 		if (TTS_ENGINE === '') {
@@ -53,8 +58,8 @@
 
 	const getVoices = async () => {
 		if (TTS_ENGINE === '') {
-			const getVoicesLoop = setInterval(async () => {
-				voices = await speechSynthesis.getVoices();
+			const getVoicesLoop = setInterval(() => {
+				voices = speechSynthesis.getVoices();
 
 				// do your loop
 				if (voices.length > 0) {
@@ -81,7 +86,8 @@
 				API_KEY: TTS_API_KEY,
 				ENGINE: TTS_ENGINE,
 				MODEL: TTS_MODEL,
-				VOICE: TTS_VOICE
+				VOICE: TTS_VOICE,
+				SPLIT_ON: TTS_SPLIT_ON
 			},
 			stt: {
 				OPENAI_API_BASE_URL: STT_OPENAI_API_BASE_URL,
@@ -92,9 +98,10 @@
 		});
 
 		if (res) {
-			toast.success($i18n.t('Audio settings updated successfully'));
-
-			config.set(await getBackendConfig());
+			saveHandler();
+			getBackendConfig()
+				.then(config.set)
+				.catch(() => {});
 		}
 	};
 
@@ -111,6 +118,8 @@
 			TTS_MODEL = res.tts.MODEL;
 			TTS_VOICE = res.tts.VOICE;
 
+			TTS_SPLIT_ON = res.tts.SPLIT_ON || TTS_RESPONSE_SPLIT.PUNCTUATION;
+
 			STT_OPENAI_API_BASE_URL = res.stt.OPENAI_API_BASE_URL;
 			STT_OPENAI_API_KEY = res.stt.OPENAI_API_KEY;
 
@@ -139,7 +148,7 @@
 					<div class=" self-center text-xs font-medium">{$i18n.t('Speech-to-Text Engine')}</div>
 					<div class="flex items-center relative">
 						<select
-							class="dark:bg-gray-900 w-fit pr-8 rounded px-2 p-1 text-xs bg-transparent outline-none text-right"
+							class="dark:bg-gray-900 cursor-pointer w-fit pr-8 rounded px-2 p-1 text-xs bg-transparent outline-none text-right"
 							bind:value={STT_ENGINE}
 							placeholder="Select an engine"
 						>
@@ -195,7 +204,7 @@
 					<div class=" self-center text-xs font-medium">{$i18n.t('Text-to-Speech Engine')}</div>
 					<div class="flex items-center relative">
 						<select
-							class=" dark:bg-gray-900 w-fit pr-8 rounded px-2 p-1 text-xs bg-transparent outline-none text-right"
+							class=" dark:bg-gray-900 w-fit pr-8 cursor-pointer rounded px-2 p-1 text-xs bg-transparent outline-none text-right"
 							bind:value={TTS_ENGINE}
 							placeholder="Select a mode"
 							on:change={async (e) => {
@@ -203,7 +212,7 @@
 								await getVoices();
 								await getModels();
 
-								if (e.target.value === 'openai') {
+								if (e.target?.value === 'openai') {
 									TTS_VOICE = 'alloy';
 									TTS_MODEL = 'tts-1';
 								} else {
@@ -351,6 +360,30 @@
 						</div>
 					</div>
 				{/if}
+
+				<hr class="dark:border-gray-850 my-2" />
+
+				<div class="pt-0.5 flex w-full justify-between">
+					<div class="self-center text-xs font-medium">{$i18n.t('Response splitting')}</div>
+					<div class="flex items-center relative">
+						<select
+							class="dark:bg-gray-900 w-fit pr-8 cursor-pointer rounded px-2 p-1 text-xs bg-transparent outline-none text-right"
+							aria-label="Select how to split message text for TTS requests"
+							bind:value={TTS_SPLIT_ON}
+						>
+							{#each Object.values(TTS_RESPONSE_SPLIT) as split}
+								<option value={split}
+									>{$i18n.t(split.charAt(0).toUpperCase() + split.slice(1))}</option
+								>
+							{/each}
+						</select>
+					</div>
+				</div>
+				<div class="mt-2 mb-1 text-xs text-gray-400 dark:text-gray-500">
+					{$i18n.t(
+						"Control how message text is split for TTS requests. 'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string."
+					)}
+				</div>
 			</div>
 		</div>
 	</div>

+ 75 - 8
src/lib/components/admin/Settings/Documents.svelte

@@ -1,4 +1,8 @@
 <script lang="ts">
+	import { onMount, getContext, createEventDispatcher } from 'svelte';
+
+	const dispatch = createEventDispatcher();
+
 	import { getDocs } from '$lib/apis/documents';
 	import { deleteAllFiles, deleteFileById } from '$lib/apis/files';
 	import {
@@ -18,14 +22,12 @@
 	import ResetVectorDBConfirmDialog from '$lib/components/common/ConfirmDialog.svelte';
 
 	import { documents, models } from '$lib/stores';
-	import { onMount, getContext } from 'svelte';
 	import { toast } from 'svelte-sonner';
 	import SensitiveInput from '$lib/components/common/SensitiveInput.svelte';
+	import Tooltip from '$lib/components/common/Tooltip.svelte';
 
 	const i18n = getContext('i18n');
 
-	export let saveHandler: Function;
-
 	let scanDirLoading = false;
 	let updateEmbeddingModelLoading = false;
 	let updateRerankingModelLoading = false;
@@ -37,6 +39,9 @@
 	let embeddingModel = '';
 	let rerankingModel = '';
 
+	let fileMaxSize = null;
+	let fileMaxCount = null;
+
 	let contentExtractionEngine = 'default';
 	let tikaServerUrl = '';
 	let showTikaServerUrl = false;
@@ -161,19 +166,22 @@
 	};
 
 	const submitHandler = async () => {
-		embeddingModelUpdateHandler();
+		await embeddingModelUpdateHandler();
 
 		if (querySettings.hybrid) {
-			rerankingModelUpdateHandler();
+			await rerankingModelUpdateHandler();
 		}
 
 		if (contentExtractionEngine === 'tika' && tikaServerUrl === '') {
 			toast.error($i18n.t('Tika Server URL required.'));
 			return;
 		}
-
 		const res = await updateRAGConfig(localStorage.token, {
 			pdf_extract_images: pdfExtractImages,
+			file: {
+				max_size: fileMaxSize === '' ? null : fileMaxSize,
+				max_count: fileMaxCount === '' ? null : fileMaxCount
+			},
 			chunk: {
 				chunk_overlap: chunkOverlap,
 				chunk_size: chunkSize
@@ -185,6 +193,8 @@
 		});
 
 		await updateQuerySettings(localStorage.token, querySettings);
+
+		dispatch('save');
 	};
 
 	const setEmbeddingConfig = async () => {
@@ -218,7 +228,6 @@
 		await setRerankingConfig();
 
 		querySettings = await getQuerySettings(localStorage.token);
-
 		const res = await getRAGConfig(localStorage.token);
 
 		if (res) {
@@ -230,6 +239,9 @@
 			contentExtractionEngine = res.content_extraction.engine;
 			tikaServerUrl = res.content_extraction.tika_server_url;
 			showTikaServerUrl = contentExtractionEngine === 'tika';
+
+			fileMaxSize = res?.file.max_size ?? '';
+			fileMaxCount = res?.file.max_count ?? '';
 		}
 	});
 </script>
@@ -266,7 +278,6 @@
 	class="flex flex-col h-full justify-between space-y-3 text-sm"
 	on:submit|preventDefault={() => {
 		submitHandler();
-		saveHandler();
 	}}
 >
 	<div class=" space-y-2.5 overflow-y-scroll scrollbar-hidden h-full pr-1.5">
@@ -610,6 +621,62 @@
 				</div>
 			{/if}
 		</div>
+
+		<hr class=" dark:border-gray-850" />
+
+		<div class="">
+			<div class="text-sm font-medium">{$i18n.t('Files')}</div>
+
+			<div class=" my-2 flex gap-1.5">
+				<div class="w-full">
+					<div class=" self-center text-xs font-medium min-w-fit mb-1">
+						{$i18n.t('Max Upload Size')}
+					</div>
+
+					<div class="self-center">
+						<Tooltip
+							content={$i18n.t(
+								'The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.'
+							)}
+							placement="top-start"
+						>
+							<input
+								class="w-full rounded-lg py-1.5 px-4 text-sm bg-gray-50 dark:text-gray-300 dark:bg-gray-850 outline-none"
+								type="number"
+								placeholder={$i18n.t('Leave empty for unlimited')}
+								bind:value={fileMaxSize}
+								autocomplete="off"
+								min="0"
+							/>
+						</Tooltip>
+					</div>
+				</div>
+
+				<div class="  w-full">
+					<div class="self-center text-xs font-medium min-w-fit mb-1">
+						{$i18n.t('Max Upload Count')}
+					</div>
+					<div class="self-center">
+						<Tooltip
+							content={$i18n.t(
+								'The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.'
+							)}
+							placement="top-start"
+						>
+							<input
+								class=" w-full rounded-lg py-1.5 px-4 text-sm bg-gray-50 dark:text-gray-300 dark:bg-gray-850 outline-none"
+								type="number"
+								placeholder={$i18n.t('Leave empty for unlimited')}
+								bind:value={fileMaxCount}
+								autocomplete="off"
+								min="0"
+							/>
+						</Tooltip>
+					</div>
+				</div>
+			</div>
+		</div>
+
 		<hr class=" dark:border-gray-850" />
 
 		<div class=" ">

+ 196 - 100
src/lib/components/chat/Chat.svelte

@@ -3,13 +3,13 @@
 	import { toast } from 'svelte-sonner';
 	import mermaid from 'mermaid';
 
-	import { getContext, onMount, tick } from 'svelte';
+	import { getContext, onDestroy, onMount, tick } from 'svelte';
 	import { goto } from '$app/navigation';
 	import { page } from '$app/stores';
 
-	import type { Writable } from 'svelte/store';
+	import type { Unsubscriber, Writable } from 'svelte/store';
 	import type { i18n as i18nType } from 'i18next';
-	import { OLLAMA_API_BASE_URL, OPENAI_API_BASE_URL, WEBUI_BASE_URL } from '$lib/constants';
+	import { WEBUI_BASE_URL } from '$lib/constants';
 
 	import {
 		chatId,
@@ -19,31 +19,26 @@
 		models,
 		settings,
 		showSidebar,
-		tags as _tags,
 		WEBUI_NAME,
 		banners,
 		user,
 		socket,
 		showCallOverlay,
-		tools,
 		currentChatPage,
 		temporaryChatEnabled
 	} from '$lib/stores';
 	import {
 		convertMessagesToHistory,
 		copyToClipboard,
+		getMessageContentParts,
 		extractSentencesForAudio,
-		getUserPosition,
 		promptTemplate,
 		splitStream
 	} from '$lib/utils';
 
 	import { generateChatCompletion } from '$lib/apis/ollama';
 	import {
-		addTagById,
 		createNewChat,
-		deleteTagById,
-		getAllChatTags,
 		getChatById,
 		getChatList,
 		getTagsById,
@@ -66,8 +61,6 @@
 	import MessageInput from '$lib/components/chat/MessageInput.svelte';
 	import Messages from '$lib/components/chat/Messages.svelte';
 	import Navbar from '$lib/components/layout/Navbar.svelte';
-	import CallOverlay from './MessageInput/CallOverlay.svelte';
-	import { error } from '@sveltejs/kit';
 	import ChatControls from './ChatControls.svelte';
 	import EventConfirmDialog from '../common/ConfirmDialog.svelte';
 
@@ -118,6 +111,8 @@
 
 	let params = {};
 
+	let chatIdUnsubscriber: Unsubscriber | undefined;
+
 	$: if (history.currentId !== null) {
 		let _messages = [];
 
@@ -207,47 +202,51 @@
 		}
 	};
 
-	onMount(async () => {
-		const onMessageHandler = async (event) => {
-			if (event.origin === window.origin) {
-				// Replace with your iframe's origin
-				console.log('Message received from iframe:', event.data);
-				if (event.data.type === 'input:prompt') {
-					console.log(event.data.text);
-
-					const inputElement = document.getElementById('chat-textarea');
-
-					if (inputElement) {
-						prompt = event.data.text;
-						inputElement.focus();
-					}
-				}
+	const onMessageHandler = async (event: {
+		origin: string;
+		data: { type: string; text: string };
+	}) => {
+		if (event.origin !== window.origin) {
+			return;
+		}
 
-				if (event.data.type === 'action:submit') {
-					console.log(event.data.text);
+		// Replace with your iframe's origin
+		if (event.data.type === 'input:prompt') {
+			console.debug(event.data.text);
 
-					if (prompt !== '') {
-						await tick();
-						submitPrompt(prompt);
-					}
-				}
+			const inputElement = document.getElementById('chat-textarea');
+
+			if (inputElement) {
+				prompt = event.data.text;
+				inputElement.focus();
+			}
+		}
 
-				if (event.data.type === 'input:prompt:submit') {
-					console.log(event.data.text);
+		if (event.data.type === 'action:submit') {
+			console.debug(event.data.text);
 
-					if (prompt !== '') {
-						await tick();
-						submitPrompt(event.data.text);
-					}
-				}
+			if (prompt !== '') {
+				await tick();
+				submitPrompt(prompt);
 			}
-		};
-		window.addEventListener('message', onMessageHandler);
+		}
 
-		$socket.on('chat-events', chatEventHandler);
+		if (event.data.type === 'input:prompt:submit') {
+			console.debug(event.data.text);
+
+			if (prompt !== '') {
+				await tick();
+				submitPrompt(event.data.text);
+			}
+		}
+	};
+
+	onMount(async () => {
+		window.addEventListener('message', onMessageHandler);
+		$socket?.on('chat-events', chatEventHandler);
 
 		if (!$chatId) {
-			chatId.subscribe(async (value) => {
+			chatIdUnsubscriber = chatId.subscribe(async (value) => {
 				if (!value) {
 					await initNewChat();
 				}
@@ -257,12 +256,12 @@
 				await goto('/');
 			}
 		}
+	});
 
-		return () => {
-			window.removeEventListener('message', onMessageHandler);
-
-			$socket.off('chat-events');
-		};
+	onDestroy(() => {
+		chatIdUnsubscriber?.();
+		window.removeEventListener('message', onMessageHandler);
+		$socket?.off('chat-events');
 	});
 
 	//////////////////////////
@@ -311,6 +310,10 @@
 			}
 		}
 
+		if ($page.url.searchParams.get('call') === 'true') {
+			showCallOverlay.set(true);
+		}
+
 		selectedModels = selectedModels.map((modelId) =>
 			$models.map((m) => m.id).includes(modelId) ? modelId : ''
 		);
@@ -539,6 +542,16 @@
 					`Oops! Hold tight! Your files are still in the processing oven. We're cooking them up to perfection. Please be patient and we'll let you know once they're ready.`
 				)
 			);
+		} else if (
+			($config?.file?.max_count ?? null) !== null &&
+			files.length + chatFiles.length > $config?.file?.max_count
+		) {
+			console.log(chatFiles.length, files.length);
+			toast.error(
+				$i18n.t(`You can only chat with a maximum of {{maxCount}} file(s) at a time.`, {
+					maxCount: $config?.file?.max_count
+				})
+			);
 		} else {
 			// Reset chat input textarea
 			const chatTextAreaElement = document.getElementById('chat-textarea');
@@ -591,11 +604,11 @@
 	};
 
 	const sendPrompt = async (
-		prompt,
-		parentId,
+		prompt: string,
+		parentId: string,
 		{ modelId = null, modelIdx = null, newChat = false } = {}
 	) => {
-		let _responses = [];
+		let _responses: string[] = [];
 
 		// If modelId is provided, use it, else use selected model
 		let selectedModelIds = modelId
@@ -605,7 +618,7 @@
 				: selectedModels;
 
 		// Create response messages for each selected model
-		const responseMessageIds = {};
+		const responseMessageIds: Record<PropertyKey, string> = {};
 		for (const [_modelIdx, modelId] of selectedModelIds.entries()) {
 			const model = $models.filter((m) => m.id === modelId).at(0);
 
@@ -735,13 +748,13 @@
 		);
 
 		currentChatPage.set(1);
-		await chats.set(await getChatList(localStorage.token, $currentChatPage));
+		chats.set(await getChatList(localStorage.token, $currentChatPage));
 
 		return _responses;
 	};
 
 	const sendPromptOllama = async (model, userPrompt, responseMessageId, _chatId) => {
-		let _response = null;
+		let _response: string | null = null;
 
 		const responseMessage = history.messages[responseMessageId];
 		const userMessage = history.messages[responseMessage.parentId];
@@ -772,7 +785,7 @@
 			...messages
 		]
 			.filter((message) => message?.content?.trim())
-			.map((message, idx, arr) => {
+			.map((message) => {
 				// Prepare the base message object
 				const baseMessage = {
 					role: message.role,
@@ -809,7 +822,18 @@
 
 		let files = JSON.parse(JSON.stringify(chatFiles));
 		if (model?.info?.meta?.knowledge ?? false) {
+			// Only initialize and add status if knowledge exists
+			responseMessage.statusHistory = [
+				{
+					action: 'knowledge_search',
+					description: $i18n.t(`Searching Knowledge for "{{searchQuery}}"`, {
+						searchQuery: userMessage.content
+					}),
+					done: false
+				}
+			];
 			files.push(...model.info.meta.knowledge);
+			messages = messages; // Trigger Svelte update
 		}
 		files.push(
 			...(userMessage?.files ?? []).filter((item) =>
@@ -818,6 +842,8 @@
 			...(responseMessage?.files ?? []).filter((item) => ['web_search_results'].includes(item.type))
 		);
 
+		scrollToBottom();
+
 		eventTarget.dispatchEvent(
 			new CustomEvent('chat:start', {
 				detail: {
@@ -888,6 +914,12 @@
 
 							if ('citations' in data) {
 								responseMessage.citations = data.citations;
+								// Only remove status if it was initially set
+								if (model?.info?.meta?.knowledge ?? false) {
+									responseMessage.statusHistory = responseMessage.statusHistory.filter(
+										(status) => status.action !== 'knowledge_search'
+									);
+								}
 								continue;
 							}
 
@@ -905,18 +937,26 @@
 										navigator.vibrate(5);
 									}
 
-									const sentences = extractSentencesForAudio(responseMessage.content);
-									sentences.pop();
+									const messageContentParts = getMessageContentParts(
+										responseMessage.content,
+										$config?.audio?.tts?.split_on ?? 'punctuation'
+									);
+									messageContentParts.pop();
 
 									// dispatch only last sentence and make sure it hasn't been dispatched before
 									if (
-										sentences.length > 0 &&
-										sentences[sentences.length - 1] !== responseMessage.lastSentence
+										messageContentParts.length > 0 &&
+										messageContentParts[messageContentParts.length - 1] !==
+											responseMessage.lastSentence
 									) {
-										responseMessage.lastSentence = sentences[sentences.length - 1];
+										responseMessage.lastSentence =
+											messageContentParts[messageContentParts.length - 1];
 										eventTarget.dispatchEvent(
 											new CustomEvent('chat', {
-												detail: { id: responseMessageId, content: sentences[sentences.length - 1] }
+												detail: {
+													id: responseMessageId,
+													content: messageContentParts[messageContentParts.length - 1]
+												}
 											})
 										);
 									}
@@ -977,7 +1017,20 @@
 				}
 			}
 
-			await saveChatHandler(_chatId);
+			if ($chatId == _chatId) {
+				if ($settings.saveChatHistory ?? true) {
+					chat = await updateChatById(localStorage.token, _chatId, {
+						messages: messages,
+						history: history,
+						models: selectedModels,
+						params: params,
+						files: chatFiles
+					});
+
+					currentChatPage.set(1);
+					await chats.set(await getChatList(localStorage.token, $currentChatPage));
+				}
+			}
 		} else {
 			if (res !== null) {
 				const error = await res.json();
@@ -1000,20 +1053,32 @@
 				};
 			}
 			responseMessage.done = true;
+
+			if (responseMessage.statusHistory) {
+				responseMessage.statusHistory = responseMessage.statusHistory.filter(
+					(status) => status.action !== 'knowledge_search'
+				);
+			}
+
 			messages = messages;
 		}
 
 		stopResponseFlag = false;
 		await tick();
 
-		let lastSentence = extractSentencesForAudio(responseMessage.content)?.at(-1) ?? '';
-		if (lastSentence) {
+		let lastMessageContentPart =
+			getMessageContentParts(
+				responseMessage.content,
+				$config?.audio?.tts?.split_on ?? 'punctuation'
+			)?.at(-1) ?? '';
+		if (lastMessageContentPart) {
 			eventTarget.dispatchEvent(
 				new CustomEvent('chat', {
-					detail: { id: responseMessageId, content: lastSentence }
+					detail: { id: responseMessageId, content: lastMessageContentPart }
 				})
 			);
 		}
+
 		eventTarget.dispatchEvent(
 			new CustomEvent('chat:finish', {
 				detail: {
@@ -1044,7 +1109,18 @@
 
 		let files = JSON.parse(JSON.stringify(chatFiles));
 		if (model?.info?.meta?.knowledge ?? false) {
+			// Only initialize and add status if knowledge exists
+			responseMessage.statusHistory = [
+				{
+					action: 'knowledge_search',
+					description: $i18n.t(`Searching Knowledge for "{{searchQuery}}"`, {
+						searchQuery: userMessage.content
+					}),
+					done: false
+				}
+			];
 			files.push(...model.info.meta.knowledge);
+			messages = messages; // Trigger Svelte update
 		}
 		files.push(
 			...(userMessage?.files ?? []).filter((item) =>
@@ -1184,6 +1260,12 @@
 
 					if (citations) {
 						responseMessage.citations = citations;
+						// Only remove status if it was initially set
+						if (model?.info?.meta?.knowledge ?? false) {
+							responseMessage.statusHistory = responseMessage.statusHistory.filter(
+								(status) => status.action !== 'knowledge_search'
+							);
+						}
 						continue;
 					}
 
@@ -1196,18 +1278,24 @@
 							navigator.vibrate(5);
 						}
 
-						const sentences = extractSentencesForAudio(responseMessage.content);
-						sentences.pop();
+						const messageContentParts = getMessageContentParts(
+							responseMessage.content,
+							$config?.audio?.tts?.split_on ?? 'punctuation'
+						);
+						messageContentParts.pop();
 
 						// dispatch only last sentence and make sure it hasn't been dispatched before
 						if (
-							sentences.length > 0 &&
-							sentences[sentences.length - 1] !== responseMessage.lastSentence
+							messageContentParts.length > 0 &&
+							messageContentParts[messageContentParts.length - 1] !== responseMessage.lastSentence
 						) {
-							responseMessage.lastSentence = sentences[sentences.length - 1];
+							responseMessage.lastSentence = messageContentParts[messageContentParts.length - 1];
 							eventTarget.dispatchEvent(
 								new CustomEvent('chat', {
-									detail: { id: responseMessageId, content: sentences[sentences.length - 1] }
+									detail: {
+										id: responseMessageId,
+										content: messageContentParts[messageContentParts.length - 1]
+									}
 								})
 							);
 						}
@@ -1238,7 +1326,7 @@
 				}
 
 				if ($chatId == _chatId) {
-					if (!$temporaryChatEnabled) {
+					if ($settings.saveChatHistory ?? true) {
 						chat = await updateChatById(localStorage.token, _chatId, {
 							models: selectedModels,
 							messages: messages,
@@ -1262,11 +1350,15 @@
 		stopResponseFlag = false;
 		await tick();
 
-		let lastSentence = extractSentencesForAudio(responseMessage.content)?.at(-1) ?? '';
-		if (lastSentence) {
+		let lastMessageContentPart =
+			getMessageContentParts(
+				responseMessage.content,
+				$config?.audio?.tts?.split_on ?? 'punctuation'
+			)?.at(-1) ?? '';
+		if (lastMessageContentPart) {
 			eventTarget.dispatchEvent(
 				new CustomEvent('chat', {
-					detail: { id: responseMessageId, content: lastSentence }
+					detail: { id: responseMessageId, content: lastMessageContentPart }
 				})
 			);
 		}
@@ -1330,6 +1422,12 @@
 		};
 		responseMessage.done = true;
 
+		if (responseMessage.statusHistory) {
+			responseMessage.statusHistory = responseMessage.statusHistory.filter(
+				(status) => status.action !== 'knowledge_search'
+			);
+		}
+
 		messages = messages;
 	};
 
@@ -1600,17 +1698,6 @@
 	}}
 />
 
-{#if $showCallOverlay}
-	<CallOverlay
-		{submitPrompt}
-		{stopResponse}
-		bind:files
-		modelId={selectedModelIds?.at(0) ?? null}
-		chatId={$chatId}
-		{eventTarget}
-	/>
-{/if}
-
 {#if !chatIdProp || (loaded && chatIdProp)}
 	<div
 		class="h-screen max-h-[100dvh] {$showSidebar
@@ -1721,21 +1808,30 @@
 					{messages}
 					{submitPrompt}
 					{stopResponse}
+					on:call={() => {
+						showControls = true;
+					}}
 				/>
 			</div>
 		</div>
-
-		<ChatControls
-			models={selectedModelIds.reduce((a, e, i, arr) => {
-				const model = $models.find((m) => m.id === e);
-				if (model) {
-					return [...a, model];
-				}
-				return a;
-			}, [])}
-			bind:show={showControls}
-			bind:chatFiles
-			bind:params
-		/>
 	</div>
 {/if}
+
+<ChatControls
+	models={selectedModelIds.reduce((a, e, i, arr) => {
+		const model = $models.find((m) => m.id === e);
+		if (model) {
+			return [...a, model];
+		}
+		return a;
+	}, [])}
+	bind:show={showControls}
+	bind:chatFiles
+	bind:params
+	bind:files
+	{submitPrompt}
+	{stopResponse}
+	modelId={selectedModelIds?.at(0) ?? null}
+	chatId={$chatId}
+	{eventTarget}
+/>

+ 45 - 8
src/lib/components/chat/ChatControls.svelte

@@ -3,6 +3,8 @@
 	import Modal from '../common/Modal.svelte';
 	import Controls from './Controls/Controls.svelte';
 	import { onMount } from 'svelte';
+	import { mobile, showCallOverlay } from '$lib/stores';
+	import CallOverlay from './MessageInput/CallOverlay.svelte';
 
 	export let show = false;
 
@@ -12,6 +14,12 @@
 	export let chatFiles = [];
 	export let params = {};
 
+	export let eventTarget: EventTarget;
+	export let submitPrompt: Function;
+	export let stopResponse: Function;
+	export let files;
+	export let modelId;
+
 	let largeScreen = false;
 	onMount(() => {
 		// listen to resize 1024px
@@ -42,18 +50,47 @@
 				<div
 					class="w-full h-full px-5 py-4 bg-white dark:shadow-lg dark:bg-gray-850 border border-gray-50 dark:border-gray-800 rounded-xl z-50 pointer-events-auto overflow-y-auto scrollbar-hidden"
 				>
-					<Controls
-						on:close={() => {
-							show = false;
-						}}
-						{models}
-						bind:chatFiles
-						bind:params
-					/>
+					{#if $showCallOverlay}
+						<CallOverlay
+							bind:files
+							{submitPrompt}
+							{stopResponse}
+							{modelId}
+							{chatId}
+							{eventTarget}
+						/>
+					{:else}
+						<Controls
+							on:close={() => {
+								show = false;
+							}}
+							{models}
+							bind:chatFiles
+							bind:params
+						/>
+					{/if}
 				</div>
 			</div>
 		</div>
 	{/if}
+{:else if $showCallOverlay}
+	<div class=" absolute w-full h-screen max-h-[100dvh] flex z-[999] overflow-hidden">
+		<div
+			class="absolute w-full h-screen max-h-[100dvh] bg-white text-gray-700 dark:bg-black dark:text-gray-300 flex justify-center"
+		>
+			<CallOverlay
+				bind:files
+				{submitPrompt}
+				{stopResponse}
+				{modelId}
+				{chatId}
+				{eventTarget}
+				on:close={() => {
+					show = false;
+				}}
+			/>
+		</div>
+	</div>
 {:else}
 	<Modal bind:show>
 		<div class="  px-6 py-4 h-full">

+ 1 - 3
src/lib/components/chat/Controls/Controls.svelte

@@ -1,4 +1,4 @@
-<script>
+<script lang="ts">
 	import { createEventDispatcher, getContext } from 'svelte';
 	const dispatch = createEventDispatcher();
 	const i18n = getContext('i18n');
@@ -10,9 +10,7 @@
 	import Collapsible from '$lib/components/common/Collapsible.svelte';
 
 	import { user } from '$lib/stores';
-
 	export let models = [];
-
 	export let chatFiles = [];
 	export let params = {};
 </script>

+ 91 - 170
src/lib/components/chat/MessageInput.svelte

@@ -1,6 +1,8 @@
 <script lang="ts">
 	import { toast } from 'svelte-sonner';
-	import { onMount, tick, getContext } from 'svelte';
+	import { onMount, tick, getContext, createEventDispatcher } from 'svelte';
+	const dispatch = createEventDispatcher();
+
 	import {
 		type Model,
 		mobile,
@@ -12,16 +14,12 @@
 		tools,
 		user as _user
 	} from '$lib/stores';
-	import { blobToFile, calculateSHA256, findWordIndices } from '$lib/utils';
-
-	import {
-		processDocToVectorDB,
-		uploadDocToVectorDB,
-		uploadWebToVectorDB,
-		uploadYoutubeTranscriptionToVectorDB
-	} from '$lib/apis/rag';
+	import { blobToFile, findWordIndices } from '$lib/utils';
 
+	import { transcribeAudio } from '$lib/apis/audio';
+	import { processDocToVectorDB } from '$lib/apis/rag';
 	import { uploadFile } from '$lib/apis/files';
+
 	import {
 		SUPPORTED_FILE_TYPE,
 		SUPPORTED_FILE_EXTENSIONS,
@@ -29,19 +27,14 @@
 		WEBUI_API_BASE_URL
 	} from '$lib/constants';
 
-	import Prompts from './MessageInput/PromptCommands.svelte';
-	import Suggestions from './MessageInput/Suggestions.svelte';
-	import AddFilesPlaceholder from '../AddFilesPlaceholder.svelte';
-	import Documents from './MessageInput/Documents.svelte';
-	import Models from './MessageInput/Models.svelte';
 	import Tooltip from '../common/Tooltip.svelte';
-	import XMark from '$lib/components/icons/XMark.svelte';
 	import InputMenu from './MessageInput/InputMenu.svelte';
 	import Headphone from '../icons/Headphone.svelte';
 	import VoiceRecording from './MessageInput/VoiceRecording.svelte';
-	import { transcribeAudio } from '$lib/apis/audio';
 	import FileItem from '../common/FileItem.svelte';
 	import FilesOverlay from './MessageInput/FilesOverlay.svelte';
+	import Commands from './MessageInput/Commands.svelte';
+	import XMark from '../icons/XMark.svelte';
 
 	const i18n = getContext('i18n');
 
@@ -50,7 +43,7 @@
 	export let submitPrompt: Function;
 	export let stopResponse: Function;
 
-	export let autoScroll = true;
+	export let autoScroll = false;
 
 	export let atSelectedModel: Model | undefined;
 	export let selectedModels: [''];
@@ -60,9 +53,7 @@
 	let chatTextAreaElement: HTMLTextAreaElement;
 	let filesInputElement;
 
-	let promptsElement;
-	let documentsElement;
-	let modelsElement;
+	let commandsElement;
 
 	let inputFiles;
 	let dragged = false;
@@ -93,7 +84,10 @@
 
 	const scrollToBottom = () => {
 		const element = document.getElementById('messages-container');
-		element.scrollTop = element.scrollHeight;
+		element.scrollTo({
+			top: element.scrollHeight,
+			behavior: 'smooth'
+		});
 	};
 
 	const uploadFileHandler = async (file) => {
@@ -177,60 +171,42 @@
 		}
 	};
 
-	const uploadWeb = async (url) => {
-		console.log(url);
-
-		const doc = {
-			type: 'doc',
-			name: url,
-			collection_name: '',
-			status: false,
-			url: url,
-			error: ''
-		};
-
-		try {
-			files = [...files, doc];
-			const res = await uploadWebToVectorDB(localStorage.token, '', url);
-
-			if (res) {
-				doc.status = 'processed';
-				doc.collection_name = res.collection_name;
-				files = files;
+	const inputFilesHandler = async (inputFiles) => {
+		inputFiles.forEach((file) => {
+			console.log(file, file.name.split('.').at(-1));
+
+			if (
+				($config?.file?.max_size ?? null) !== null &&
+				file.size > ($config?.file?.max_size ?? 0) * 1024 * 1024
+			) {
+				toast.error(
+					$i18n.t(`File size should not exceed {{maxSize}} MB.`, {
+						maxSize: $config?.file?.max_size
+					})
+				);
+				return;
 			}
-		} catch (e) {
-			// Remove the failed doc from the files array
-			files = files.filter((f) => f.name !== url);
-			toast.error(e);
-		}
-	};
 
-	const uploadYoutubeTranscription = async (url) => {
-		console.log(url);
-
-		const doc = {
-			type: 'doc',
-			name: url,
-			collection_name: '',
-			status: false,
-			url: url,
-			error: ''
-		};
-
-		try {
-			files = [...files, doc];
-			const res = await uploadYoutubeTranscriptionToVectorDB(localStorage.token, url);
-
-			if (res) {
-				doc.status = 'processed';
-				doc.collection_name = res.collection_name;
-				files = files;
+			if (['image/gif', 'image/webp', 'image/jpeg', 'image/png'].includes(file['type'])) {
+				if (visionCapableModels.length === 0) {
+					toast.error($i18n.t('Selected model(s) do not support image inputs'));
+					return;
+				}
+				let reader = new FileReader();
+				reader.onload = (event) => {
+					files = [
+						...files,
+						{
+							type: 'image',
+							url: `${event.target.result}`
+						}
+					];
+				};
+				reader.readAsDataURL(file);
+			} else {
+				uploadFileHandler(file);
 			}
-		} catch (e) {
-			// Remove the failed doc from the files array
-			files = files.filter((f) => f.name !== url);
-			toast.error(e);
-		}
+		});
 	};
 
 	onMount(() => {
@@ -260,30 +236,9 @@
 
 			if (e.dataTransfer?.files) {
 				const inputFiles = Array.from(e.dataTransfer?.files);
-
 				if (inputFiles && inputFiles.length > 0) {
-					inputFiles.forEach((file) => {
-						console.log(file, file.name.split('.').at(-1));
-						if (['image/gif', 'image/webp', 'image/jpeg', 'image/png'].includes(file['type'])) {
-							if (visionCapableModels.length === 0) {
-								toast.error($i18n.t('Selected model(s) do not support image inputs'));
-								return;
-							}
-							let reader = new FileReader();
-							reader.onload = (event) => {
-								files = [
-									...files,
-									{
-										type: 'image',
-										url: `${event.target.result}`
-									}
-								];
-							};
-							reader.readAsDataURL(file);
-						} else {
-							uploadFileHandler(file);
-						}
-					});
+					console.log(inputFiles);
+					inputFilesHandler(inputFiles);
 				} else {
 					toast.error($i18n.t(`File not found.`));
 				}
@@ -343,48 +298,9 @@
 			</div>
 
 			<div class="w-full relative">
-				{#if prompt.charAt(0) === '/'}
-					<Prompts bind:this={promptsElement} bind:prompt bind:files />
-				{:else if prompt.charAt(0) === '#'}
-					<Documents
-						bind:this={documentsElement}
-						bind:prompt
-						on:youtube={(e) => {
-							console.log(e);
-							uploadYoutubeTranscription(e.detail);
-						}}
-						on:url={(e) => {
-							console.log(e);
-							uploadWeb(e.detail);
-						}}
-						on:select={(e) => {
-							console.log(e);
-							files = [
-								...files,
-								{
-									type: e?.detail?.type ?? 'file',
-									...e.detail,
-									status: 'processed'
-								}
-							];
-						}}
-					/>
-				{/if}
-
-				<Models
-					bind:this={modelsElement}
-					bind:prompt
-					bind:chatInputPlaceholder
-					{messages}
-					on:select={(e) => {
-						atSelectedModel = e.detail;
-						chatTextAreaElement?.focus();
-					}}
-				/>
-
 				{#if atSelectedModel !== undefined}
 					<div
-						class="px-3 py-2.5 text-left w-full flex justify-between items-center absolute bottom-0 left-0 right-0 bg-gradient-to-t from-50% from-white dark:from-gray-900 z-50"
+						class="px-3 py-2.5 text-left w-full flex justify-between items-center absolute bottom-0.5 left-0 right-0 bg-gradient-to-t from-50% from-white dark:from-gray-900 z-10"
 					>
 						<div class="flex items-center gap-2 text-sm dark:text-gray-500">
 							<img
@@ -413,6 +329,21 @@
 						</div>
 					</div>
 				{/if}
+
+				<Commands
+					bind:this={commandsElement}
+					bind:prompt
+					bind:files
+					on:select={(e) => {
+						const data = e.detail;
+
+						if (data?.type === 'model') {
+							atSelectedModel = data.data;
+						}
+
+						chatTextAreaElement?.focus();
+					}}
+				/>
 			</div>
 		</div>
 	</div>
@@ -429,27 +360,7 @@
 					on:change={async () => {
 						if (inputFiles && inputFiles.length > 0) {
 							const _inputFiles = Array.from(inputFiles);
-							_inputFiles.forEach((file) => {
-								if (['image/gif', 'image/webp', 'image/jpeg', 'image/png'].includes(file['type'])) {
-									if (visionCapableModels.length === 0) {
-										toast.error($i18n.t('Selected model(s) do not support image inputs'));
-										return;
-									}
-									let reader = new FileReader();
-									reader.onload = (event) => {
-										files = [
-											...files,
-											{
-												type: 'image',
-												url: `${event.target.result}`
-											}
-										];
-									};
-									reader.readAsDataURL(file);
-								} else {
-									uploadFileHandler(file);
-								}
-							});
+							inputFilesHandler(_inputFiles);
 						} else {
 							toast.error($i18n.t(`File not found.`));
 						}
@@ -638,6 +549,7 @@
 									}}
 									on:keydown={async (e) => {
 										const isCtrlPressed = e.ctrlKey || e.metaKey; // metaKey is for Cmd key on Mac
+										const commandsContainerElement = document.getElementById('commands-container');
 
 										// Check if Ctrl + R is pressed
 										if (prompt === '' && isCtrlPressed && e.key.toLowerCase() === 'r') {
@@ -668,10 +580,9 @@
 											editButton?.click();
 										}
 
-										if (['/', '#', '@'].includes(prompt.charAt(0)) && e.key === 'ArrowUp') {
+										if (commandsContainerElement && e.key === 'ArrowUp') {
 											e.preventDefault();
-
-											(promptsElement || documentsElement || modelsElement).selectUp();
+											commandsElement.selectUp();
 
 											const commandOptionButton = [
 												...document.getElementsByClassName('selected-command-option-button')
@@ -679,10 +590,9 @@
 											commandOptionButton.scrollIntoView({ block: 'center' });
 										}
 
-										if (['/', '#', '@'].includes(prompt.charAt(0)) && e.key === 'ArrowDown') {
+										if (commandsContainerElement && e.key === 'ArrowDown') {
 											e.preventDefault();
-
-											(promptsElement || documentsElement || modelsElement).selectDown();
+											commandsElement.selectDown();
 
 											const commandOptionButton = [
 												...document.getElementsByClassName('selected-command-option-button')
@@ -690,7 +600,7 @@
 											commandOptionButton.scrollIntoView({ block: 'center' });
 										}
 
-										if (['/', '#', '@'].includes(prompt.charAt(0)) && e.key === 'Enter') {
+										if (commandsContainerElement && e.key === 'Enter') {
 											e.preventDefault();
 
 											const commandOptionButton = [
@@ -706,7 +616,7 @@
 											}
 										}
 
-										if (['/', '#', '@'].includes(prompt.charAt(0)) && e.key === 'Tab') {
+										if (commandsContainerElement && e.key === 'Tab') {
 											e.preventDefault();
 
 											const commandOptionButton = [
@@ -742,16 +652,16 @@
 										}
 									}}
 									rows="1"
-									on:input={(e) => {
+									on:input={async (e) => {
 										e.target.style.height = '';
 										e.target.style.height = Math.min(e.target.scrollHeight, 200) + 'px';
 										user = null;
 									}}
-									on:focus={(e) => {
+									on:focus={async (e) => {
 										e.target.style.height = '';
 										e.target.style.height = Math.min(e.target.scrollHeight, 200) + 'px';
 									}}
-									on:paste={(e) => {
+									on:paste={async (e) => {
 										const clipboardData = e.clipboardData || window.clipboardData;
 
 										if (clipboardData && clipboardData.items) {
@@ -786,7 +696,7 @@
 												type="button"
 												on:click={async () => {
 													try {
-														const res = await navigator.mediaDevices
+														let stream = await navigator.mediaDevices
 															.getUserMedia({ audio: true })
 															.catch(function (err) {
 																toast.error(
@@ -800,9 +710,12 @@
 																return null;
 															});
 
-														if (res) {
+														if (stream) {
 															recording = true;
+															const tracks = stream.getTracks();
+															tracks.forEach((track) => track.stop());
 														}
+														stream = null;
 													} catch {
 														toast.error($i18n.t('Permission denied when accessing microphone'));
 													}
@@ -849,10 +762,18 @@
 													}
 													// check if user has access to getUserMedia
 													try {
-														await navigator.mediaDevices.getUserMedia({ audio: true });
+														let stream = await navigator.mediaDevices.getUserMedia({ audio: true });
 														// If the user grants the permission, proceed to show the call overlay
 
+														if (stream) {
+															const tracks = stream.getTracks();
+															tracks.forEach((track) => track.stop());
+														}
+
+														stream = null;
+
 														showCallOverlay.set(true);
+														dispatch('call');
 													} catch (err) {
 														// If the user denies the permission or an error occurs, show an error message
 														toast.error($i18n.t('Permission denied when accessing media devices'));

+ 327 - 281
src/lib/components/chat/MessageInput/CallOverlay.svelte

@@ -1,6 +1,8 @@
 <script lang="ts">
 	import { config, models, settings, showCallOverlay } from '$lib/stores';
-	import { onMount, tick, getContext } from 'svelte';
+	import { onMount, tick, getContext, onDestroy, createEventDispatcher } from 'svelte';
+
+	const dispatch = createEventDispatcher();
 
 	import {
 		blobToFile,
@@ -19,15 +21,14 @@
 	const i18n = getContext('i18n');
 
 	export let eventTarget: EventTarget;
-
 	export let submitPrompt: Function;
 	export let stopResponse: Function;
-
 	export let files;
-
 	export let chatId;
 	export let modelId;
 
+	let wakeLock = null;
+
 	let model = null;
 
 	let loading = false;
@@ -45,6 +46,7 @@
 	let rmsLevel = 0;
 	let hasStartedSpeaking = false;
 	let mediaRecorder;
+	let audioStream = null;
 	let audioChunks = [];
 
 	let videoInputDevices = [];
@@ -210,17 +212,23 @@
 		} else {
 			audioChunks = [];
 			mediaRecorder = false;
+
+			if (audioStream) {
+				const tracks = audioStream.getTracks();
+				tracks.forEach((track) => track.stop());
+			}
+			audioStream = null;
 		}
 	};
 
 	const startRecording = async () => {
-		const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
-		mediaRecorder = new MediaRecorder(stream);
+		audioStream = await navigator.mediaDevices.getUserMedia({ audio: true });
+		mediaRecorder = new MediaRecorder(audioStream);
 
 		mediaRecorder.onstart = () => {
 			console.log('Recording started');
 			audioChunks = [];
-			analyseAudio(stream);
+			analyseAudio(audioStream);
 		};
 
 		mediaRecorder.ondataavailable = (event) => {
@@ -237,6 +245,15 @@
 		mediaRecorder.start();
 	};
 
+	const stopAudioStream = async () => {
+		if (audioStream) {
+			const tracks = audioStream.getTracks();
+			tracks.forEach((track) => track.stop());
+		}
+
+		audioStream = null;
+	};
+
 	// Function to calculate the RMS level from time domain data
 	const calculateRMS = (data: Uint8Array) => {
 		let sumSquares = 0;
@@ -509,6 +526,34 @@
 	};
 
 	onMount(async () => {
+		const setWakeLock = async () => {
+			try {
+				wakeLock = await navigator.wakeLock.request('screen');
+			} catch (err) {
+				// The Wake Lock request has failed - usually system related, such as battery.
+				console.log(err);
+			}
+
+			if (wakeLock) {
+				// Add a listener to release the wake lock when the page is unloaded
+				wakeLock.addEventListener('release', () => {
+					// the wake lock has been released
+					console.log('Wake Lock released');
+				});
+			}
+		};
+
+		if ('wakeLock' in navigator) {
+			await setWakeLock();
+
+			document.addEventListener('visibilitychange', async () => {
+				// Re-request the wake lock if the document becomes visible
+				if (wakeLock !== null && document.visibilityState === 'visible') {
+					await setWakeLock();
+				}
+			});
+		}
+
 		model = $models.find((m) => m.id === modelId);
 
 		startRecording();
@@ -585,315 +630,316 @@
 			await stopCamera();
 		};
 	});
+
+	onDestroy(async () => {
+		await stopAllAudio();
+		await stopRecordingCallback(false);
+		await stopCamera();
+	});
 </script>
 
 {#if $showCallOverlay}
-	<div class=" absolute w-full h-screen max-h-[100dvh] flex z-[999] overflow-hidden">
-		<div
-			class="absolute w-full h-screen max-h-[100dvh] bg-white text-gray-700 dark:bg-black dark:text-gray-300 flex justify-center"
-		>
-			<div class="max-w-lg w-full h-screen max-h-[100dvh] flex flex-col justify-between p-3 md:p-6">
-				{#if camera}
-					<button
-						type="button"
-						class="flex justify-center items-center w-full h-20 min-h-20"
-						on:click={() => {
-							if (assistantSpeaking) {
-								stopAllAudio();
+	<div class="max-w-lg w-full h-full max-h-[100dvh] flex flex-col justify-between p-3 md:p-6">
+		{#if camera}
+			<button
+				type="button"
+				class="flex justify-center items-center w-full h-20 min-h-20"
+				on:click={() => {
+					if (assistantSpeaking) {
+						stopAllAudio();
+					}
+				}}
+			>
+				{#if emoji}
+					<div
+						class="  transition-all rounded-full"
+						style="font-size:{rmsLevel * 100 > 4
+							? '4.5'
+							: rmsLevel * 100 > 2
+								? '4.25'
+								: rmsLevel * 100 > 1
+									? '3.75'
+									: '3.5'}rem;width: 100%; text-align:center;"
+					>
+						{emoji}
+					</div>
+				{:else if loading || assistantSpeaking}
+					<svg
+						class="size-12 text-gray-900 dark:text-gray-400"
+						viewBox="0 0 24 24"
+						fill="currentColor"
+						xmlns="http://www.w3.org/2000/svg"
+						><style>
+							.spinner_qM83 {
+								animation: spinner_8HQG 1.05s infinite;
 							}
-						}}
+							.spinner_oXPr {
+								animation-delay: 0.1s;
+							}
+							.spinner_ZTLf {
+								animation-delay: 0.2s;
+							}
+							@keyframes spinner_8HQG {
+								0%,
+								57.14% {
+									animation-timing-function: cubic-bezier(0.33, 0.66, 0.66, 1);
+									transform: translate(0);
+								}
+								28.57% {
+									animation-timing-function: cubic-bezier(0.33, 0, 0.66, 0.33);
+									transform: translateY(-6px);
+								}
+								100% {
+									transform: translate(0);
+								}
+							}
+						</style><circle class="spinner_qM83" cx="4" cy="12" r="3" /><circle
+							class="spinner_qM83 spinner_oXPr"
+							cx="12"
+							cy="12"
+							r="3"
+						/><circle class="spinner_qM83 spinner_ZTLf" cx="20" cy="12" r="3" /></svg
 					>
-						{#if emoji}
-							<div
-								class="  transition-all rounded-full"
-								style="font-size:{rmsLevel * 100 > 4
-									? '4.5'
-									: rmsLevel * 100 > 2
-										? '4.25'
-										: rmsLevel * 100 > 1
-											? '3.75'
-											: '3.5'}rem;width: 100%; text-align:center;"
-							>
-								{emoji}
-							</div>
-						{:else if loading || assistantSpeaking}
-							<svg
-								class="size-12 text-gray-900 dark:text-gray-400"
-								viewBox="0 0 24 24"
-								fill="currentColor"
-								xmlns="http://www.w3.org/2000/svg"
-								><style>
-									.spinner_qM83 {
-										animation: spinner_8HQG 1.05s infinite;
-									}
-									.spinner_oXPr {
-										animation-delay: 0.1s;
+				{:else}
+					<div
+						class=" {rmsLevel * 100 > 4
+							? ' size-[4.5rem]'
+							: rmsLevel * 100 > 2
+								? ' size-16'
+								: rmsLevel * 100 > 1
+									? 'size-14'
+									: 'size-12'}  transition-all rounded-full {(model?.info?.meta
+							?.profile_image_url ?? '/static/favicon.png') !== '/static/favicon.png'
+							? ' bg-cover bg-center bg-no-repeat'
+							: 'bg-black dark:bg-white'}  bg-black dark:bg-white"
+						style={(model?.info?.meta?.profile_image_url ?? '/static/favicon.png') !==
+						'/static/favicon.png'
+							? `background-image: url('${model?.info?.meta?.profile_image_url}');`
+							: ''}
+					/>
+				{/if}
+				<!-- navbar -->
+			</button>
+		{/if}
+
+		<div class="flex justify-center items-center flex-1 h-full w-full max-h-full">
+			{#if !camera}
+				<button
+					type="button"
+					on:click={() => {
+						if (assistantSpeaking) {
+							stopAllAudio();
+						}
+					}}
+				>
+					{#if emoji}
+						<div
+							class="  transition-all rounded-full"
+							style="font-size:{rmsLevel * 100 > 4
+								? '13'
+								: rmsLevel * 100 > 2
+									? '12'
+									: rmsLevel * 100 > 1
+										? '11.5'
+										: '11'}rem;width:100%;text-align:center;"
+						>
+							{emoji}
+						</div>
+					{:else if loading || assistantSpeaking}
+						<svg
+							class="size-44 text-gray-900 dark:text-gray-400"
+							viewBox="0 0 24 24"
+							fill="currentColor"
+							xmlns="http://www.w3.org/2000/svg"
+							><style>
+								.spinner_qM83 {
+									animation: spinner_8HQG 1.05s infinite;
+								}
+								.spinner_oXPr {
+									animation-delay: 0.1s;
+								}
+								.spinner_ZTLf {
+									animation-delay: 0.2s;
+								}
+								@keyframes spinner_8HQG {
+									0%,
+									57.14% {
+										animation-timing-function: cubic-bezier(0.33, 0.66, 0.66, 1);
+										transform: translate(0);
 									}
-									.spinner_ZTLf {
-										animation-delay: 0.2s;
+									28.57% {
+										animation-timing-function: cubic-bezier(0.33, 0, 0.66, 0.33);
+										transform: translateY(-6px);
 									}
-									@keyframes spinner_8HQG {
-										0%,
-										57.14% {
-											animation-timing-function: cubic-bezier(0.33, 0.66, 0.66, 1);
-											transform: translate(0);
-										}
-										28.57% {
-											animation-timing-function: cubic-bezier(0.33, 0, 0.66, 0.33);
-											transform: translateY(-6px);
-										}
-										100% {
-											transform: translate(0);
-										}
+									100% {
+										transform: translate(0);
 									}
-								</style><circle class="spinner_qM83" cx="4" cy="12" r="3" /><circle
-									class="spinner_qM83 spinner_oXPr"
-									cx="12"
-									cy="12"
-									r="3"
-								/><circle class="spinner_qM83 spinner_ZTLf" cx="20" cy="12" r="3" /></svg
-							>
-						{:else}
-							<div
-								class=" {rmsLevel * 100 > 4
-									? ' size-[4.5rem]'
-									: rmsLevel * 100 > 2
-										? ' size-16'
-										: rmsLevel * 100 > 1
-											? 'size-14'
-											: 'size-12'}  transition-all rounded-full {(model?.info?.meta
-									?.profile_image_url ?? '/static/favicon.png') !== '/static/favicon.png'
-									? ' bg-cover bg-center bg-no-repeat'
-									: 'bg-black dark:bg-white'}  bg-black dark:bg-white"
-								style={(model?.info?.meta?.profile_image_url ?? '/static/favicon.png') !==
-								'/static/favicon.png'
-									? `background-image: url('${model?.info?.meta?.profile_image_url}');`
-									: ''}
-							/>
-						{/if}
-						<!-- navbar -->
-					</button>
-				{/if}
-
-				<div class="flex justify-center items-center flex-1 h-full w-full max-h-full">
-					{#if !camera}
-						<button
-							type="button"
-							on:click={() => {
-								if (assistantSpeaking) {
-									stopAllAudio();
 								}
-							}}
+							</style><circle class="spinner_qM83" cx="4" cy="12" r="3" /><circle
+								class="spinner_qM83 spinner_oXPr"
+								cx="12"
+								cy="12"
+								r="3"
+							/><circle class="spinner_qM83 spinner_ZTLf" cx="20" cy="12" r="3" /></svg
 						>
-							{#if emoji}
-								<div
-									class="  transition-all rounded-full"
-									style="font-size:{rmsLevel * 100 > 4
-										? '13'
-										: rmsLevel * 100 > 2
-											? '12'
-											: rmsLevel * 100 > 1
-												? '11.5'
-												: '11'}rem;width:100%;text-align:center;"
-								>
-									{emoji}
-								</div>
-							{:else if loading || assistantSpeaking}
-								<svg
-									class="size-44 text-gray-900 dark:text-gray-400"
-									viewBox="0 0 24 24"
-									fill="currentColor"
-									xmlns="http://www.w3.org/2000/svg"
-									><style>
-										.spinner_qM83 {
-											animation: spinner_8HQG 1.05s infinite;
-										}
-										.spinner_oXPr {
-											animation-delay: 0.1s;
-										}
-										.spinner_ZTLf {
-											animation-delay: 0.2s;
-										}
-										@keyframes spinner_8HQG {
-											0%,
-											57.14% {
-												animation-timing-function: cubic-bezier(0.33, 0.66, 0.66, 1);
-												transform: translate(0);
-											}
-											28.57% {
-												animation-timing-function: cubic-bezier(0.33, 0, 0.66, 0.33);
-												transform: translateY(-6px);
-											}
-											100% {
-												transform: translate(0);
-											}
-										}
-									</style><circle class="spinner_qM83" cx="4" cy="12" r="3" /><circle
-										class="spinner_qM83 spinner_oXPr"
-										cx="12"
-										cy="12"
-										r="3"
-									/><circle class="spinner_qM83 spinner_ZTLf" cx="20" cy="12" r="3" /></svg
-								>
-							{:else}
-								<div
-									class=" {rmsLevel * 100 > 4
-										? ' size-52'
-										: rmsLevel * 100 > 2
-											? 'size-48'
-											: rmsLevel * 100 > 1
-												? 'size-[11.5rem]'
-												: 'size-44'}  transition-all rounded-full {(model?.info?.meta
-										?.profile_image_url ?? '/static/favicon.png') !== '/static/favicon.png'
-										? ' bg-cover bg-center bg-no-repeat'
-										: 'bg-black dark:bg-white'} "
-									style={(model?.info?.meta?.profile_image_url ?? '/static/favicon.png') !==
-									'/static/favicon.png'
-										? `background-image: url('${model?.info?.meta?.profile_image_url}');`
-										: ''}
-								/>
-							{/if}
-						</button>
 					{:else}
 						<div
-							class="relative flex video-container w-full max-h-full pt-2 pb-4 md:py-6 px-2 h-full"
-						>
-							<video
-								id="camera-feed"
-								autoplay
-								class="rounded-2xl h-full min-w-full object-cover object-center"
-								playsinline
-							/>
-
-							<canvas id="camera-canvas" style="display:none;" />
-
-							<div class=" absolute top-4 md:top-8 left-4">
-								<button
-									type="button"
-									class="p-1.5 text-white cursor-pointer backdrop-blur-xl bg-black/10 rounded-full"
-									on:click={() => {
-										stopCamera();
-									}}
-								>
-									<svg
-										xmlns="http://www.w3.org/2000/svg"
-										viewBox="0 0 16 16"
-										fill="currentColor"
-										class="size-6"
-									>
-										<path
-											d="M5.28 4.22a.75.75 0 0 0-1.06 1.06L6.94 8l-2.72 2.72a.75.75 0 1 0 1.06 1.06L8 9.06l2.72 2.72a.75.75 0 1 0 1.06-1.06L9.06 8l2.72-2.72a.75.75 0 0 0-1.06-1.06L8 6.94 5.28 4.22Z"
-										/>
-									</svg>
-								</button>
-							</div>
-						</div>
+							class=" {rmsLevel * 100 > 4
+								? ' size-52'
+								: rmsLevel * 100 > 2
+									? 'size-48'
+									: rmsLevel * 100 > 1
+										? 'size-44'
+										: 'size-40'}  transition-all rounded-full {(model?.info?.meta
+								?.profile_image_url ?? '/static/favicon.png') !== '/static/favicon.png'
+								? ' bg-cover bg-center bg-no-repeat'
+								: 'bg-black dark:bg-white'} "
+							style={(model?.info?.meta?.profile_image_url ?? '/static/favicon.png') !==
+							'/static/favicon.png'
+								? `background-image: url('${model?.info?.meta?.profile_image_url}');`
+								: ''}
+						/>
 					{/if}
-				</div>
-
-				<div class="flex justify-between items-center pb-2 w-full">
-					<div>
-						{#if camera}
-							<VideoInputMenu
-								devices={videoInputDevices}
-								on:change={async (e) => {
-									console.log(e.detail);
-									selectedVideoInputDeviceId = e.detail;
-									await stopVideoStream();
-									await startVideoStream();
-								}}
-							>
-								<button class=" p-3 rounded-full bg-gray-50 dark:bg-gray-900" type="button">
-									<svg
-										xmlns="http://www.w3.org/2000/svg"
-										viewBox="0 0 20 20"
-										fill="currentColor"
-										class="size-5"
-									>
-										<path
-											fill-rule="evenodd"
-											d="M15.312 11.424a5.5 5.5 0 0 1-9.201 2.466l-.312-.311h2.433a.75.75 0 0 0 0-1.5H3.989a.75.75 0 0 0-.75.75v4.242a.75.75 0 0 0 1.5 0v-2.43l.31.31a7 7 0 0 0 11.712-3.138.75.75 0 0 0-1.449-.39Zm1.23-3.723a.75.75 0 0 0 .219-.53V2.929a.75.75 0 0 0-1.5 0V5.36l-.31-.31A7 7 0 0 0 3.239 8.188a.75.75 0 1 0 1.448.389A5.5 5.5 0 0 1 13.89 6.11l.311.31h-2.432a.75.75 0 0 0 0 1.5h4.243a.75.75 0 0 0 .53-.219Z"
-											clip-rule="evenodd"
-										/>
-									</svg>
-								</button>
-							</VideoInputMenu>
-						{:else}
-							<Tooltip content={$i18n.t('Camera')}>
-								<button
-									class=" p-3 rounded-full bg-gray-50 dark:bg-gray-900"
-									type="button"
-									on:click={async () => {
-										await navigator.mediaDevices.getUserMedia({ video: true });
-										startCamera();
-									}}
-								>
-									<svg
-										xmlns="http://www.w3.org/2000/svg"
-										fill="none"
-										viewBox="0 0 24 24"
-										stroke-width="1.5"
-										stroke="currentColor"
-										class="size-5"
-									>
-										<path
-											stroke-linecap="round"
-											stroke-linejoin="round"
-											d="M6.827 6.175A2.31 2.31 0 0 1 5.186 7.23c-.38.054-.757.112-1.134.175C2.999 7.58 2.25 8.507 2.25 9.574V18a2.25 2.25 0 0 0 2.25 2.25h15A2.25 2.25 0 0 0 21.75 18V9.574c0-1.067-.75-1.994-1.802-2.169a47.865 47.865 0 0 0-1.134-.175 2.31 2.31 0 0 1-1.64-1.055l-.822-1.316a2.192 2.192 0 0 0-1.736-1.039 48.774 48.774 0 0 0-5.232 0 2.192 2.192 0 0 0-1.736 1.039l-.821 1.316Z"
-										/>
-										<path
-											stroke-linecap="round"
-											stroke-linejoin="round"
-											d="M16.5 12.75a4.5 4.5 0 1 1-9 0 4.5 4.5 0 0 1 9 0ZM18.75 10.5h.008v.008h-.008V10.5Z"
-										/>
-									</svg>
-								</button>
-							</Tooltip>
-						{/if}
-					</div>
-
-					<div>
+				</button>
+			{:else}
+				<div class="relative flex video-container w-full max-h-full pt-2 pb-4 md:py-6 px-2 h-full">
+					<video
+						id="camera-feed"
+						autoplay
+						class="rounded-2xl h-full min-w-full object-cover object-center"
+						playsinline
+					/>
+
+					<canvas id="camera-canvas" style="display:none;" />
+
+					<div class=" absolute top-4 md:top-8 left-4">
 						<button
 							type="button"
+							class="p-1.5 text-white cursor-pointer backdrop-blur-xl bg-black/10 rounded-full"
 							on:click={() => {
-								if (assistantSpeaking) {
-									stopAllAudio();
-								}
+								stopCamera();
 							}}
 						>
-							<div class=" line-clamp-1 text-sm font-medium">
-								{#if loading}
-									{$i18n.t('Thinking...')}
-								{:else if assistantSpeaking}
-									{$i18n.t('Tap to interrupt')}
-								{:else}
-									{$i18n.t('Listening...')}
-								{/if}
-							</div>
+							<svg
+								xmlns="http://www.w3.org/2000/svg"
+								viewBox="0 0 16 16"
+								fill="currentColor"
+								class="size-6"
+							>
+								<path
+									d="M5.28 4.22a.75.75 0 0 0-1.06 1.06L6.94 8l-2.72 2.72a.75.75 0 1 0 1.06 1.06L8 9.06l2.72 2.72a.75.75 0 1 0 1.06-1.06L9.06 8l2.72-2.72a.75.75 0 0 0-1.06-1.06L8 6.94 5.28 4.22Z"
+								/>
+							</svg>
 						</button>
 					</div>
+				</div>
+			{/if}
+		</div>
 
-					<div>
+		<div class="flex justify-between items-center pb-2 w-full">
+			<div>
+				{#if camera}
+					<VideoInputMenu
+						devices={videoInputDevices}
+						on:change={async (e) => {
+							console.log(e.detail);
+							selectedVideoInputDeviceId = e.detail;
+							await stopVideoStream();
+							await startVideoStream();
+						}}
+					>
+						<button class=" p-3 rounded-full bg-gray-50 dark:bg-gray-900" type="button">
+							<svg
+								xmlns="http://www.w3.org/2000/svg"
+								viewBox="0 0 20 20"
+								fill="currentColor"
+								class="size-5"
+							>
+								<path
+									fill-rule="evenodd"
+									d="M15.312 11.424a5.5 5.5 0 0 1-9.201 2.466l-.312-.311h2.433a.75.75 0 0 0 0-1.5H3.989a.75.75 0 0 0-.75.75v4.242a.75.75 0 0 0 1.5 0v-2.43l.31.31a7 7 0 0 0 11.712-3.138.75.75 0 0 0-1.449-.39Zm1.23-3.723a.75.75 0 0 0 .219-.53V2.929a.75.75 0 0 0-1.5 0V5.36l-.31-.31A7 7 0 0 0 3.239 8.188a.75.75 0 1 0 1.448.389A5.5 5.5 0 0 1 13.89 6.11l.311.31h-2.432a.75.75 0 0 0 0 1.5h4.243a.75.75 0 0 0 .53-.219Z"
+									clip-rule="evenodd"
+								/>
+							</svg>
+						</button>
+					</VideoInputMenu>
+				{:else}
+					<Tooltip content={$i18n.t('Camera')}>
 						<button
 							class=" p-3 rounded-full bg-gray-50 dark:bg-gray-900"
+							type="button"
 							on:click={async () => {
-								showCallOverlay.set(false);
+								await navigator.mediaDevices.getUserMedia({ video: true });
+								startCamera();
 							}}
-							type="button"
 						>
 							<svg
 								xmlns="http://www.w3.org/2000/svg"
-								viewBox="0 0 20 20"
-								fill="currentColor"
+								fill="none"
+								viewBox="0 0 24 24"
+								stroke-width="1.5"
+								stroke="currentColor"
 								class="size-5"
 							>
 								<path
-									d="M6.28 5.22a.75.75 0 0 0-1.06 1.06L8.94 10l-3.72 3.72a.75.75 0 1 0 1.06 1.06L10 11.06l3.72 3.72a.75.75 0 1 0 1.06-1.06L11.06 10l3.72-3.72a.75.75 0 0 0-1.06-1.06L10 8.94 6.28 5.22Z"
+									stroke-linecap="round"
+									stroke-linejoin="round"
+									d="M6.827 6.175A2.31 2.31 0 0 1 5.186 7.23c-.38.054-.757.112-1.134.175C2.999 7.58 2.25 8.507 2.25 9.574V18a2.25 2.25 0 0 0 2.25 2.25h15A2.25 2.25 0 0 0 21.75 18V9.574c0-1.067-.75-1.994-1.802-2.169a47.865 47.865 0 0 0-1.134-.175 2.31 2.31 0 0 1-1.64-1.055l-.822-1.316a2.192 2.192 0 0 0-1.736-1.039 48.774 48.774 0 0 0-5.232 0 2.192 2.192 0 0 0-1.736 1.039l-.821 1.316Z"
+								/>
+								<path
+									stroke-linecap="round"
+									stroke-linejoin="round"
+									d="M16.5 12.75a4.5 4.5 0 1 1-9 0 4.5 4.5 0 0 1 9 0ZM18.75 10.5h.008v.008h-.008V10.5Z"
 								/>
 							</svg>
 						</button>
+					</Tooltip>
+				{/if}
+			</div>
+
+			<div>
+				<button
+					type="button"
+					on:click={() => {
+						if (assistantSpeaking) {
+							stopAllAudio();
+						}
+					}}
+				>
+					<div class=" line-clamp-1 text-sm font-medium">
+						{#if loading}
+							{$i18n.t('Thinking...')}
+						{:else if assistantSpeaking}
+							{$i18n.t('Tap to interrupt')}
+						{:else}
+							{$i18n.t('Listening...')}
+						{/if}
 					</div>
-				</div>
+				</button>
+			</div>
+
+			<div>
+				<button
+					class=" p-3 rounded-full bg-gray-50 dark:bg-gray-900"
+					on:click={async () => {
+						stopAudioStream();
+						stopVideoStream();
+						showCallOverlay.set(false);
+						dispatch('close');
+					}}
+					type="button"
+				>
+					<svg
+						xmlns="http://www.w3.org/2000/svg"
+						viewBox="0 0 20 20"
+						fill="currentColor"
+						class="size-5"
+					>
+						<path
+							d="M6.28 5.22a.75.75 0 0 0-1.06 1.06L8.94 10l-3.72 3.72a.75.75 0 1 0 1.06 1.06L10 11.06l3.72 3.72a.75.75 0 1 0 1.06-1.06L11.06 10l3.72-3.72a.75.75 0 0 0-1.06-1.06L10 8.94 6.28 5.22Z"
+						/>
+					</svg>
+				</button>
 			</div>
 		</div>
 	</div>

+ 131 - 0
src/lib/components/chat/MessageInput/Commands.svelte

@@ -0,0 +1,131 @@
+<script>
+	import { createEventDispatcher } from 'svelte';
+	import { toast } from 'svelte-sonner';
+
+	const dispatch = createEventDispatcher();
+
+	import Prompts from './Commands/Prompts.svelte';
+	import Documents from './Commands/Documents.svelte';
+	import Models from './Commands/Models.svelte';
+
+	import { removeLastWordFromString } from '$lib/utils';
+	import { uploadWebToVectorDB, uploadYoutubeTranscriptionToVectorDB } from '$lib/apis/rag';
+
+	export let prompt = '';
+	export let files = [];
+
+	let commandElement = null;
+
+	export const selectUp = () => {
+		commandElement?.selectUp();
+	};
+
+	export const selectDown = () => {
+		commandElement?.selectDown();
+	};
+
+	let command = '';
+	$: command = (prompt?.trim() ?? '').split(' ')?.at(-1) ?? '';
+
+	const uploadWeb = async (url) => {
+		console.log(url);
+
+		const doc = {
+			type: 'doc',
+			name: url,
+			collection_name: '',
+			status: false,
+			url: url,
+			error: ''
+		};
+
+		try {
+			files = [...files, doc];
+			const res = await uploadWebToVectorDB(localStorage.token, '', url);
+
+			if (res) {
+				doc.status = 'processed';
+				doc.collection_name = res.collection_name;
+				files = files;
+			}
+		} catch (e) {
+			// Remove the failed doc from the files array
+			files = files.filter((f) => f.name !== url);
+			toast.error(e);
+		}
+	};
+
+	const uploadYoutubeTranscription = async (url) => {
+		console.log(url);
+
+		const doc = {
+			type: 'doc',
+			name: url,
+			collection_name: '',
+			status: false,
+			url: url,
+			error: ''
+		};
+
+		try {
+			files = [...files, doc];
+			const res = await uploadYoutubeTranscriptionToVectorDB(localStorage.token, url);
+
+			if (res) {
+				doc.status = 'processed';
+				doc.collection_name = res.collection_name;
+				files = files;
+			}
+		} catch (e) {
+			// Remove the failed doc from the files array
+			files = files.filter((f) => f.name !== url);
+			toast.error(e);
+		}
+	};
+</script>
+
+{#if ['/', '#', '@'].includes(command?.charAt(0))}
+	{#if command?.charAt(0) === '/'}
+		<Prompts bind:this={commandElement} bind:prompt bind:files {command} />
+	{:else if command?.charAt(0) === '#'}
+		<Documents
+			bind:this={commandElement}
+			bind:prompt
+			{command}
+			on:youtube={(e) => {
+				console.log(e);
+				uploadYoutubeTranscription(e.detail);
+			}}
+			on:url={(e) => {
+				console.log(e);
+				uploadWeb(e.detail);
+			}}
+			on:select={(e) => {
+				console.log(e);
+				files = [
+					...files,
+					{
+						type: e?.detail?.type ?? 'file',
+						...e.detail,
+						status: 'processed'
+					}
+				];
+
+				dispatch('select');
+			}}
+		/>
+	{:else if command?.charAt(0) === '@'}
+		<Models
+			bind:this={commandElement}
+			{command}
+			on:select={(e) => {
+				prompt = removeLastWordFromString(prompt, command);
+
+				dispatch('select', {
+					type: 'model',
+					data: e.detail
+				});
+			}}
+		/>
+	{/if}
+{/if}

+ 14 - 10
src/lib/components/chat/MessageInput/Documents.svelte → src/lib/components/chat/MessageInput/Commands/Documents.svelte

@@ -2,13 +2,14 @@
 	import { createEventDispatcher } from 'svelte';
 
 	import { documents } from '$lib/stores';
-	import { removeFirstHashWord, isValidHttpUrl } from '$lib/utils';
+	import { removeLastWordFromString, isValidHttpUrl } from '$lib/utils';
 	import { tick, getContext } from 'svelte';
 	import { toast } from 'svelte-sonner';
 
 	const i18n = getContext('i18n');
 
 	export let prompt = '';
+	export let command = '';
 
 	const dispatch = createEventDispatcher();
 	let selectedIdx = 0;
@@ -43,16 +44,16 @@
 	];
 
 	$: filteredCollections = collections
-		.filter((collection) => findByName(collection, prompt))
+		.filter((collection) => findByName(collection, command))
 		.sort((a, b) => a.name.localeCompare(b.name));
 
 	$: filteredDocs = $documents
-		.filter((doc) => findByName(doc, prompt))
+		.filter((doc) => findByName(doc, command))
 		.sort((a, b) => a.title.localeCompare(b.title));
 
 	$: filteredItems = [...filteredCollections, ...filteredDocs];
 
-	$: if (prompt) {
+	$: if (command) {
 		selectedIdx = 0;
 
 		console.log(filteredCollections);
@@ -62,9 +63,9 @@
 		name: string;
 	};
 
-	const findByName = (obj: ObjectWithName, prompt: string) => {
+	const findByName = (obj: ObjectWithName, command: string) => {
 		const name = obj.name.toLowerCase();
-		return name.includes(prompt.toLowerCase().split(' ')?.at(0)?.substring(1) ?? '');
+		return name.includes(command.toLowerCase().split(' ')?.at(0)?.substring(1) ?? '');
 	};
 
 	export const selectUp = () => {
@@ -78,7 +79,7 @@
 	const confirmSelect = async (doc) => {
 		dispatch('select', doc);
 
-		prompt = removeFirstHashWord(prompt);
+		prompt = removeLastWordFromString(prompt, command);
 		const chatInputElement = document.getElementById('chat-textarea');
 
 		await tick();
@@ -89,7 +90,7 @@
 	const confirmSelectWeb = async (url) => {
 		dispatch('url', url);
 
-		prompt = removeFirstHashWord(prompt);
+		prompt = removeLastWordFromString(prompt, command);
 		const chatInputElement = document.getElementById('chat-textarea');
 
 		await tick();
@@ -100,7 +101,7 @@
 	const confirmSelectYoutube = async (url) => {
 		dispatch('youtube', url);
 
-		prompt = removeFirstHashWord(prompt);
+		prompt = removeLastWordFromString(prompt, command);
 		const chatInputElement = document.getElementById('chat-textarea');
 
 		await tick();
@@ -110,7 +111,10 @@
 </script>
 
 {#if filteredItems.length > 0 || prompt.split(' ')?.at(0)?.substring(1).startsWith('http')}
-	<div class="pl-1 pr-12 mb-3 text-left w-full absolute bottom-0 left-0 right-0 z-10">
+	<div
+		id="commands-container"
+		class="pl-1 pr-12 mb-3 text-left w-full absolute bottom-0 left-0 right-0 z-10"
+	>
 		<div class="flex w-full dark:border dark:border-gray-850 rounded-lg">
 			<div class=" bg-gray-50 dark:bg-gray-850 w-10 rounded-l-lg text-center">
 				<div class=" text-lg font-semibold mt-2">#</div>

+ 90 - 0
src/lib/components/chat/MessageInput/Commands/Models.svelte

@@ -0,0 +1,90 @@
+<script lang="ts">
+	import { createEventDispatcher, onMount } from 'svelte';
+	import { tick, getContext } from 'svelte';
+
+	import { models } from '$lib/stores';
+
+	const i18n = getContext('i18n');
+
+	const dispatch = createEventDispatcher();
+
+	export let command = '';
+
+	let selectedIdx = 0;
+	let filteredModels = [];
+
+	$: filteredModels = $models
+		.filter((p) =>
+			p.name.toLowerCase().includes(command.toLowerCase().split(' ')?.at(0)?.substring(1) ?? '')
+		)
+		.sort((a, b) => a.name.localeCompare(b.name));
+
+	$: if (command) {
+		selectedIdx = 0;
+	}
+
+	export const selectUp = () => {
+		selectedIdx = Math.max(0, selectedIdx - 1);
+	};
+
+	export const selectDown = () => {
+		selectedIdx = Math.min(selectedIdx + 1, filteredModels.length - 1);
+	};
+
+	const confirmSelect = async (model) => {
+		command = '';
+		dispatch('select', model);
+	};
+
+	onMount(async () => {
+		await tick();
+		const chatInputElement = document.getElementById('chat-textarea');
+		await tick();
+		chatInputElement?.focus();
+		await tick();
+	});
+</script>
+
+{#if filteredModels.length > 0}
+	<div
+		id="commands-container"
+		class="pl-1 pr-12 mb-3 text-left w-full absolute bottom-0 left-0 right-0 z-10"
+	>
+		<div class="flex w-full dark:border dark:border-gray-850 rounded-lg">
+			<div class=" bg-gray-50 dark:bg-gray-850 w-10 rounded-l-lg text-center">
+				<div class=" text-lg font-semibold mt-2">@</div>
+			</div>
+
+			<div
+				class="max-h-60 flex flex-col w-full rounded-r-lg bg-white dark:bg-gray-900 dark:text-gray-100"
+			>
+				<div class="m-1 overflow-y-auto p-1 rounded-r-lg space-y-0.5 scrollbar-hidden">
+					{#each filteredModels as model, modelIdx}
+						<button
+							class="px-3 py-1.5 rounded-xl w-full text-left {modelIdx === selectedIdx
+								? 'bg-gray-50 dark:bg-gray-850 selected-command-option-button'
+								: ''}"
+							type="button"
+							on:click={() => {
+								confirmSelect(model);
+							}}
+							on:mousemove={() => {
+								selectedIdx = modelIdx;
+							}}
+							on:focus={() => {}}
+						>
+							<div class="flex font-medium text-black dark:text-gray-100 line-clamp-1">
+								<img
+									src={model?.info?.meta?.profile_image_url ?? '/static/favicon.png'}
+									alt={model?.name ?? model.id}
+									class="rounded-full size-6 items-center mr-2"
+								/>
+								{model.name}
+							</div>
+						</button>
+					{/each}
+				</div>
+			</div>
+		</div>
+	</div>
+{/if}

+ 23 - 18
src/lib/components/chat/MessageInput/PromptCommands.svelte → src/lib/components/chat/MessageInput/Commands/Prompts.svelte

@@ -7,27 +7,30 @@
 	const i18n = getContext('i18n');
 
 	export let files;
+
 	export let prompt = '';
-	let selectedCommandIdx = 0;
-	let filteredPromptCommands = [];
+	export let command = '';
+
+	let selectedPromptIdx = 0;
+	let filteredPrompts = [];
 
-	$: filteredPromptCommands = $prompts
-		.filter((p) => p.command.toLowerCase().includes(prompt.toLowerCase()))
+	$: filteredPrompts = $prompts
+		.filter((p) => p.command.toLowerCase().includes(command.toLowerCase()))
 		.sort((a, b) => a.title.localeCompare(b.title));
 
-	$: if (prompt) {
-		selectedCommandIdx = 0;
+	$: if (command) {
+		selectedPromptIdx = 0;
 	}
 
 	export const selectUp = () => {
-		selectedCommandIdx = Math.max(0, selectedCommandIdx - 1);
+		selectedPromptIdx = Math.max(0, selectedPromptIdx - 1);
 	};
 
 	export const selectDown = () => {
-		selectedCommandIdx = Math.min(selectedCommandIdx + 1, filteredPromptCommands.length - 1);
+		selectedPromptIdx = Math.min(selectedPromptIdx + 1, filteredPrompts.length - 1);
 	};
 
-	const confirmCommand = async (command) => {
+	const confirmPrompt = async (command) => {
 		let text = command.content;
 
 		if (command.content.includes('{{CLIPBOARD}}')) {
@@ -79,7 +82,6 @@
 		await tick();
 
 		const words = findWordIndices(prompt);
-
 		if (words.length > 0) {
 			const word = words.at(0);
 			chatInputElement.setSelectionRange(word?.startIndex, word.endIndex + 1);
@@ -87,8 +89,11 @@
 	};
 </script>
 
-{#if filteredPromptCommands.length > 0}
-	<div class="pl-1 pr-12 mb-3 text-left w-full absolute bottom-0 left-0 right-0 z-10">
+{#if filteredPrompts.length > 0}
+	<div
+		id="commands-container"
+		class="pl-1 pr-12 mb-3 text-left w-full absolute bottom-0 left-0 right-0 z-10"
+	>
 		<div class="flex w-full dark:border dark:border-gray-850 rounded-lg">
 			<div class="  bg-gray-50 dark:bg-gray-850 w-10 rounded-l-lg text-center">
 				<div class=" text-lg font-semibold mt-2">/</div>
@@ -98,26 +103,26 @@
 				class="max-h-60 flex flex-col w-full rounded-r-lg bg-white dark:bg-gray-900 dark:text-gray-100"
 			>
 				<div class="m-1 overflow-y-auto p-1 rounded-r-lg space-y-0.5 scrollbar-hidden">
-					{#each filteredPromptCommands as command, commandIdx}
+					{#each filteredPrompts as prompt, promptIdx}
 						<button
-							class=" px-3 py-1.5 rounded-xl w-full text-left {commandIdx === selectedCommandIdx
+							class=" px-3 py-1.5 rounded-xl w-full text-left {promptIdx === selectedPromptIdx
 								? '  bg-gray-50 dark:bg-gray-850 selected-command-option-button'
 								: ''}"
 							type="button"
 							on:click={() => {
-								confirmCommand(command);
+								confirmPrompt(prompt);
 							}}
 							on:mousemove={() => {
-								selectedCommandIdx = commandIdx;
+								selectedPromptIdx = promptIdx;
 							}}
 							on:focus={() => {}}
 						>
 							<div class=" font-medium text-black dark:text-gray-100">
-								{command.command}
+								{prompt.command}
 							</div>
 
 							<div class=" text-xs text-gray-600 dark:text-gray-100">
-								{command.title}
+								{prompt.title}
 							</div>
 						</button>
 					{/each}

+ 0 - 181
src/lib/components/chat/MessageInput/Models.svelte

@@ -1,181 +0,0 @@
-<script lang="ts">
-	import { createEventDispatcher } from 'svelte';
-
-	import { generatePrompt } from '$lib/apis/ollama';
-	import { models } from '$lib/stores';
-	import { splitStream } from '$lib/utils';
-	import { tick, getContext } from 'svelte';
-	import { toast } from 'svelte-sonner';
-
-	const i18n = getContext('i18n');
-
-	const dispatch = createEventDispatcher();
-
-	export let prompt = '';
-	export let user = null;
-
-	export let chatInputPlaceholder = '';
-	export let messages = [];
-
-	let selectedIdx = 0;
-	let filteredModels = [];
-
-	$: filteredModels = $models
-		.filter((p) =>
-			p.name.toLowerCase().includes(prompt.toLowerCase().split(' ')?.at(0)?.substring(1) ?? '')
-		)
-		.sort((a, b) => a.name.localeCompare(b.name));
-
-	$: if (prompt) {
-		selectedIdx = 0;
-	}
-
-	export const selectUp = () => {
-		selectedIdx = Math.max(0, selectedIdx - 1);
-	};
-
-	export const selectDown = () => {
-		selectedIdx = Math.min(selectedIdx + 1, filteredModels.length - 1);
-	};
-
-	const confirmSelect = async (model) => {
-		prompt = '';
-		dispatch('select', model);
-	};
-
-	const confirmSelectCollaborativeChat = async (model) => {
-		// dispatch('select', model);
-		prompt = '';
-		user = JSON.parse(JSON.stringify(model.name));
-		await tick();
-
-		chatInputPlaceholder = $i18n.t('{{modelName}} is thinking...', { modelName: model.name });
-
-		const chatInputElement = document.getElementById('chat-textarea');
-
-		await tick();
-		chatInputElement?.focus();
-		await tick();
-
-		const convoText = messages.reduce((a, message, i, arr) => {
-			return `${a}### ${message.role.toUpperCase()}\n${message.content}\n\n`;
-		}, '');
-
-		const res = await generatePrompt(localStorage.token, model.name, convoText);
-
-		if (res && res.ok) {
-			const reader = res.body
-				.pipeThrough(new TextDecoderStream())
-				.pipeThrough(splitStream('\n'))
-				.getReader();
-
-			while (true) {
-				const { value, done } = await reader.read();
-				if (done) {
-					break;
-				}
-
-				try {
-					let lines = value.split('\n');
-
-					for (const line of lines) {
-						if (line !== '') {
-							console.log(line);
-							let data = JSON.parse(line);
-
-							if ('detail' in data) {
-								throw data;
-							}
-
-							if ('id' in data) {
-								console.log(data);
-							} else {
-								if (data.done == false) {
-									if (prompt == '' && data.response == '\n') {
-										continue;
-									} else {
-										prompt += data.response;
-										console.log(data.response);
-										chatInputElement.scrollTop = chatInputElement.scrollHeight;
-										await tick();
-									}
-								}
-							}
-						}
-					}
-				} catch (error) {
-					console.log(error);
-					if ('detail' in error) {
-						toast.error(error.detail);
-					}
-					break;
-				}
-			}
-		} else {
-			if (res !== null) {
-				const error = await res.json();
-				console.log(error);
-				if ('detail' in error) {
-					toast.error(error.detail);
-				} else {
-					toast.error(error.error);
-				}
-			} else {
-				toast.error(
-					$i18n.t('Uh-oh! There was an issue connecting to {{provider}}.', { provider: 'llama' })
-				);
-			}
-		}
-
-		chatInputPlaceholder = '';
-
-		console.log(user);
-	};
-</script>
-
-{#if prompt.charAt(0) === '@'}
-	{#if filteredModels.length > 0}
-		<div class="pl-1 pr-12 mb-3 text-left w-full absolute bottom-0 left-0 right-0 z-10">
-			<div class="flex w-full dark:border dark:border-gray-850 rounded-lg">
-				<div class=" bg-gray-50 dark:bg-gray-850 w-10 rounded-l-lg text-center">
-					<div class=" text-lg font-semibold mt-2">@</div>
-				</div>
-
-				<div
-					class="max-h-60 flex flex-col w-full rounded-r-lg bg-white dark:bg-gray-900 dark:text-gray-100"
-				>
-					<div class="m-1 overflow-y-auto p-1 rounded-r-lg space-y-0.5 scrollbar-hidden">
-						{#each filteredModels as model, modelIdx}
-							<button
-								class="px-3 py-1.5 rounded-xl w-full text-left {modelIdx === selectedIdx
-									? 'bg-gray-50 dark:bg-gray-850 selected-command-option-button'
-									: ''}"
-								type="button"
-								on:click={() => {
-									confirmSelect(model);
-								}}
-								on:mousemove={() => {
-									selectedIdx = modelIdx;
-								}}
-								on:focus={() => {}}
-							>
-								<div class="flex font-medium text-black dark:text-gray-100 line-clamp-1">
-									<img
-										src={model?.info?.meta?.profile_image_url ?? '/static/favicon.png'}
-										alt={model?.name ?? model.id}
-										class="rounded-full size-6 items-center mr-2"
-									/>
-									{model.name}
-								</div>
-
-								<!-- <div class=" text-xs text-gray-600 line-clamp-1">
-								{doc.title}
-								</div> -->
-							</button>
-						{/each}
-					</div>
-				</div>
-			</div>
-		</div>
-	{/if}
-{/if}

+ 1 - 0
src/lib/components/chat/Messages/Markdown/KatexRenderer.svelte

@@ -1,6 +1,7 @@
 <script lang="ts">
 	import katex from 'katex';
 	import 'katex/contrib/mhchem';
+	import 'katex/dist/katex.min.css';
 
 	export let content: string;
 	export let displayMode: boolean = false;

+ 18 - 5
src/lib/components/chat/Messages/Markdown/MarkdownInlineTokens.svelte

@@ -1,12 +1,17 @@
 <script lang="ts">
 	import DOMPurify from 'dompurify';
+	import { toast } from 'svelte-sonner';
+
 	import type { Token } from 'marked';
-	import { revertSanitizedResponseContent, unescapeHtml } from '$lib/utils';
-	import { onMount } from 'svelte';
-	import Image from '$lib/components/common/Image.svelte';
+	import { getContext } from 'svelte';
+
+	const i18n = getContext('i18n');
 
-	import KatexRenderer from './KatexRenderer.svelte';
 	import { WEBUI_BASE_URL } from '$lib/constants';
+	import { copyToClipboard, revertSanitizedResponseContent, unescapeHtml } from '$lib/utils';
+
+	import Image from '$lib/components/common/Image.svelte';
+	import KatexRenderer from './KatexRenderer.svelte';
 
 	export let id: string;
 	export let tokens: Token[];
@@ -37,7 +42,15 @@
 			<svelte:self id={`${id}-em`} tokens={token.tokens} />
 		</em>
 	{:else if token.type === 'codespan'}
-		<code class="codespan">{unescapeHtml(token.text)}</code>
+		<!-- svelte-ignore a11y-click-events-have-key-events -->
+		<!-- svelte-ignore a11y-no-noninteractive-element-interactions -->
+		<code
+			class="codespan cursor-pointer"
+			on:click={() => {
+				copyToClipboard(unescapeHtml(token.text));
+				toast.success($i18n.t('Copied to clipboard'));
+			}}>{unescapeHtml(token.text)}</code
+		>
 	{:else if token.type === 'br'}
 		<br />
 	{:else if token.type === 'del'}

+ 1 - 1
src/lib/components/chat/Messages/Markdown/MarkdownTokens.svelte

@@ -142,7 +142,7 @@
 			/>
 		{/if}
 	{:else if token.type === 'space'}
-		{''}
+		<div class="my-0.5" />
 	{:else}
 		{console.log('Unknown token', token)}
 	{/if}

+ 217 - 129
src/lib/components/chat/Messages/ResponseMessage.svelte

@@ -2,11 +2,10 @@
 	import { toast } from 'svelte-sonner';
 	import dayjs from 'dayjs';
 
-	import { fade } from 'svelte/transition';
 	import { createEventDispatcher } from 'svelte';
 	import { onMount, tick, getContext } from 'svelte';
 
-	const i18n = getContext('i18n');
+	const i18n = getContext<Writable<i18nType>>('i18n');
 
 	const dispatch = createEventDispatcher();
 
@@ -15,20 +14,19 @@
 	import { imageGenerations } from '$lib/apis/images';
 	import {
 		approximateToHumanReadable,
-		extractSentences,
-		replaceTokens,
-		processResponseContent
+		extractParagraphsForAudio,
+		extractSentencesForAudio,
+		cleanText,
+		getMessageContentParts
 	} from '$lib/utils';
 	import { WEBUI_BASE_URL } from '$lib/constants';
 
 	import Name from './Name.svelte';
 	import ProfileImage from './ProfileImage.svelte';
 	import Skeleton from './Skeleton.svelte';
-	import CodeBlock from './CodeBlock.svelte';
 	import Image from '$lib/components/common/Image.svelte';
 	import Tooltip from '$lib/components/common/Tooltip.svelte';
 	import RateComment from './RateComment.svelte';
-	import CitationsModal from '$lib/components/chat/Messages/CitationsModal.svelte';
 	import Spinner from '$lib/components/common/Spinner.svelte';
 	import WebSearchResults from './ResponseMessage/WebSearchResults.svelte';
 	import Sparkles from '$lib/components/icons/Sparkles.svelte';
@@ -36,7 +34,49 @@
 	import Error from './Error.svelte';
 	import Citations from './Citations.svelte';
 
-	export let message;
+	import type { Writable } from 'svelte/store';
+	import type { i18n as i18nType } from 'i18next';
+
+	interface MessageType {
+		id: string;
+		model: string;
+		content: string;
+		files?: { type: string; url: string }[];
+		timestamp: number;
+		role: string;
+		statusHistory?: {
+			done: boolean;
+			action: string;
+			description: string;
+			urls?: string[];
+			query?: string;
+		}[];
+		status?: {
+			done: boolean;
+			action: string;
+			description: string;
+			urls?: string[];
+			query?: string;
+		};
+		done: boolean;
+		error?: boolean | { content: string };
+		citations?: string[];
+		info?: {
+			openai?: boolean;
+			prompt_tokens?: number;
+			completion_tokens?: number;
+			total_tokens?: number;
+			eval_count?: number;
+			eval_duration?: number;
+			prompt_eval_count?: number;
+			prompt_eval_duration?: number;
+			total_duration?: number;
+			load_duration?: number;
+		};
+		annotation?: { type: string; rating: number };
+	}
+
+	export let message: MessageType;
 	export let siblings;
 
 	export let isLastMessage = true;
@@ -60,28 +100,33 @@
 	let editedContent = '';
 	let editTextAreaElement: HTMLTextAreaElement;
 
-	let sentencesAudio = {};
-	let speaking = null;
-	let speakingIdx = null;
+	let audioParts: Record<number, HTMLAudioElement | null> = {};
+	let speaking = false;
+	let speakingIdx: number | undefined;
 
 	let loadingSpeech = false;
 	let generatingImage = false;
 
 	let showRateComment = false;
 
-	const playAudio = (idx) => {
-		return new Promise((res) => {
+	const playAudio = (idx: number) => {
+		return new Promise<void>((res) => {
 			speakingIdx = idx;
-			const audio = sentencesAudio[idx];
+			const audio = audioParts[idx];
+
+			if (!audio) {
+				return res();
+			}
+
 			audio.play();
-			audio.onended = async (e) => {
+			audio.onended = async () => {
 				await new Promise((r) => setTimeout(r, 300));
 
-				if (Object.keys(sentencesAudio).length - 1 === idx) {
-					speaking = null;
+				if (Object.keys(audioParts).length - 1 === idx) {
+					speaking = false;
 				}
 
-				res(e);
+				res();
 			};
 		});
 	};
@@ -91,113 +136,111 @@
 			try {
 				speechSynthesis.cancel();
 
-				sentencesAudio[speakingIdx].pause();
-				sentencesAudio[speakingIdx].currentTime = 0;
+				if (speakingIdx !== undefined && audioParts[speakingIdx]) {
+					audioParts[speakingIdx]!.pause();
+					audioParts[speakingIdx]!.currentTime = 0;
+				}
 			} catch {}
 
-			speaking = null;
-			speakingIdx = null;
+			speaking = false;
+			speakingIdx = undefined;
+			return;
+		}
+
+		if (!(message?.content ?? '').trim().length) {
+			toast.info($i18n.t('No content to speak'));
+			return;
+		}
+
+		speaking = true;
+
+		if ($config.audio.tts.engine !== '') {
+			loadingSpeech = true;
+
+			const messageContentParts: string[] = getMessageContentParts(
+				message.content,
+				$config?.audio?.tts?.split_on ?? 'punctuation'
+			);
+
+			if (!messageContentParts.length) {
+				console.log('No content to speak');
+				toast.info($i18n.t('No content to speak'));
+
+				speaking = false;
+				loadingSpeech = false;
+				return;
+			}
+
+			console.debug('Prepared message content for TTS', messageContentParts);
+
+			audioParts = messageContentParts.reduce(
+				(acc, _sentence, idx) => {
+					acc[idx] = null;
+					return acc;
+				},
+				{} as typeof audioParts
+			);
+
+			let lastPlayedAudioPromise = Promise.resolve(); // Initialize a promise that resolves immediately
+
+			for (const [idx, sentence] of messageContentParts.entries()) {
+				const res = await synthesizeOpenAISpeech(
+					localStorage.token,
+					$settings?.audio?.tts?.defaultVoice === $config.audio.tts.voice
+						? ($settings?.audio?.tts?.voice ?? $config?.audio?.tts?.voice)
+						: $config?.audio?.tts?.voice,
+					sentence
+				).catch((error) => {
+					console.error(error);
+					toast.error(error);
+
+					speaking = false;
+					loadingSpeech = false;
+				});
+
+				if (res) {
+					const blob = await res.blob();
+					const blobUrl = URL.createObjectURL(blob);
+					const audio = new Audio(blobUrl);
+					audioParts[idx] = audio;
+					loadingSpeech = false;
+					lastPlayedAudioPromise = lastPlayedAudioPromise.then(() => playAudio(idx));
+				}
+			}
 		} else {
-			if ((message?.content ?? '').trim() !== '') {
-				speaking = true;
-
-				if ($config.audio.tts.engine !== '') {
-					loadingSpeech = true;
-
-					const sentences = extractSentences(message.content).reduce((mergedTexts, currentText) => {
-						const lastIndex = mergedTexts.length - 1;
-						if (lastIndex >= 0) {
-							const previousText = mergedTexts[lastIndex];
-							const wordCount = previousText.split(/\s+/).length;
-							if (wordCount < 2) {
-								mergedTexts[lastIndex] = previousText + ' ' + currentText;
-							} else {
-								mergedTexts.push(currentText);
-							}
-						} else {
-							mergedTexts.push(currentText);
-						}
-						return mergedTexts;
-					}, []);
-
-					console.log(sentences);
-
-					if (sentences.length > 0) {
-						sentencesAudio = sentences.reduce((a, e, i, arr) => {
-							a[i] = null;
-							return a;
-						}, {});
-
-						let lastPlayedAudioPromise = Promise.resolve(); // Initialize a promise that resolves immediately
-
-						for (const [idx, sentence] of sentences.entries()) {
-							const res = await synthesizeOpenAISpeech(
-								localStorage.token,
-								$settings?.audio?.tts?.defaultVoice === $config.audio.tts.voice
-									? ($settings?.audio?.tts?.voice ?? $config?.audio?.tts?.voice)
-									: $config?.audio?.tts?.voice,
-								sentence
-							).catch((error) => {
-								toast.error(error);
-
-								speaking = null;
-								loadingSpeech = false;
-
-								return null;
-							});
-
-							if (res) {
-								const blob = await res.blob();
-								const blobUrl = URL.createObjectURL(blob);
-								const audio = new Audio(blobUrl);
-								sentencesAudio[idx] = audio;
-								loadingSpeech = false;
-								lastPlayedAudioPromise = lastPlayedAudioPromise.then(() => playAudio(idx));
-							}
+			let voices = [];
+			const getVoicesLoop = setInterval(() => {
+				voices = speechSynthesis.getVoices();
+				if (voices.length > 0) {
+					clearInterval(getVoicesLoop);
+
+					const voice =
+						voices
+							?.filter(
+								(v) => v.voiceURI === ($settings?.audio?.tts?.voice ?? $config?.audio?.tts?.voice)
+							)
+							?.at(0) ?? undefined;
+
+					console.log(voice);
+
+					const speak = new SpeechSynthesisUtterance(message.content);
+
+					console.log(speak);
+
+					speak.onend = () => {
+						speaking = false;
+						if ($settings.conversationMode) {
+							document.getElementById('voice-input-button')?.click();
 						}
-					} else {
-						speaking = null;
-						loadingSpeech = false;
+					};
+
+					if (voice) {
+						speak.voice = voice;
 					}
-				} else {
-					let voices = [];
-					const getVoicesLoop = setInterval(async () => {
-						voices = await speechSynthesis.getVoices();
-						if (voices.length > 0) {
-							clearInterval(getVoicesLoop);
-
-							const voice =
-								voices
-									?.filter(
-										(v) =>
-											v.voiceURI === ($settings?.audio?.tts?.voice ?? $config?.audio?.tts?.voice)
-									)
-									?.at(0) ?? undefined;
-
-							console.log(voice);
-
-							const speak = new SpeechSynthesisUtterance(message.content);
-
-							console.log(speak);
-
-							speak.onend = () => {
-								speaking = null;
-								if ($settings.conversationMode) {
-									document.getElementById('voice-input-button')?.click();
-								}
-							};
-
-							if (voice) {
-								speak.voice = voice;
-							}
-
-							speechSynthesis.speak(speak);
-						}
-					}, 100);
+
+					speechSynthesis.speak(speak);
 				}
-			} else {
-				toast.error($i18n.t('No content to speak'));
-			}
+			}, 100);
 		}
 	};
 
@@ -230,7 +273,7 @@
 		await tick();
 	};
 
-	const generateImage = async (message) => {
+	const generateImage = async (message: MessageType) => {
 		generatingImage = true;
 		const res = await imageGenerations(localStorage.token, message.content).catch((error) => {
 			toast.error(error);
@@ -285,7 +328,7 @@
 			</Name>
 
 			<div>
-				{#if (message?.files ?? []).filter((f) => f.type === 'image').length > 0}
+				{#if message?.files && message.files?.filter((f) => f.type === 'image').length > 0}
 					<div class="my-2.5 w-full flex overflow-x-auto gap-2 flex-wrap">
 						{#each message.files as file}
 							<div>
@@ -303,8 +346,8 @@
 							{@const status = (
 								message?.statusHistory ?? [...(message?.status ? [message?.status] : [])]
 							).at(-1)}
-							<div class="flex items-center gap-2 pt-0.5 pb-1">
-								{#if status.done === false}
+							<div class="status-description flex items-center gap-2 pt-0.5 pb-1">
+								{#if status?.done === false}
 									<div class="">
 										<Spinner className="size-4" />
 									</div>
@@ -313,14 +356,16 @@
 								{#if status?.action === 'web_search' && status?.urls}
 									<WebSearchResults {status}>
 										<div class="flex flex-col justify-center -space-y-0.5">
-											<div class="text-base line-clamp-1 text-wrap">
+											<div class="shimmer text-base line-clamp-1 text-wrap">
 												{status?.description}
 											</div>
 										</div>
 									</WebSearchResults>
 								{:else}
 									<div class="flex flex-col justify-center -space-y-0.5">
-										<div class=" text-gray-500 dark:text-gray-500 text-base line-clamp-1 text-wrap">
+										<div
+											class="shimmer text-gray-500 dark:text-gray-500 text-base line-clamp-1 text-wrap"
+										>
 											{status?.description}
 										</div>
 									</div>
@@ -521,7 +566,7 @@
 											: 'invisible group-hover:visible'} p-1.5 hover:bg-black/5 dark:hover:bg-white/5 rounded-lg dark:hover:text-white hover:text-black transition"
 										on:click={() => {
 											if (!loadingSpeech) {
-												toggleSpeakMessage(message);
+												toggleSpeakMessage();
 											}
 										}}
 									>
@@ -661,7 +706,7 @@
 													`${
 														Math.round(
 															((message.info.eval_count ?? 0) /
-																(message.info.eval_duration / 1000000000)) *
+																((message.info.eval_duration ?? 0) / 1000000000)) *
 																100
 														) / 100
 													} tokens` ?? 'N/A'
@@ -669,7 +714,7 @@
 					prompt_token/s: ${
 						Math.round(
 							((message.info.prompt_eval_count ?? 0) /
-								(message.info.prompt_eval_duration / 1000000000)) *
+								((message.info.prompt_eval_duration ?? 0) / 1000000000)) *
 								100
 						) / 100 ?? 'N/A'
 					} tokens<br/>
@@ -688,7 +733,7 @@
 		            eval_duration: ${
 									Math.round(((message.info.eval_duration ?? 0) / 1000000) * 100) / 100 ?? 'N/A'
 								}ms<br/>
-		            approximate_total: ${approximateToHumanReadable(message.info.total_duration)}`}
+		            approximate_total: ${approximateToHumanReadable(message.info.total_duration ?? 0)}`}
 										placement="top"
 									>
 										<Tooltip content={$i18n.t('Generation Info')} placement="bottom">
@@ -984,4 +1029,47 @@
 		-ms-overflow-style: none; /* IE and Edge */
 		scrollbar-width: none; /* Firefox */
 	}
+	@keyframes shimmer {
+		0% {
+			background-position: 200% 0;
+		}
+		100% {
+			background-position: -200% 0;
+		}
+	}
+
+	.shimmer {
+		background: linear-gradient(90deg, #9a9b9e 25%, #2a2929 50%, #9a9b9e 75%);
+		background-size: 200% 100%;
+		background-clip: text;
+		-webkit-background-clip: text;
+		-webkit-text-fill-color: transparent;
+		animation: shimmer 4s linear infinite;
+		color: #818286; /* Fallback color */
+	}
+
+	:global(.dark) .shimmer {
+		background: linear-gradient(90deg, #818286 25%, #eae5e5 50%, #818286 75%);
+		background-size: 200% 100%;
+		background-clip: text;
+		-webkit-background-clip: text;
+		-webkit-text-fill-color: transparent;
+		animation: shimmer 4s linear infinite;
+		color: #a1a3a7; /* Darker fallback color for dark mode */
+	}
+
+	@keyframes smoothFadeIn {
+		0% {
+			opacity: 0;
+			transform: translateY(-10px);
+		}
+		100% {
+			opacity: 1;
+			transform: translateY(0);
+		}
+	}
+
+	.status-description {
+		animation: smoothFadeIn 0.2s forwards;
+	}
 </style>

+ 1 - 0
src/lib/components/workspace/Documents.svelte

@@ -24,6 +24,7 @@
 	let importFiles = '';
 
 	let inputFiles = '';
+
 	let query = '';
 	let documentsImportInputElement: HTMLInputElement;
 	let tags = [];

+ 11 - 2
src/lib/i18n/locales/ar-BH/translation.json

@@ -6,7 +6,6 @@
 	"(latest)": "(الأخير)",
 	"{{ models }}": "{{ نماذج }}",
 	"{{ owner }}: You cannot delete a base model": "{{ المالك }}: لا يمكنك حذف نموذج أساسي",
-	"{{modelName}} is thinking...": "{{modelName}} ...يفكر",
 	"{{user}}'s Chats": "دردشات {{user}}",
 	"{{webUIName}} Backend Required": "{{webUIName}} مطلوب",
 	"*Prompt node ID(s) are required for image generation": "",
@@ -68,7 +67,6 @@
 	"Attach file": "أرفق ملف",
 	"Attention to detail": "انتبه للتفاصيل",
 	"Audio": "صوتي",
-	"Audio settings updated successfully": "",
 	"August": "أغسطس",
 	"Auto-playback response": "استجابة التشغيل التلقائي",
 	"Automatic1111": "",
@@ -138,9 +136,11 @@
 	"Context Length": "طول السياق",
 	"Continue Response": "متابعة الرد",
 	"Continue with {{provider}}": "",
+	"Control how message text is split for TTS requests. 'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "",
 	"Controls": "",
 	"Copied": "",
 	"Copied shared chat URL to clipboard!": "تم نسخ عنوان URL للدردشة المشتركة إلى الحافظة",
+	"Copied to clipboard": "",
 	"Copy": "نسخ",
 	"Copy Code": "",
 	"Copy last code block": "انسخ كتلة التعليمات البرمجية الأخيرة",
@@ -285,6 +285,7 @@
 	"File": "",
 	"File Mode": "وضع الملف",
 	"File not found.": "لم يتم العثور على الملف.",
+	"File size should not exceed {{maxSize}} MB.": "",
 	"Files": "",
 	"Filter is now globally disabled": "",
 	"Filter is now globally enabled": "",
@@ -361,6 +362,7 @@
 	"large language models, locally.": "",
 	"Last Active": "آخر نشاط",
 	"Last Modified": "",
+	"Leave empty for unlimited": "",
 	"Light": "فاتح",
 	"Listening...": "",
 	"LLMs can make mistakes. Verify important information.": "يمكن أن تصدر بعض الأخطاء. لذلك يجب التحقق من المعلومات المهمة",
@@ -375,6 +377,8 @@
 	"Manage Pipelines": "إدارة خطوط الأنابيب",
 	"March": "مارس",
 	"Max Tokens (num_predict)": "ماكس توكنز (num_predict)",
+	"Max Upload Count": "",
+	"Max Upload Size": "",
 	"Maximum of 3 models can be downloaded simultaneously. Please try again later.": "يمكن تنزيل 3 نماذج كحد أقصى في وقت واحد. الرجاء معاودة المحاولة في وقت لاحق.",
 	"May": "مايو",
 	"Memories accessible by LLMs will be shown here.": "سيتم عرض الذكريات التي يمكن الوصول إليها بواسطة LLMs هنا.",
@@ -505,6 +509,7 @@
 	"Reset Vector Storage": "إعادة تعيين تخزين المتجهات",
 	"Response AutoCopy to Clipboard": "النسخ التلقائي للاستجابة إلى الحافظة",
 	"Response notifications cannot be activated as the website permissions have been denied. Please visit your browser settings to grant the necessary access.": "",
+	"Response splitting": "",
 	"Role": "منصب",
 	"Rosé Pine": "Rosé Pine",
 	"Rosé Pine Dawn": "Rosé Pine Dawn",
@@ -539,6 +544,7 @@
 	"Searched {{count}} sites_many": "تم البحث في {{count}} sites_many",
 	"Searched {{count}} sites_other": "تم البحث في {{count}} sites_other",
 	"Searching \"{{searchQuery}}\"": "",
+	"Searching Knowledge for \"{{searchQuery}}\"": "",
 	"Searxng Query URL": "عنوان URL لاستعلام Searxng",
 	"See readme.md for instructions": "readme.md للحصول على التعليمات",
 	"See what's new": "ما الجديد",
@@ -615,6 +621,8 @@
 	"Tfs Z": "Tfs Z",
 	"Thanks for your feedback!": "شكرا لملاحظاتك!",
 	"The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "",
+	"The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "",
+	"The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "",
 	"The score should be a value between 0.0 (0%) and 1.0 (100%).": "يجب أن تكون النتيجة قيمة تتراوح بين 0.0 (0%) و1.0 (100%).",
 	"Theme": "الثيم",
 	"Thinking...": "",
@@ -714,6 +722,7 @@
 	"Write a summary in 50 words that summarizes [topic or keyword].": "اكتب ملخصًا في 50 كلمة يلخص [الموضوع أو الكلمة الرئيسية]",
 	"Yesterday": "أمس",
 	"You": "انت",
+	"You can only chat with a maximum of {{maxCount}} file(s) at a time.": "",
 	"You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "",
 	"You cannot clone a base model": "لا يمكنك استنساخ نموذج أساسي",
 	"You have no archived conversations.": "لا تملك محادثات محفوظه",

+ 11 - 2
src/lib/i18n/locales/bg-BG/translation.json

@@ -6,7 +6,6 @@
 	"(latest)": "(последна)",
 	"{{ models }}": "{{ модели }}",
 	"{{ owner }}: You cannot delete a base model": "{{ owner }}: Не можете да изтриете базов модел",
-	"{{modelName}} is thinking...": "{{modelName}} мисли ...",
 	"{{user}}'s Chats": "{{user}}'s чатове",
 	"{{webUIName}} Backend Required": "{{webUIName}} Изисква се Бекенд",
 	"*Prompt node ID(s) are required for image generation": "",
@@ -68,7 +67,6 @@
 	"Attach file": "Прикачване на файл",
 	"Attention to detail": "Внимание към детайлите",
 	"Audio": "Аудио",
-	"Audio settings updated successfully": "",
 	"August": "Август",
 	"Auto-playback response": "Аувтоматично възпроизвеждане на Отговора",
 	"Automatic1111": "",
@@ -138,9 +136,11 @@
 	"Context Length": "Дължина на Контекста",
 	"Continue Response": "Продължи отговора",
 	"Continue with {{provider}}": "",
+	"Control how message text is split for TTS requests. 'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "",
 	"Controls": "",
 	"Copied": "",
 	"Copied shared chat URL to clipboard!": "Копирана е връзката за чат!",
+	"Copied to clipboard": "",
 	"Copy": "Копирай",
 	"Copy Code": "",
 	"Copy last code block": "Копиране на последен код блок",
@@ -285,6 +285,7 @@
 	"File": "",
 	"File Mode": "Файл Мод",
 	"File not found.": "Файл не е намерен.",
+	"File size should not exceed {{maxSize}} MB.": "",
 	"Files": "",
 	"Filter is now globally disabled": "",
 	"Filter is now globally enabled": "",
@@ -361,6 +362,7 @@
 	"large language models, locally.": "",
 	"Last Active": "Последни активни",
 	"Last Modified": "",
+	"Leave empty for unlimited": "",
 	"Light": "Светъл",
 	"Listening...": "",
 	"LLMs can make mistakes. Verify important information.": "LLMs могат да правят грешки. Проверете важните данни.",
@@ -375,6 +377,8 @@
 	"Manage Pipelines": "Управление на тръбопроводи",
 	"March": "Март",
 	"Max Tokens (num_predict)": "Макс токени (num_predict)",
+	"Max Upload Count": "",
+	"Max Upload Size": "",
 	"Maximum of 3 models can be downloaded simultaneously. Please try again later.": "Максимум 3 модели могат да бъдат сваляни едновременно. Моля, опитайте отново по-късно.",
 	"May": "Май",
 	"Memories accessible by LLMs will be shown here.": "Мемории достъпни от LLMs ще бъдат показани тук.",
@@ -505,6 +509,7 @@
 	"Reset Vector Storage": "Ресет Vector Storage",
 	"Response AutoCopy to Clipboard": "Аувтоматично копиране на отговор в клипборда",
 	"Response notifications cannot be activated as the website permissions have been denied. Please visit your browser settings to grant the necessary access.": "",
+	"Response splitting": "",
 	"Role": "Роля",
 	"Rosé Pine": "Rosé Pine",
 	"Rosé Pine Dawn": "Rosé Pine Dawn",
@@ -535,6 +540,7 @@
 	"Searched {{count}} sites_one": "Търси се в {{count}} sites_one",
 	"Searched {{count}} sites_other": "Търси се в {{count}} sites_other",
 	"Searching \"{{searchQuery}}\"": "",
+	"Searching Knowledge for \"{{searchQuery}}\"": "",
 	"Searxng Query URL": "URL адрес на заявка на Searxng",
 	"See readme.md for instructions": "Виж readme.md за инструкции",
 	"See what's new": "Виж какво е новото",
@@ -611,6 +617,8 @@
 	"Tfs Z": "Tfs Z",
 	"Thanks for your feedback!": "Благодарим ви за вашия отзив!",
 	"The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "",
+	"The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "",
+	"The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "",
 	"The score should be a value between 0.0 (0%) and 1.0 (100%).": "The score should be a value between 0.0 (0%) and 1.0 (100%).",
 	"Theme": "Тема",
 	"Thinking...": "",
@@ -710,6 +718,7 @@
 	"Write a summary in 50 words that summarizes [topic or keyword].": "Напиши описание в 50 знака, което описва [тема или ключова дума].",
 	"Yesterday": "вчера",
 	"You": "вие",
+	"You can only chat with a maximum of {{maxCount}} file(s) at a time.": "",
 	"You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "",
 	"You cannot clone a base model": "Не можете да клонирате базов модел",
 	"You have no archived conversations.": "Нямате архивирани разговори.",

+ 11 - 2
src/lib/i18n/locales/bn-BD/translation.json

@@ -6,7 +6,6 @@
 	"(latest)": "(সর্বশেষ)",
 	"{{ models }}": "{{ মডেল}}",
 	"{{ owner }}: You cannot delete a base model": "{{ owner}}: আপনি একটি বেস মডেল মুছতে পারবেন না",
-	"{{modelName}} is thinking...": "{{modelName}} চিন্তা করছে...",
 	"{{user}}'s Chats": "{{user}}র চ্যাটস",
 	"{{webUIName}} Backend Required": "{{webUIName}} ব্যাকএন্ড আবশ্যক",
 	"*Prompt node ID(s) are required for image generation": "",
@@ -68,7 +67,6 @@
 	"Attach file": "ফাইল যুক্ত করুন",
 	"Attention to detail": "বিস্তারিত বিশেষতা",
 	"Audio": "অডিও",
-	"Audio settings updated successfully": "",
 	"August": "আগস্ট",
 	"Auto-playback response": "রেসপন্স অটো-প্লেব্যাক",
 	"Automatic1111": "",
@@ -138,9 +136,11 @@
 	"Context Length": "কনটেক্সটের দৈর্ঘ্য",
 	"Continue Response": "যাচাই করুন",
 	"Continue with {{provider}}": "",
+	"Control how message text is split for TTS requests. 'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "",
 	"Controls": "",
 	"Copied": "",
 	"Copied shared chat URL to clipboard!": "শেয়ারকৃত কথা-ব্যবহারের URL ক্লিপবোর্ডে কপি করা হয়েছে!",
+	"Copied to clipboard": "",
 	"Copy": "অনুলিপি",
 	"Copy Code": "",
 	"Copy last code block": "সর্বশেষ কোড ব্লক কপি করুন",
@@ -285,6 +285,7 @@
 	"File": "",
 	"File Mode": "ফাইল মোড",
 	"File not found.": "ফাইল পাওয়া যায়নি",
+	"File size should not exceed {{maxSize}} MB.": "",
 	"Files": "",
 	"Filter is now globally disabled": "",
 	"Filter is now globally enabled": "",
@@ -361,6 +362,7 @@
 	"large language models, locally.": "",
 	"Last Active": "সর্বশেষ সক্রিয়",
 	"Last Modified": "",
+	"Leave empty for unlimited": "",
 	"Light": "লাইট",
 	"Listening...": "",
 	"LLMs can make mistakes. Verify important information.": "LLM ভুল করতে পারে। গুরুত্বপূর্ণ তথ্য যাচাই করে নিন।",
@@ -375,6 +377,8 @@
 	"Manage Pipelines": "পাইপলাইন পরিচালনা করুন",
 	"March": "মার্চ",
 	"Max Tokens (num_predict)": "সর্বোচ্চ টোকেন (num_predict)",
+	"Max Upload Count": "",
+	"Max Upload Size": "",
 	"Maximum of 3 models can be downloaded simultaneously. Please try again later.": "একসঙ্গে সর্বোচ্চ তিনটি মডেল ডাউনলোড করা যায়। দয়া করে পরে আবার চেষ্টা করুন।",
 	"May": "মে",
 	"Memories accessible by LLMs will be shown here.": "LLMs দ্বারা অ্যাক্সেসযোগ্য মেমোরিগুলি এখানে দেখানো হবে।",
@@ -505,6 +509,7 @@
 	"Reset Vector Storage": "ভেক্টর স্টোরেজ রিসেট করুন",
 	"Response AutoCopy to Clipboard": "রেসপন্সগুলো স্বয়ংক্রিভাবে ক্লিপবোর্ডে কপি হবে",
 	"Response notifications cannot be activated as the website permissions have been denied. Please visit your browser settings to grant the necessary access.": "",
+	"Response splitting": "",
 	"Role": "পদবি",
 	"Rosé Pine": "রোজ পাইন",
 	"Rosé Pine Dawn": "ভোরের রোজ পাইন",
@@ -535,6 +540,7 @@
 	"Searched {{count}} sites_one": "{{count}}টি সাইটে অনুসন্ধান করা হয়েছে",
 	"Searched {{count}} sites_other": "{{count}}টি সাইটে অনুসন্ধান করা হয়েছে",
 	"Searching \"{{searchQuery}}\"": "",
+	"Searching Knowledge for \"{{searchQuery}}\"": "",
 	"Searxng Query URL": "Searxng ক্যোয়ারী URL",
 	"See readme.md for instructions": "নির্দেশিকার জন্য readme.md দেখুন",
 	"See what's new": "নতুন কী আছে দেখুন",
@@ -611,6 +617,8 @@
 	"Tfs Z": "Tfs Z",
 	"Thanks for your feedback!": "আপনার মতামত ধন্যবাদ!",
 	"The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "",
+	"The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "",
+	"The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "",
 	"The score should be a value between 0.0 (0%) and 1.0 (100%).": "স্কোর একটি 0.0 (0%) এবং 1.0 (100%) এর মধ্যে একটি মান হওয়া উচিত।",
 	"Theme": "থিম",
 	"Thinking...": "",
@@ -710,6 +718,7 @@
 	"Write a summary in 50 words that summarizes [topic or keyword].": "৫০ শব্দের মধ্যে [topic or keyword] এর একটি সারসংক্ষেপ লিখুন।",
 	"Yesterday": "গতকাল",
 	"You": "আপনি",
+	"You can only chat with a maximum of {{maxCount}} file(s) at a time.": "",
 	"You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "",
 	"You cannot clone a base model": "আপনি একটি বেস মডেল ক্লোন করতে পারবেন না",
 	"You have no archived conversations.": "আপনার কোনও আর্কাইভ করা কথোপকথন নেই।",

+ 27 - 18
src/lib/i18n/locales/ca-ES/translation.json

@@ -6,10 +6,9 @@
 	"(latest)": "(últim)",
 	"{{ models }}": "{{ models }}",
 	"{{ owner }}: You cannot delete a base model": "{{ owner }}: No es pot eliminar un model base",
-	"{{modelName}} is thinking...": "{{modelName}} està pensant...",
 	"{{user}}'s Chats": "Els xats de {{user}}",
 	"{{webUIName}} Backend Required": "El Backend de {{webUIName}} és necessari",
-	"*Prompt node ID(s) are required for image generation": "",
+	"*Prompt node ID(s) are required for image generation": "*Els identificadors de nodes d'indicacions són necessaris per a la generació d'imatges",
 	"A task model is used when performing tasks such as generating titles for chats and web search queries": "Un model de tasca s'utilitza quan es realitzen tasques com ara generar títols per a xats i consultes de cerca per a la web",
 	"a user": "un usuari",
 	"About": "Sobre",
@@ -45,9 +44,9 @@
 	"All Users": "Tots els usuaris",
 	"Allow": "Permetre",
 	"Allow Chat Deletion": "Permetre la supressió del xat",
-	"Allow Chat Editing": "",
+	"Allow Chat Editing": "Permetre l'edició del xat",
 	"Allow non-local voices": "Permetre veus no locals",
-	"Allow Temporary Chat": "",
+	"Allow Temporary Chat": "Permetre el xat temporal",
 	"Allow User Location": "Permetre la ubicació de l'usuari",
 	"Allow Voice Interruption in Call": "Permetre la interrupció de la veu en una trucada",
 	"alphanumeric characters and hyphens": "caràcters alfanumèrics i guions",
@@ -68,10 +67,9 @@
 	"Attach file": "Adjuntar arxiu",
 	"Attention to detail": "Atenció al detall",
 	"Audio": "Àudio",
-	"Audio settings updated successfully": "Les preferències d'àudio s'han actualitzat correctament",
 	"August": "Agost",
 	"Auto-playback response": "Reproduir la resposta automàticament",
-	"Automatic1111": "",
+	"Automatic1111": "Automatic1111",
 	"AUTOMATIC1111 Api Auth String": "Cadena d'autenticació de l'API d'AUTOMATIC1111",
 	"AUTOMATIC1111 Base URL": "URL Base d'AUTOMATIC1111",
 	"AUTOMATIC1111 Base URL is required.": "Es requereix l'URL Base d'AUTOMATIC1111.",
@@ -113,7 +111,7 @@
 	"Click here to select a csv file.": "Clica aquí per seleccionar un fitxer csv.",
 	"Click here to select a py file.": "Clica aquí per seleccionar un fitxer py.",
 	"Click here to select documents.": "Clica aquí per seleccionar documents.",
-	"Click here to upload a workflow.json file.": "",
+	"Click here to upload a workflow.json file.": "Clica aquí per pujar un arxiu workflow.json",
 	"click here.": "clica aquí.",
 	"Click on the user role button to change a user's role.": "Clica sobre el botó de rol d'usuari per canviar el rol d'un usuari.",
 	"Clipboard write permission denied. Please check your browser settings to grant the necessary access.": "Permís d'escriptura al porta-retalls denegat. Comprova els ajustos de navegador per donar l'accés necessari.",
@@ -124,8 +122,8 @@
 	"ComfyUI": "ComfyUI",
 	"ComfyUI Base URL": "URL base de ComfyUI",
 	"ComfyUI Base URL is required.": "L'URL base de ComfyUI és obligatòria.",
-	"ComfyUI Workflow": "",
-	"ComfyUI Workflow Nodes": "",
+	"ComfyUI Workflow": "Flux de treball de ComfyUI",
+	"ComfyUI Workflow Nodes": "Nodes del flux de treball de ComfyUI",
 	"Command": "Comanda",
 	"Concurrent Requests": "Peticions simultànies",
 	"Confirm": "Confirmar",
@@ -138,9 +136,11 @@
 	"Context Length": "Mida del context",
 	"Continue Response": "Continuar la resposta",
 	"Continue with {{provider}}": "Continuar amb {{provider}}",
+	"Control how message text is split for TTS requests. 'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "",
 	"Controls": "Controls",
 	"Copied": "Copiat",
 	"Copied shared chat URL to clipboard!": "S'ha copiat l'URL compartida al porta-retalls!",
+	"Copied to clipboard": "",
 	"Copy": "Copiar",
 	"Copy Code": "Copiar el codi",
 	"Copy last code block": "Copiar l'últim bloc de codi",
@@ -164,7 +164,7 @@
 	"Database": "Base de dades",
 	"December": "Desembre",
 	"Default": "Per defecte",
-	"Default (Open AI)": "",
+	"Default (Open AI)": "Per defecte (Open AI)",
 	"Default (SentenceTransformers)": "Per defecte (SentenceTransformers)",
 	"Default Model": "Model per defecte",
 	"Default model updated": "Model per defecte actualitzat",
@@ -227,7 +227,7 @@
 	"Embedding Model Engine": "Motor de model d'incrustació",
 	"Embedding model set to \"{{embedding_model}}\"": "Model d'incrustació configurat a \"{{embedding_model}}\"",
 	"Enable Community Sharing": "Activar l'ús compartit amb la comunitat",
-	"Enable Message Rating": "",
+	"Enable Message Rating": "Permetre la qualificació de missatges",
 	"Enable New Sign Ups": "Permetre nous registres",
 	"Enable Web Search": "Activar la cerca web",
 	"Enabled": "Habilitat",
@@ -244,7 +244,7 @@
 	"Enter Google PSE Engine Id": "Introdueix l'identificador del motor PSE de Google",
 	"Enter Image Size (e.g. 512x512)": "Introdueix la mida de la imatge (p. ex. 512x512)",
 	"Enter language codes": "Introdueix els codis de llenguatge",
-	"Enter Model ID": "",
+	"Enter Model ID": "Introdueix l'identificador del model",
 	"Enter model tag (e.g. {{modelTag}})": "Introdueix l'etiqueta del model (p. ex. {{modelTag}})",
 	"Enter Number of Steps (e.g. 50)": "Introdueix el nombre de passos (p. ex. 50)",
 	"Enter Score": "Introdueix la puntuació",
@@ -285,6 +285,7 @@
 	"File": "Arxiu",
 	"File Mode": "Mode d'arxiu",
 	"File not found.": "No s'ha trobat l'arxiu.",
+	"File size should not exceed {{maxSize}} MB.": "",
 	"Files": "Arxius",
 	"Filter is now globally disabled": "El filtre ha estat desactivat globalment",
 	"Filter is now globally enabled": "El filtre ha estat activat globalment",
@@ -319,7 +320,7 @@
 	"Google PSE API Key": "Clau API PSE de Google",
 	"Google PSE Engine Id": "Identificador del motor PSE de Google",
 	"h:mm a": "h:mm a",
-	"Haptic Feedback": "",
+	"Haptic Feedback": "Retorn hàptic",
 	"has no conversations.": "no té converses.",
 	"Hello, {{name}}": "Hola, {{name}}",
 	"Help": "Ajuda",
@@ -361,6 +362,7 @@
 	"large language models, locally.": "models de llenguatge extensos, localment",
 	"Last Active": "Activitat recent",
 	"Last Modified": "Modificació",
+	"Leave empty for unlimited": "",
 	"Light": "Clar",
 	"Listening...": "Escoltant...",
 	"LLMs can make mistakes. Verify important information.": "Els models de llenguatge poden cometre errors. Verifica la informació important.",
@@ -368,13 +370,15 @@
 	"LTR": "LTR",
 	"Made by OpenWebUI Community": "Creat per la Comunitat OpenWebUI",
 	"Make sure to enclose them with": "Assegura't d'envoltar-los amb",
-	"Make sure to export a workflow.json file as API format from ComfyUI.": "",
+	"Make sure to export a workflow.json file as API format from ComfyUI.": "Assegura't d'exportar un fitxer workflow.json com a format API des de ComfyUI.",
 	"Manage": "Gestionar",
 	"Manage Models": "Gestionar els models",
 	"Manage Ollama Models": "Gestionar els models Ollama",
 	"Manage Pipelines": "Gestionar les Pipelines",
 	"March": "Març",
 	"Max Tokens (num_predict)": "Nombre màxim de Tokens (num_predict)",
+	"Max Upload Count": "",
+	"Max Upload Size": "",
 	"Maximum of 3 models can be downloaded simultaneously. Please try again later.": "Es poden descarregar un màxim de 3 models simultàniament. Si us plau, prova-ho més tard.",
 	"May": "Maig",
 	"Memories accessible by LLMs will be shown here.": "Les memòries accessibles pels models de llenguatge es mostraran aquí.",
@@ -383,7 +387,7 @@
 	"Memory cleared successfully": "Memòria eliminada correctament",
 	"Memory deleted successfully": "Memòria eliminada correctament",
 	"Memory updated successfully": "Memòria actualitzada correctament",
-	"Merge Responses": "",
+	"Merge Responses": "Fusionar les respostes",
 	"Messages you send after creating your link won't be shared. Users with the URL will be able to view the shared chat.": "Els missatges enviats després de crear el teu enllaç no es compartiran. Els usuaris amb l'URL podran veure el xat compartit.",
 	"Min P": "Min P",
 	"Minimum Score": "Puntuació mínima",
@@ -426,7 +430,7 @@
 	"Note: If you set a minimum score, the search will only return documents with a score greater than or equal to the minimum score.": "Nota: Si s'estableix una puntuació mínima, la cerca només retornarà documents amb una puntuació major o igual a la puntuació mínima.",
 	"Notifications": "Notificacions",
 	"November": "Novembre",
-	"num_gpu (Ollama)": "",
+	"num_gpu (Ollama)": "num_gpu (Ollama)",
 	"num_thread (Ollama)": "num_thread (Ollama)",
 	"OAuth ID": "ID OAuth",
 	"October": "Octubre",
@@ -505,6 +509,7 @@
 	"Reset Vector Storage": "Restableix l'emmagatzematge de vectors",
 	"Response AutoCopy to Clipboard": "Copiar la resposta automàticament al porta-retalls",
 	"Response notifications cannot be activated as the website permissions have been denied. Please visit your browser settings to grant the necessary access.": "Les notificacions de resposta no es poden activar perquè els permisos del lloc web han estat rebutjats. Comprova les preferències del navegador per donar l'accés necessari.",
+	"Response splitting": "",
 	"Role": "Rol",
 	"Rosé Pine": "Rosé Pine",
 	"Rosé Pine Dawn": "Albada Rosé Pine",
@@ -536,6 +541,7 @@
 	"Searched {{count}} sites_many": "S'han cercat {{count}} pàgines",
 	"Searched {{count}} sites_other": "S'han cercat {{count}} pàgines",
 	"Searching \"{{searchQuery}}\"": "Cercant \"{{searchQuery}}\"",
+	"Searching Knowledge for \"{{searchQuery}}\"": "",
 	"Searxng Query URL": "URL de consulta de Searxng",
 	"See readme.md for instructions": "Consulta l'arxiu readme.md per obtenir instruccions",
 	"See what's new": "Veure què hi ha de nou",
@@ -549,7 +555,7 @@
 	"Select a tool": "Seleccionar una eina",
 	"Select an Ollama instance": "Seleccionar una instància d'Ollama",
 	"Select Documents": "Seleccionar documents",
-	"Select Engine": "",
+	"Select Engine": "Seleccionar el motor",
 	"Select model": "Seleccionar un model",
 	"Select only one model to call": "Seleccionar només un model per trucar",
 	"Selected model(s) do not support image inputs": "El(s) model(s) seleccionats no admeten l'entrada d'imatges",
@@ -606,12 +612,14 @@
 	"Tell us more:": "Dona'ns més informació:",
 	"Temperature": "Temperatura",
 	"Template": "Plantilla",
-	"Temporary Chat": "",
+	"Temporary Chat": "Xat temporal",
 	"Text Completion": "Completament de text",
 	"Text-to-Speech Engine": "Motor de text a veu",
 	"Tfs Z": "Tfs Z",
 	"Thanks for your feedback!": "Gràcies pel teu comentari!",
 	"The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "Els desenvolupadors d'aquest complement són voluntaris apassionats de la comunitat. Si trobeu útil aquest complement, considereu contribuir al seu desenvolupament.",
+	"The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "",
+	"The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "",
 	"The score should be a value between 0.0 (0%) and 1.0 (100%).": "El valor de puntuació hauria de ser entre 0.0 (0%) i 1.0 (100%).",
 	"Theme": "Tema",
 	"Thinking...": "Pensant...",
@@ -711,6 +719,7 @@
 	"Write a summary in 50 words that summarizes [topic or keyword].": "Escriu un resum en 50 paraules que resumeixi [tema o paraula clau].",
 	"Yesterday": "Ahir",
 	"You": "Tu",
+	"You can only chat with a maximum of {{maxCount}} file(s) at a time.": "",
 	"You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "Pots personalitzar les teves interaccions amb els models de llenguatge afegint memòries mitjançant el botó 'Gestiona' que hi ha a continuació, fent-les més útils i adaptades a tu.",
 	"You cannot clone a base model": "No es pot clonar un model base",
 	"You have no archived conversations.": "No tens converses arxivades.",

+ 11 - 2
src/lib/i18n/locales/ceb-PH/translation.json

@@ -6,7 +6,6 @@
 	"(latest)": "",
 	"{{ models }}": "",
 	"{{ owner }}: You cannot delete a base model": "",
-	"{{modelName}} is thinking...": "{{modelName}} hunahunaa...",
 	"{{user}}'s Chats": "",
 	"{{webUIName}} Backend Required": "Backend {{webUIName}} gikinahanglan",
 	"*Prompt node ID(s) are required for image generation": "",
@@ -68,7 +67,6 @@
 	"Attach file": "Ilakip ang usa ka file",
 	"Attention to detail": "Pagtagad sa mga detalye",
 	"Audio": "Audio",
-	"Audio settings updated successfully": "",
 	"August": "",
 	"Auto-playback response": "Autoplay nga tubag",
 	"Automatic1111": "",
@@ -138,9 +136,11 @@
 	"Context Length": "Ang gitas-on sa konteksto",
 	"Continue Response": "",
 	"Continue with {{provider}}": "",
+	"Control how message text is split for TTS requests. 'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "",
 	"Controls": "",
 	"Copied": "",
 	"Copied shared chat URL to clipboard!": "",
+	"Copied to clipboard": "",
 	"Copy": "",
 	"Copy Code": "",
 	"Copy last code block": "Kopyaha ang katapusang bloke sa code",
@@ -285,6 +285,7 @@
 	"File": "",
 	"File Mode": "File mode",
 	"File not found.": "Wala makit-an ang file.",
+	"File size should not exceed {{maxSize}} MB.": "",
 	"Files": "",
 	"Filter is now globally disabled": "",
 	"Filter is now globally enabled": "",
@@ -361,6 +362,7 @@
 	"large language models, locally.": "",
 	"Last Active": "",
 	"Last Modified": "",
+	"Leave empty for unlimited": "",
 	"Light": "Kahayag",
 	"Listening...": "",
 	"LLMs can make mistakes. Verify important information.": "Ang mga LLM mahimong masayop. ",
@@ -375,6 +377,8 @@
 	"Manage Pipelines": "",
 	"March": "",
 	"Max Tokens (num_predict)": "",
+	"Max Upload Count": "",
+	"Max Upload Size": "",
 	"Maximum of 3 models can be downloaded simultaneously. Please try again later.": "Ang labing taas nga 3 nga mga disenyo mahimong ma-download nga dungan. ",
 	"May": "",
 	"Memories accessible by LLMs will be shown here.": "",
@@ -505,6 +509,7 @@
 	"Reset Vector Storage": "I-reset ang pagtipig sa vector",
 	"Response AutoCopy to Clipboard": "Awtomatikong kopya sa tubag sa clipboard",
 	"Response notifications cannot be activated as the website permissions have been denied. Please visit your browser settings to grant the necessary access.": "",
+	"Response splitting": "",
 	"Role": "Papel",
 	"Rosé Pine": "Rosé Pine",
 	"Rosé Pine Dawn": "Aube Pine Rosé",
@@ -535,6 +540,7 @@
 	"Searched {{count}} sites_one": "",
 	"Searched {{count}} sites_other": "",
 	"Searching \"{{searchQuery}}\"": "",
+	"Searching Knowledge for \"{{searchQuery}}\"": "",
 	"Searxng Query URL": "",
 	"See readme.md for instructions": "Tan-awa ang readme.md alang sa mga panudlo",
 	"See what's new": "Tan-awa unsay bag-o",
@@ -611,6 +617,8 @@
 	"Tfs Z": "Tfs Z",
 	"Thanks for your feedback!": "",
 	"The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "",
+	"The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "",
+	"The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "",
 	"The score should be a value between 0.0 (0%) and 1.0 (100%).": "",
 	"Theme": "Tema",
 	"Thinking...": "",
@@ -710,6 +718,7 @@
 	"Write a summary in 50 words that summarizes [topic or keyword].": "Pagsulat og 50 ka pulong nga summary nga nagsumaryo [topic o keyword].",
 	"Yesterday": "",
 	"You": "",
+	"You can only chat with a maximum of {{maxCount}} file(s) at a time.": "",
 	"You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "",
 	"You cannot clone a base model": "",
 	"You have no archived conversations.": "",

+ 11 - 2
src/lib/i18n/locales/de-DE/translation.json

@@ -6,7 +6,6 @@
 	"(latest)": "(neueste)",
 	"{{ models }}": "{{ models }}",
 	"{{ owner }}: You cannot delete a base model": "{{ owner }}: Sie können ein Basismodell nicht löschen",
-	"{{modelName}} is thinking...": "{{modelName}} denkt nach...",
 	"{{user}}'s Chats": "{{user}}s Unterhaltungen",
 	"{{webUIName}} Backend Required": "{{webUIName}}-Backend erforderlich",
 	"*Prompt node ID(s) are required for image generation": "",
@@ -68,7 +67,6 @@
 	"Attach file": "Datei anhängen",
 	"Attention to detail": "Aufmerksamkeit für Details",
 	"Audio": "Audio",
-	"Audio settings updated successfully": "Audioeinstellungen erfolgreich aktualisiert",
 	"August": "August",
 	"Auto-playback response": "Antwort automatisch abspielen",
 	"Automatic1111": "",
@@ -138,9 +136,11 @@
 	"Context Length": "Kontextlänge",
 	"Continue Response": "Antwort fortsetzen",
 	"Continue with {{provider}}": "Mit {{provider}} fortfahren",
+	"Control how message text is split for TTS requests. 'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "",
 	"Controls": "",
 	"Copied": "",
 	"Copied shared chat URL to clipboard!": "Freigabelink in die Zwischenablage kopiert!",
+	"Copied to clipboard": "",
 	"Copy": "Kopieren",
 	"Copy Code": "",
 	"Copy last code block": "Letzten Codeblock kopieren",
@@ -285,6 +285,7 @@
 	"File": "Datei",
 	"File Mode": "Datei-Modus",
 	"File not found.": "Datei nicht gefunden.",
+	"File size should not exceed {{maxSize}} MB.": "",
 	"Files": "",
 	"Filter is now globally disabled": "Filter ist jetzt global deaktiviert",
 	"Filter is now globally enabled": "Filter ist jetzt global aktiviert",
@@ -361,6 +362,7 @@
 	"large language models, locally.": "",
 	"Last Active": "Zuletzt aktiv",
 	"Last Modified": "Zuletzt bearbeitet",
+	"Leave empty for unlimited": "",
 	"Light": "Hell",
 	"Listening...": "Höre zu...",
 	"LLMs can make mistakes. Verify important information.": "LLMs können Fehler machen. Überprüfe wichtige Informationen.",
@@ -375,6 +377,8 @@
 	"Manage Pipelines": "Pipelines verwalten",
 	"March": "März",
 	"Max Tokens (num_predict)": "Maximale Tokenanzahl (num_predict)",
+	"Max Upload Count": "",
+	"Max Upload Size": "",
 	"Maximum of 3 models can be downloaded simultaneously. Please try again later.": "Es können maximal 3 Modelle gleichzeitig heruntergeladen werden. Bitte versuchen Sie es später erneut.",
 	"May": "Mai",
 	"Memories accessible by LLMs will be shown here.": "Erinnerungen, die für Modelle zugänglich sind, werden hier angezeigt.",
@@ -505,6 +509,7 @@
 	"Reset Vector Storage": "Vektorspeicher zurücksetzen",
 	"Response AutoCopy to Clipboard": "Antwort automatisch in die Zwischenablage kopieren",
 	"Response notifications cannot be activated as the website permissions have been denied. Please visit your browser settings to grant the necessary access.": "Benachrichtigungen können nicht aktiviert werden, da die Website-Berechtigungen abgelehnt wurden. Bitte besuchen Sie Ihre Browser-Einstellungen, um den erforderlichen Zugriff zu gewähren.",
+	"Response splitting": "",
 	"Role": "Rolle",
 	"Rosé Pine": "Rosé Pine",
 	"Rosé Pine Dawn": "Rosé Pine Dawn",
@@ -535,6 +540,7 @@
 	"Searched {{count}} sites_one": "{{count}} Seite durchsucht",
 	"Searched {{count}} sites_other": "{{count}} Seiten durchsucht",
 	"Searching \"{{searchQuery}}\"": "Suche nach \"{{searchQuery}}\"",
+	"Searching Knowledge for \"{{searchQuery}}\"": "",
 	"Searxng Query URL": "Searxng-Abfrage-URL",
 	"See readme.md for instructions": "Anleitung in readme.md anzeigen",
 	"See what's new": "Entdecken Sie die Neuigkeiten",
@@ -611,6 +617,8 @@
 	"Tfs Z": "Tfs Z",
 	"Thanks for your feedback!": "Danke für Ihr Feedback!",
 	"The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "",
+	"The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "",
+	"The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "",
 	"The score should be a value between 0.0 (0%) and 1.0 (100%).": "Die Punktzahl sollte ein Wert zwischen 0,0 (0 %) und 1,0 (100 %) sein.",
 	"Theme": "Design",
 	"Thinking...": "Denke nach...",
@@ -710,6 +718,7 @@
 	"Write a summary in 50 words that summarizes [topic or keyword].": "Schreibe eine kurze Zusammenfassung in 50 Wörtern, die [Thema oder Schlüsselwort] zusammenfasst.",
 	"Yesterday": "Gestern",
 	"You": "Sie",
+	"You can only chat with a maximum of {{maxCount}} file(s) at a time.": "",
 	"You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "Personalisieren Sie Interaktionen mit LLMs, indem Sie über die Schaltfläche \"Verwalten\" Erinnerungen hinzufügen.",
 	"You cannot clone a base model": "Sie können Basismodelle nicht klonen",
 	"You have no archived conversations.": "Sie haben keine archivierten Unterhaltungen.",

+ 11 - 2
src/lib/i18n/locales/dg-DG/translation.json

@@ -6,7 +6,6 @@
 	"(latest)": "(much latest)",
 	"{{ models }}": "",
 	"{{ owner }}: You cannot delete a base model": "",
-	"{{modelName}} is thinking...": "{{modelName}} is thinkin'...",
 	"{{user}}'s Chats": "",
 	"{{webUIName}} Backend Required": "{{webUIName}} Backend Much Required",
 	"*Prompt node ID(s) are required for image generation": "",
@@ -68,7 +67,6 @@
 	"Attach file": "Attach file",
 	"Attention to detail": "",
 	"Audio": "Audio",
-	"Audio settings updated successfully": "",
 	"August": "",
 	"Auto-playback response": "Auto-playback response",
 	"Automatic1111": "",
@@ -138,9 +136,11 @@
 	"Context Length": "Context Length",
 	"Continue Response": "",
 	"Continue with {{provider}}": "",
+	"Control how message text is split for TTS requests. 'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "",
 	"Controls": "",
 	"Copied": "",
 	"Copied shared chat URL to clipboard!": "",
+	"Copied to clipboard": "",
 	"Copy": "",
 	"Copy Code": "",
 	"Copy last code block": "Copy last code block",
@@ -285,6 +285,7 @@
 	"File": "",
 	"File Mode": "Bark Mode",
 	"File not found.": "Bark not found.",
+	"File size should not exceed {{maxSize}} MB.": "",
 	"Files": "",
 	"Filter is now globally disabled": "",
 	"Filter is now globally enabled": "",
@@ -361,6 +362,7 @@
 	"large language models, locally.": "",
 	"Last Active": "",
 	"Last Modified": "",
+	"Leave empty for unlimited": "",
 	"Light": "Light",
 	"Listening...": "",
 	"LLMs can make mistakes. Verify important information.": "LLMs can make borks. Verify important info.",
@@ -375,6 +377,8 @@
 	"Manage Pipelines": "",
 	"March": "",
 	"Max Tokens (num_predict)": "",
+	"Max Upload Count": "",
+	"Max Upload Size": "",
 	"Maximum of 3 models can be downloaded simultaneously. Please try again later.": "Maximum of 3 models can be downloaded simultaneously. Please try again later.",
 	"May": "",
 	"Memories accessible by LLMs will be shown here.": "",
@@ -505,6 +509,7 @@
 	"Reset Vector Storage": "Reset Vector Storage",
 	"Response AutoCopy to Clipboard": "Copy Bark Auto Bark",
 	"Response notifications cannot be activated as the website permissions have been denied. Please visit your browser settings to grant the necessary access.": "",
+	"Response splitting": "",
 	"Role": "Role",
 	"Rosé Pine": "Rosé Pine",
 	"Rosé Pine Dawn": "Rosé Pine Dawn",
@@ -537,6 +542,7 @@
 	"Searched {{count}} sites_many": "",
 	"Searched {{count}} sites_other": "",
 	"Searching \"{{searchQuery}}\"": "",
+	"Searching Knowledge for \"{{searchQuery}}\"": "",
 	"Searxng Query URL": "",
 	"See readme.md for instructions": "See readme.md for instructions wow",
 	"See what's new": "See what's new so amaze",
@@ -613,6 +619,8 @@
 	"Tfs Z": "Tfs Z much Z",
 	"Thanks for your feedback!": "",
 	"The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "",
+	"The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "",
+	"The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "",
 	"The score should be a value between 0.0 (0%) and 1.0 (100%).": "",
 	"Theme": "Theme much theme",
 	"Thinking...": "",
@@ -712,6 +720,7 @@
 	"Write a summary in 50 words that summarizes [topic or keyword].": "Write a summary in 50 words that summarizes [topic or keyword]. Much summarize.",
 	"Yesterday": "",
 	"You": "",
+	"You can only chat with a maximum of {{maxCount}} file(s) at a time.": "",
 	"You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "",
 	"You cannot clone a base model": "",
 	"You have no archived conversations.": "",

+ 11 - 2
src/lib/i18n/locales/en-GB/translation.json

@@ -6,7 +6,6 @@
 	"(latest)": "",
 	"{{ models }}": "",
 	"{{ owner }}: You cannot delete a base model": "",
-	"{{modelName}} is thinking...": "",
 	"{{user}}'s Chats": "",
 	"{{webUIName}} Backend Required": "",
 	"*Prompt node ID(s) are required for image generation": "",
@@ -68,7 +67,6 @@
 	"Attach file": "",
 	"Attention to detail": "",
 	"Audio": "",
-	"Audio settings updated successfully": "",
 	"August": "",
 	"Auto-playback response": "",
 	"Automatic1111": "",
@@ -138,9 +136,11 @@
 	"Context Length": "",
 	"Continue Response": "",
 	"Continue with {{provider}}": "",
+	"Control how message text is split for TTS requests. 'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "",
 	"Controls": "",
 	"Copied": "",
 	"Copied shared chat URL to clipboard!": "",
+	"Copied to clipboard": "",
 	"Copy": "",
 	"Copy Code": "",
 	"Copy last code block": "",
@@ -285,6 +285,7 @@
 	"File": "",
 	"File Mode": "",
 	"File not found.": "",
+	"File size should not exceed {{maxSize}} MB.": "",
 	"Files": "",
 	"Filter is now globally disabled": "",
 	"Filter is now globally enabled": "",
@@ -361,6 +362,7 @@
 	"large language models, locally.": "",
 	"Last Active": "",
 	"Last Modified": "",
+	"Leave empty for unlimited": "",
 	"Light": "",
 	"Listening...": "",
 	"LLMs can make mistakes. Verify important information.": "",
@@ -375,6 +377,8 @@
 	"Manage Pipelines": "",
 	"March": "",
 	"Max Tokens (num_predict)": "",
+	"Max Upload Count": "",
+	"Max Upload Size": "",
 	"Maximum of 3 models can be downloaded simultaneously. Please try again later.": "",
 	"May": "",
 	"Memories accessible by LLMs will be shown here.": "",
@@ -505,6 +509,7 @@
 	"Reset Vector Storage": "",
 	"Response AutoCopy to Clipboard": "",
 	"Response notifications cannot be activated as the website permissions have been denied. Please visit your browser settings to grant the necessary access.": "",
+	"Response splitting": "",
 	"Role": "",
 	"Rosé Pine": "",
 	"Rosé Pine Dawn": "",
@@ -535,6 +540,7 @@
 	"Searched {{count}} sites_one": "",
 	"Searched {{count}} sites_other": "",
 	"Searching \"{{searchQuery}}\"": "",
+	"Searching Knowledge for \"{{searchQuery}}\"": "",
 	"Searxng Query URL": "",
 	"See readme.md for instructions": "",
 	"See what's new": "",
@@ -611,6 +617,8 @@
 	"Tfs Z": "",
 	"Thanks for your feedback!": "",
 	"The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "",
+	"The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "",
+	"The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "",
 	"The score should be a value between 0.0 (0%) and 1.0 (100%).": "",
 	"Theme": "",
 	"Thinking...": "",
@@ -710,6 +718,7 @@
 	"Write a summary in 50 words that summarizes [topic or keyword].": "",
 	"Yesterday": "",
 	"You": "",
+	"You can only chat with a maximum of {{maxCount}} file(s) at a time.": "",
 	"You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "",
 	"You cannot clone a base model": "",
 	"You have no archived conversations.": "",

+ 11 - 2
src/lib/i18n/locales/en-US/translation.json

@@ -6,7 +6,6 @@
 	"(latest)": "",
 	"{{ models }}": "",
 	"{{ owner }}: You cannot delete a base model": "",
-	"{{modelName}} is thinking...": "",
 	"{{user}}'s Chats": "",
 	"{{webUIName}} Backend Required": "",
 	"*Prompt node ID(s) are required for image generation": "",
@@ -68,7 +67,6 @@
 	"Attach file": "",
 	"Attention to detail": "",
 	"Audio": "",
-	"Audio settings updated successfully": "",
 	"August": "",
 	"Auto-playback response": "",
 	"Automatic1111": "",
@@ -138,9 +136,11 @@
 	"Context Length": "",
 	"Continue Response": "",
 	"Continue with {{provider}}": "",
+	"Control how message text is split for TTS requests. 'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "",
 	"Controls": "",
 	"Copied": "",
 	"Copied shared chat URL to clipboard!": "",
+	"Copied to clipboard": "",
 	"Copy": "",
 	"Copy Code": "",
 	"Copy last code block": "",
@@ -285,6 +285,7 @@
 	"File": "",
 	"File Mode": "",
 	"File not found.": "",
+	"File size should not exceed {{maxSize}} MB.": "",
 	"Files": "",
 	"Filter is now globally disabled": "",
 	"Filter is now globally enabled": "",
@@ -361,6 +362,7 @@
 	"large language models, locally.": "",
 	"Last Active": "",
 	"Last Modified": "",
+	"Leave empty for unlimited": "",
 	"Light": "",
 	"Listening...": "",
 	"LLMs can make mistakes. Verify important information.": "",
@@ -375,6 +377,8 @@
 	"Manage Pipelines": "",
 	"March": "",
 	"Max Tokens (num_predict)": "",
+	"Max Upload Count": "",
+	"Max Upload Size": "",
 	"Maximum of 3 models can be downloaded simultaneously. Please try again later.": "",
 	"May": "",
 	"Memories accessible by LLMs will be shown here.": "",
@@ -505,6 +509,7 @@
 	"Reset Vector Storage": "",
 	"Response AutoCopy to Clipboard": "",
 	"Response notifications cannot be activated as the website permissions have been denied. Please visit your browser settings to grant the necessary access.": "",
+	"Response splitting": "",
 	"Role": "",
 	"Rosé Pine": "",
 	"Rosé Pine Dawn": "",
@@ -535,6 +540,7 @@
 	"Searched {{count}} sites_one": "",
 	"Searched {{count}} sites_other": "",
 	"Searching \"{{searchQuery}}\"": "",
+	"Searching Knowledge for \"{{searchQuery}}\"": "",
 	"Searxng Query URL": "",
 	"See readme.md for instructions": "",
 	"See what's new": "",
@@ -611,6 +617,8 @@
 	"Tfs Z": "",
 	"Thanks for your feedback!": "",
 	"The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "",
+	"The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "",
+	"The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "",
 	"The score should be a value between 0.0 (0%) and 1.0 (100%).": "",
 	"Theme": "",
 	"Thinking...": "",
@@ -710,6 +718,7 @@
 	"Write a summary in 50 words that summarizes [topic or keyword].": "",
 	"Yesterday": "",
 	"You": "",
+	"You can only chat with a maximum of {{maxCount}} file(s) at a time.": "",
 	"You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "",
 	"You cannot clone a base model": "",
 	"You have no archived conversations.": "",

+ 11 - 2
src/lib/i18n/locales/es-ES/translation.json

@@ -6,7 +6,6 @@
 	"(latest)": "(latest)",
 	"{{ models }}": "{{ models }}",
 	"{{ owner }}: You cannot delete a base model": "{{ owner }}: No se puede eliminar un modelo base",
-	"{{modelName}} is thinking...": "{{modelName}} está pensando...",
 	"{{user}}'s Chats": "{{user}}'s Chats",
 	"{{webUIName}} Backend Required": "{{webUIName}} Servidor Requerido",
 	"*Prompt node ID(s) are required for image generation": "",
@@ -68,7 +67,6 @@
 	"Attach file": "Adjuntar archivo",
 	"Attention to detail": "Detalle preciso",
 	"Audio": "Audio",
-	"Audio settings updated successfully": "Opciones de audio actualizadas correctamente",
 	"August": "Agosto",
 	"Auto-playback response": "Respuesta de reproducción automática",
 	"Automatic1111": "",
@@ -138,9 +136,11 @@
 	"Context Length": "Longitud del contexto",
 	"Continue Response": "Continuar Respuesta",
 	"Continue with {{provider}}": "Continuar con {{provider}}",
+	"Control how message text is split for TTS requests. 'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "",
 	"Controls": "",
 	"Copied": "",
 	"Copied shared chat URL to clipboard!": "¡URL de chat compartido copiado al portapapeles!",
+	"Copied to clipboard": "",
 	"Copy": "Copiar",
 	"Copy Code": "",
 	"Copy last code block": "Copia el último bloque de código",
@@ -285,6 +285,7 @@
 	"File": "Archivo",
 	"File Mode": "Modo de archivo",
 	"File not found.": "Archivo no encontrado.",
+	"File size should not exceed {{maxSize}} MB.": "",
 	"Files": "",
 	"Filter is now globally disabled": "",
 	"Filter is now globally enabled": "",
@@ -361,6 +362,7 @@
 	"large language models, locally.": "",
 	"Last Active": "Última Actividad",
 	"Last Modified": "Modificado por última vez",
+	"Leave empty for unlimited": "",
 	"Light": "Claro",
 	"Listening...": "Escuchando...",
 	"LLMs can make mistakes. Verify important information.": "Los LLM pueden cometer errores. Verifica la información importante.",
@@ -375,6 +377,8 @@
 	"Manage Pipelines": "Administrar Pipelines",
 	"March": "Marzo",
 	"Max Tokens (num_predict)": "Máximo de fichas (num_predict)",
+	"Max Upload Count": "",
+	"Max Upload Size": "",
 	"Maximum of 3 models can be downloaded simultaneously. Please try again later.": "Se pueden descargar un máximo de 3 modelos simultáneamente. Por favor, inténtelo de nuevo más tarde.",
 	"May": "Mayo",
 	"Memories accessible by LLMs will be shown here.": "Las memorias accesibles por los LLMs se mostrarán aquí.",
@@ -505,6 +509,7 @@
 	"Reset Vector Storage": "Restablecer almacenamiento vectorial",
 	"Response AutoCopy to Clipboard": "Copiar respuesta automáticamente al portapapeles",
 	"Response notifications cannot be activated as the website permissions have been denied. Please visit your browser settings to grant the necessary access.": "Las notificaciones de respuesta no pueden activarse debido a que los permisos del sitio web han sido denegados. Por favor, visite las configuraciones de su navegador para otorgar el acceso necesario.",
+	"Response splitting": "",
 	"Role": "Rol",
 	"Rosé Pine": "Rosé Pine",
 	"Rosé Pine Dawn": "Rosé Pine Dawn",
@@ -536,6 +541,7 @@
 	"Searched {{count}} sites_many": "Buscado {{count}} sites_many",
 	"Searched {{count}} sites_other": "Buscó {{count}} sites_other",
 	"Searching \"{{searchQuery}}\"": "Buscando \"{{searchQuery}}\"",
+	"Searching Knowledge for \"{{searchQuery}}\"": "",
 	"Searxng Query URL": "Searxng URL de consulta",
 	"See readme.md for instructions": "Vea el readme.md para instrucciones",
 	"See what's new": "Ver las novedades",
@@ -612,6 +618,8 @@
 	"Tfs Z": "Tfs Z",
 	"Thanks for your feedback!": "¡Gracias por tu retroalimentación!",
 	"The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "",
+	"The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "",
+	"The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "",
 	"The score should be a value between 0.0 (0%) and 1.0 (100%).": "El puntaje debe ser un valor entre 0.0 (0%) y 1.0 (100%).",
 	"Theme": "Tema",
 	"Thinking...": "Pensando...",
@@ -711,6 +719,7 @@
 	"Write a summary in 50 words that summarizes [topic or keyword].": "Escribe un resumen en 50 palabras que resuma [tema o palabra clave].",
 	"Yesterday": "Ayer",
 	"You": "Usted",
+	"You can only chat with a maximum of {{maxCount}} file(s) at a time.": "",
 	"You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "Puede personalizar sus interacciones con LLMs añadiendo memorias a través del botón 'Gestionar' debajo, haciendo que sean más útiles y personalizados para usted.",
 	"You cannot clone a base model": "No se puede clonar un modelo base",
 	"You have no archived conversations.": "No tiene conversaciones archivadas.",

+ 11 - 2
src/lib/i18n/locales/fa-IR/translation.json

@@ -6,7 +6,6 @@
 	"(latest)": "(آخرین)",
 	"{{ models }}": "{{ مدل }}",
 	"{{ owner }}: You cannot delete a base model": "{{ مالک }}: شما نمیتوانید یک مدل پایه را حذف کنید",
-	"{{modelName}} is thinking...": "{{modelName}} در حال فکر کردن است...",
 	"{{user}}'s Chats": "{{user}} چت ها",
 	"{{webUIName}} Backend Required": "بکند {{webUIName}} نیاز است.",
 	"*Prompt node ID(s) are required for image generation": "",
@@ -68,7 +67,6 @@
 	"Attach file": "پیوست فایل",
 	"Attention to detail": "دقیق",
 	"Audio": "صدا",
-	"Audio settings updated successfully": "",
 	"August": "آگوست",
 	"Auto-playback response": "پخش خودکار پاسخ ",
 	"Automatic1111": "",
@@ -138,9 +136,11 @@
 	"Context Length": "طول زمینه",
 	"Continue Response": "ادامه پاسخ",
 	"Continue with {{provider}}": "",
+	"Control how message text is split for TTS requests. 'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "",
 	"Controls": "",
 	"Copied": "",
 	"Copied shared chat URL to clipboard!": "URL چت به کلیپ بورد کپی شد!",
+	"Copied to clipboard": "",
 	"Copy": "کپی",
 	"Copy Code": "",
 	"Copy last code block": "کپی آخرین بلوک کد",
@@ -285,6 +285,7 @@
 	"File": "",
 	"File Mode": "حالت فایل",
 	"File not found.": "فایل یافت نشد.",
+	"File size should not exceed {{maxSize}} MB.": "",
 	"Files": "",
 	"Filter is now globally disabled": "",
 	"Filter is now globally enabled": "",
@@ -361,6 +362,7 @@
 	"large language models, locally.": "",
 	"Last Active": "آخرین فعال",
 	"Last Modified": "",
+	"Leave empty for unlimited": "",
 	"Light": "روشن",
 	"Listening...": "",
 	"LLMs can make mistakes. Verify important information.": "مدل\u200cهای زبانی بزرگ می\u200cتوانند اشتباه کنند. اطلاعات مهم را راستی\u200cآزمایی کنید.",
@@ -375,6 +377,8 @@
 	"Manage Pipelines": "مدیریت خطوط لوله",
 	"March": "مارچ",
 	"Max Tokens (num_predict)": "توکنهای بیشینه (num_predict)",
+	"Max Upload Count": "",
+	"Max Upload Size": "",
 	"Maximum of 3 models can be downloaded simultaneously. Please try again later.": "حداکثر 3 مدل را می توان به طور همزمان دانلود کرد. لطفاً بعداً دوباره امتحان کنید.",
 	"May": "ماهی",
 	"Memories accessible by LLMs will be shown here.": "حافظه های دسترسی به LLMs در اینجا نمایش داده می شوند.",
@@ -505,6 +509,7 @@
 	"Reset Vector Storage": "بازنشانی ذخیره سازی برداری",
 	"Response AutoCopy to Clipboard": "کپی خودکار پاسخ به کلیپ بورد",
 	"Response notifications cannot be activated as the website permissions have been denied. Please visit your browser settings to grant the necessary access.": "",
+	"Response splitting": "",
 	"Role": "نقش",
 	"Rosé Pine": "Rosé Pine",
 	"Rosé Pine Dawn": "Rosé Pine Dawn",
@@ -535,6 +540,7 @@
 	"Searched {{count}} sites_one": "جستجو {{count}} sites_one",
 	"Searched {{count}} sites_other": "جستجو {{count}} sites_other",
 	"Searching \"{{searchQuery}}\"": "",
+	"Searching Knowledge for \"{{searchQuery}}\"": "",
 	"Searxng Query URL": "نشانی وب جستجوی Searxng",
 	"See readme.md for instructions": "برای مشاهده دستورالعمل\u200cها به readme.md مراجعه کنید",
 	"See what's new": "ببینید موارد جدید چه بوده",
@@ -611,6 +617,8 @@
 	"Tfs Z": "Tfs Z",
 	"Thanks for your feedback!": "با تشکر از بازخورد شما!",
 	"The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "",
+	"The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "",
+	"The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "",
 	"The score should be a value between 0.0 (0%) and 1.0 (100%).": "امتیاز باید یک مقدار بین 0.0 (0%) و 1.0 (100%) باشد.",
 	"Theme": "قالب",
 	"Thinking...": "",
@@ -710,6 +718,7 @@
 	"Write a summary in 50 words that summarizes [topic or keyword].": "خلاصه ای در 50 کلمه بنویسید که [موضوع یا کلمه کلیدی] را خلاصه کند.",
 	"Yesterday": "دیروز",
 	"You": "شما",
+	"You can only chat with a maximum of {{maxCount}} file(s) at a time.": "",
 	"You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "",
 	"You cannot clone a base model": "شما نمیتوانید یک مدل پایه را کلون کنید",
 	"You have no archived conversations.": "شما هیچ گفتگوی ذخیره شده ندارید.",

+ 11 - 2
src/lib/i18n/locales/fi-FI/translation.json

@@ -6,7 +6,6 @@
 	"(latest)": "(uusin)",
 	"{{ models }}": "{{ mallit }}",
 	"{{ owner }}: You cannot delete a base model": "{{ omistaja }}: Perusmallia ei voi poistaa",
-	"{{modelName}} is thinking...": "{{modelName}} miettii...",
 	"{{user}}'s Chats": "{{user}}:n keskustelut",
 	"{{webUIName}} Backend Required": "{{webUIName}} backend vaaditaan",
 	"*Prompt node ID(s) are required for image generation": "",
@@ -68,7 +67,6 @@
 	"Attach file": "Liitä tiedosto",
 	"Attention to detail": "Huomio yksityiskohtiin",
 	"Audio": "Ääni",
-	"Audio settings updated successfully": "",
 	"August": "elokuu",
 	"Auto-playback response": "Soita vastaus automaattisesti",
 	"Automatic1111": "",
@@ -138,9 +136,11 @@
 	"Context Length": "Kontekstin pituus",
 	"Continue Response": "Jatka vastausta",
 	"Continue with {{provider}}": "",
+	"Control how message text is split for TTS requests. 'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "",
 	"Controls": "",
 	"Copied": "",
 	"Copied shared chat URL to clipboard!": "Jaettu keskustelulinkki kopioitu leikepöydälle!",
+	"Copied to clipboard": "",
 	"Copy": "Kopioi",
 	"Copy Code": "",
 	"Copy last code block": "Kopioi viimeisin koodilohko",
@@ -285,6 +285,7 @@
 	"File": "",
 	"File Mode": "Tiedostotila",
 	"File not found.": "Tiedostoa ei löytynyt.",
+	"File size should not exceed {{maxSize}} MB.": "",
 	"Files": "",
 	"Filter is now globally disabled": "",
 	"Filter is now globally enabled": "",
@@ -361,6 +362,7 @@
 	"large language models, locally.": "",
 	"Last Active": "Viimeksi aktiivinen",
 	"Last Modified": "",
+	"Leave empty for unlimited": "",
 	"Light": "Vaalea",
 	"Listening...": "",
 	"LLMs can make mistakes. Verify important information.": "Kielimallit voivat tehdä virheitä. Varmista tärkeät tiedot.",
@@ -375,6 +377,8 @@
 	"Manage Pipelines": "Hallitse putkia",
 	"March": "maaliskuu",
 	"Max Tokens (num_predict)": "Tokenien enimmäismäärä (num_predict)",
+	"Max Upload Count": "",
+	"Max Upload Size": "",
 	"Maximum of 3 models can be downloaded simultaneously. Please try again later.": "Enintään 3 mallia voidaan ladata samanaikaisesti. Yritä myöhemmin uudelleen.",
 	"May": "toukokuu",
 	"Memories accessible by LLMs will be shown here.": "Muistitiedostot, joita LLM-ohjelmat käyttävät, näkyvät tässä.",
@@ -505,6 +509,7 @@
 	"Reset Vector Storage": "Tyhjennä vektorivarasto",
 	"Response AutoCopy to Clipboard": "Vastauksen automaattikopiointi leikepöydälle",
 	"Response notifications cannot be activated as the website permissions have been denied. Please visit your browser settings to grant the necessary access.": "",
+	"Response splitting": "",
 	"Role": "Rooli",
 	"Rosé Pine": "Rosee-mänty",
 	"Rosé Pine Dawn": "Aamuinen Rosee-mänty",
@@ -535,6 +540,7 @@
 	"Searched {{count}} sites_one": "Haettu {{count}} sites_one",
 	"Searched {{count}} sites_other": "Haku {{count}} sites_other",
 	"Searching \"{{searchQuery}}\"": "",
+	"Searching Knowledge for \"{{searchQuery}}\"": "",
 	"Searxng Query URL": "Searxng-kyselyn URL-osoite",
 	"See readme.md for instructions": "Katso lisää ohjeita readme.md:stä",
 	"See what's new": "Katso, mitä uutta",
@@ -611,6 +617,8 @@
 	"Tfs Z": "TFS Z",
 	"Thanks for your feedback!": "Kiitos palautteestasi!",
 	"The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "",
+	"The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "",
+	"The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "",
 	"The score should be a value between 0.0 (0%) and 1.0 (100%).": "Pisteytyksen tulee olla arvo välillä 0.0 (0%) ja 1.0 (100%).",
 	"Theme": "Teema",
 	"Thinking...": "",
@@ -710,6 +718,7 @@
 	"Write a summary in 50 words that summarizes [topic or keyword].": "Kirjoita 50 sanan yhteenveto, joka tiivistää [aihe tai avainsana].",
 	"Yesterday": "Eilen",
 	"You": "Sinä",
+	"You can only chat with a maximum of {{maxCount}} file(s) at a time.": "",
 	"You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "",
 	"You cannot clone a base model": "Perusmallia ei voi kloonata",
 	"You have no archived conversations.": "Sinulla ei ole arkistoituja keskusteluja.",

+ 12 - 3
src/lib/i18n/locales/fr-CA/translation.json

@@ -6,7 +6,6 @@
 	"(latest)": "(dernier)",
 	"{{ models }}": "{{ modèles }}",
 	"{{ owner }}: You cannot delete a base model": "{{ propriétaire }} : Vous ne pouvez pas supprimer un modèle de base.",
-	"{{modelName}} is thinking...": "{{modelName}} est en train de réfléchir...",
 	"{{user}}'s Chats": "Discussions de {{user}}",
 	"{{webUIName}} Backend Required": "Backend {{webUIName}} requis",
 	"*Prompt node ID(s) are required for image generation": "",
@@ -68,7 +67,6 @@
 	"Attach file": "Joindre un document",
 	"Attention to detail": "Attention aux détails",
 	"Audio": "Audio",
-	"Audio settings updated successfully": "Les paramètres audio ont été mis à jour avec succès",
 	"August": "Août",
 	"Auto-playback response": "Réponse de lecture automatique",
 	"Automatic1111": "",
@@ -138,9 +136,11 @@
 	"Context Length": "Longueur du contexte",
 	"Continue Response": "Continuer la réponse",
 	"Continue with {{provider}}": "Continuer avec {{provider}}",
-	"Controls": "",
+	"Control how message text is split for TTS requests. 'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "Contrôle comment le texte des messages est divisé pour les demandes de TTS. 'Ponctuation' divise en phrases, 'paragraphes' divise en paragraphes et 'aucun' garde le message comme une seule chaîne.",
+	"Controls": "Contrôles",
 	"Copied": "",
 	"Copied shared chat URL to clipboard!": "URL du chat copiée dans le presse-papiers\u00a0!",
+	"Copied to clipboard": "",
 	"Copy": "Copie",
 	"Copy Code": "",
 	"Copy last code block": "Copier le dernier bloc de code",
@@ -285,6 +285,7 @@
 	"File": "Fichier",
 	"File Mode": "Mode fichier",
 	"File not found.": "Fichier introuvable.",
+	"File size should not exceed {{maxSize}} MB.": "",
 	"Files": "",
 	"Filter is now globally disabled": "Le filtre est maintenant désactivé globalement",
 	"Filter is now globally enabled": "Le filtre est désormais activé globalement",
@@ -361,6 +362,7 @@
 	"large language models, locally.": "",
 	"Last Active": "Dernière activité",
 	"Last Modified": "Dernière modification",
+	"Leave empty for unlimited": "",
 	"Light": "Lumineux",
 	"Listening...": "En train d'écouter...",
 	"LLMs can make mistakes. Verify important information.": "Les LLM peuvent faire des erreurs. Vérifiez les informations importantes.",
@@ -375,6 +377,8 @@
 	"Manage Pipelines": "Gérer les pipelines",
 	"March": "Mars",
 	"Max Tokens (num_predict)": "Tokens maximaux (num_predict)",
+	"Max Upload Count": "",
+	"Max Upload Size": "",
 	"Maximum of 3 models can be downloaded simultaneously. Please try again later.": "Un maximum de 3 modèles peut être téléchargé en même temps. Veuillez réessayer ultérieurement.",
 	"May": "Mai",
 	"Memories accessible by LLMs will be shown here.": "Les mémoires accessibles par les LLMs seront affichées ici.",
@@ -505,6 +509,7 @@
 	"Reset Vector Storage": "Réinitialiser le stockage des vecteurs",
 	"Response AutoCopy to Clipboard": "Copie automatique de la réponse vers le presse-papiers",
 	"Response notifications cannot be activated as the website permissions have been denied. Please visit your browser settings to grant the necessary access.": "Les notifications de réponse ne peuvent pas être activées car les autorisations du site web ont été refusées. Veuillez visiter les paramètres de votre navigateur pour accorder l'accès nécessaire.",
+	"Response splitting": "Fractionnement de la réponse",
 	"Role": "Rôle",
 	"Rosé Pine": "Pin rosé",
 	"Rosé Pine Dawn": "Aube de Pin Rosé",
@@ -536,6 +541,7 @@
 	"Searched {{count}} sites_many": "Recherché {{count}} sites_many",
 	"Searched {{count}} sites_other": "Recherché {{count}} sites_autres",
 	"Searching \"{{searchQuery}}\"": "Recherche de « {{searchQuery}} »",
+	"Searching Knowledge for \"{{searchQuery}}\"": "",
 	"Searxng Query URL": "URL de recherche Searxng",
 	"See readme.md for instructions": "Voir le fichier readme.md pour les instructions",
 	"See what's new": "Découvrez les nouvelles fonctionnalités",
@@ -612,6 +618,8 @@
 	"Tfs Z": "Tfs Z",
 	"Thanks for your feedback!": "Merci pour vos commentaires !",
 	"The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "",
+	"The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "",
+	"The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "",
 	"The score should be a value between 0.0 (0%) and 1.0 (100%).": "Le score doit être une valeur comprise entre 0,0 (0\u00a0%) et 1,0 (100\u00a0%).",
 	"Theme": "Thème",
 	"Thinking...": "En train de réfléchir...",
@@ -711,6 +719,7 @@
 	"Write a summary in 50 words that summarizes [topic or keyword].": "Rédigez un résumé de 50 mots qui résume [sujet ou mot-clé].",
 	"Yesterday": "Hier",
 	"You": "Vous",
+	"You can only chat with a maximum of {{maxCount}} file(s) at a time.": "",
 	"You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "Vous pouvez personnaliser vos interactions avec les LLM en ajoutant des souvenirs via le bouton 'Gérer' ci-dessous, ce qui les rendra plus utiles et adaptés à vos besoins.",
 	"You cannot clone a base model": "Vous ne pouvez pas cloner un modèle de base",
 	"You have no archived conversations.": "Vous n'avez aucune conversation archivée",

+ 81 - 72
src/lib/i18n/locales/fr-FR/translation.json

@@ -6,31 +6,30 @@
 	"(latest)": "(dernier)",
 	"{{ models }}": "{{ modèles }}",
 	"{{ owner }}: You cannot delete a base model": "{{ propriétaire }} : Vous ne pouvez pas supprimer un modèle de base.",
-	"{{modelName}} is thinking...": "{{modelName}} est en train de réfléchir...",
 	"{{user}}'s Chats": "Discussions de {{user}}",
 	"{{webUIName}} Backend Required": "Backend {{webUIName}} requis",
-	"*Prompt node ID(s) are required for image generation": "",
+	"*Prompt node ID(s) are required for image generation": "*Le(s) identifiant(s) de noeud du prompt sont nécessaires pour la génération d’images",
 	"A task model is used when performing tasks such as generating titles for chats and web search queries": "Un modèle de tâche est utilisé lors de l’exécution de tâches telles que la génération de titres pour les conversations et les requêtes de recherche sur le web.",
 	"a user": "un utilisateur",
 	"About": "À propos",
 	"Account": "Compte",
 	"Account Activation Pending": "Activation du compte en attente",
 	"Accurate information": "Information exacte",
-	"Actions": "",
+	"Actions": "Actions",
 	"Active Users": "Utilisateurs actifs",
 	"Add": "Ajouter",
 	"Add a model id": "Ajouter un identifiant de modèle",
 	"Add a short description about what this model does": "Ajoutez une brève description de ce que fait ce modèle.",
 	"Add a short title for this prompt": "Ajoutez un bref titre pour cette prompt.",
-	"Add a tag": "Ajouter une balise",
+	"Add a tag": "Ajouter une étiquette",
 	"Add custom prompt": "Ajouter une prompt personnalisée",
 	"Add Docs": "Ajouter de la documentation",
 	"Add Files": "Ajouter des fichiers",
 	"Add Memory": "Ajouter de la mémoire",
 	"Add message": "Ajouter un message",
 	"Add Model": "Ajouter un modèle",
-	"Add Tag": "",
-	"Add Tags": "Ajouter des balises",
+	"Add Tag": "Ajouter une étiquette",
+	"Add Tags": "Ajouter des étiquettes",
 	"Add User": "Ajouter un Utilisateur",
 	"Adjusting these settings will apply changes universally to all users.": "L'ajustement de ces paramètres appliquera universellement les changements à tous les utilisateurs.",
 	"admin": "administrateur",
@@ -45,9 +44,9 @@
 	"All Users": "Tous les Utilisateurs",
 	"Allow": "Autoriser",
 	"Allow Chat Deletion": "Autoriser la suppression de l'historique de chat",
-	"Allow Chat Editing": "",
+	"Allow Chat Editing": "Autoriser la modification de l'historique de chat",
 	"Allow non-local voices": "Autoriser les voix non locales",
-	"Allow Temporary Chat": "",
+	"Allow Temporary Chat": "Autoriser le chat éphémère",
 	"Allow User Location": "Autoriser l'emplacement de l'utilisateur",
 	"Allow Voice Interruption in Call": "Autoriser l'interruption vocale pendant un appel",
 	"alphanumeric characters and hyphens": "caractères alphanumériques et tirets",
@@ -68,10 +67,9 @@
 	"Attach file": "Joindre un document",
 	"Attention to detail": "Attention aux détails",
 	"Audio": "Audio",
-	"Audio settings updated successfully": "Les paramètres audio ont été mis à jour avec succès",
 	"August": "Août",
 	"Auto-playback response": "Réponse de lecture automatique",
-	"Automatic1111": "",
+	"Automatic1111": "Automatic1111",
 	"AUTOMATIC1111 Api Auth String": "AUTOMATIC1111 Chaîne d'authentification de l'API",
 	"AUTOMATIC1111 Base URL": "URL de base AUTOMATIC1111",
 	"AUTOMATIC1111 Base URL is required.": "L'URL de base {AUTOMATIC1111} est requise.",
@@ -94,7 +92,7 @@
 	"Chat": "Chat",
 	"Chat Background Image": "Image d'arrière-plan de la fenêtre de chat",
 	"Chat Bubble UI": "Bulles de discussion",
-	"Chat Controls": "",
+	"Chat Controls": "Contrôles du chat",
 	"Chat direction": "Direction du chat",
 	"Chats": "Conversations",
 	"Check Again": "Vérifiez à nouveau.",
@@ -113,7 +111,7 @@
 	"Click here to select a csv file.": "Cliquez ici pour sélectionner un fichier CSV.",
 	"Click here to select a py file.": "Cliquez ici pour sélectionner un fichier .py.",
 	"Click here to select documents.": "Cliquez ici pour sélectionner les documents.",
-	"Click here to upload a workflow.json file.": "",
+	"Click here to upload a workflow.json file.": "Cliquez ici pour télécharger un fichier workflow.json.",
 	"click here.": "cliquez ici.",
 	"Click on the user role button to change a user's role.": "Cliquez sur le bouton de rôle d'utilisateur pour modifier le rôle d'un utilisateur.",
 	"Clipboard write permission denied. Please check your browser settings to grant the necessary access.": "L'autorisation d'écriture du presse-papier a été refusée. Veuillez vérifier les paramètres de votre navigateur pour accorder l'accès nécessaire.",
@@ -124,8 +122,8 @@
 	"ComfyUI": "ComfyUI",
 	"ComfyUI Base URL": "URL de base ComfyUI",
 	"ComfyUI Base URL is required.": "L'URL de base ComfyUI est requise.",
-	"ComfyUI Workflow": "",
-	"ComfyUI Workflow Nodes": "",
+	"ComfyUI Workflow": "Flux de travaux de ComfyUI",
+	"ComfyUI Workflow Nodes": "Noeud du flux de travaux de ComfyUI",
 	"Command": "Commande",
 	"Concurrent Requests": "Demandes concurrentes",
 	"Confirm": "Confirmer",
@@ -134,15 +132,17 @@
 	"Connections": "Connexions",
 	"Contact Admin for WebUI Access": "Contacter l'administrateur pour l'accès à l'interface Web",
 	"Content": "Contenu",
-	"Content Extraction": "",
+	"Content Extraction": "Extraction du contenu",
 	"Context Length": "Longueur du contexte",
 	"Continue Response": "Continuer la réponse",
 	"Continue with {{provider}}": "Continuer avec {{provider}}",
-	"Controls": "",
-	"Copied": "",
+	"Control how message text is split for TTS requests. 'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "Contrôle la façon dont le texte des messages est divisé pour les demandes de TTS. 'Ponctuation' divise en phrases, 'paragraphes' divise en paragraphes et 'aucun' garde le message en tant que chaîne unique.",
+	"Controls": "Contrôles",
+	"Copied": "Copié",
 	"Copied shared chat URL to clipboard!": "URL du chat copiée dans le presse-papiers\u00a0!",
+	"Copied to clipboard": "",
 	"Copy": "Copie",
-	"Copy Code": "",
+	"Copy Code": "Copier le code",
 	"Copy last code block": "Copier le dernier bloc de code",
 	"Copy last response": "Copier la dernière réponse",
 	"Copy Link": "Copier le lien",
@@ -164,7 +164,7 @@
 	"Database": "Base de données",
 	"December": "Décembre",
 	"Default": "Par défaut",
-	"Default (Open AI)": "",
+	"Default (Open AI)": "Par défaut (Open AI)",
 	"Default (SentenceTransformers)": "Par défaut (Sentence Transformers)",
 	"Default Model": "Modèle standard",
 	"Default model updated": "Modèle par défaut mis à jour",
@@ -177,7 +177,7 @@
 	"Delete chat": "Supprimer la conversation",
 	"Delete Chat": "Supprimer la Conversation",
 	"Delete chat?": "Supprimer la conversation ?",
-	"Delete Doc": "",
+	"Delete Doc": "Supprimer le document",
 	"Delete function?": "Supprimer la fonction ?",
 	"Delete prompt?": "Supprimer la prompt ?",
 	"delete this link": "supprimer ce lien",
@@ -187,7 +187,7 @@
 	"Deleted {{name}}": "Supprimé {{name}}",
 	"Description": "Description",
 	"Didn't fully follow instructions": "N'a pas entièrement respecté les instructions",
-	"Disabled": "",
+	"Disabled": "Désactivé",
 	"Discover a function": "Découvrez une fonction",
 	"Discover a model": "Découvrir un modèle",
 	"Discover a prompt": "Découvrir une suggestion",
@@ -199,16 +199,16 @@
 	"Dismissible": "Fermeture",
 	"Display Emoji in Call": "Afficher les emojis pendant l'appel",
 	"Display the username instead of You in the Chat": "Afficher le nom d'utilisateur à la place de \"Vous\" dans le Chat",
-	"Do not install functions from sources you do not fully trust.": "",
-	"Do not install tools from sources you do not fully trust.": "",
+	"Do not install functions from sources you do not fully trust.": "N'installez pas de fonctions provenant de sources auxquelles vous ne faites pas entièrement confiance.",
+	"Do not install tools from sources you do not fully trust.": "N'installez pas d'outils provenant de sources auxquelles vous ne faites pas entièrement confiance.",
 	"Document": "Document",
 	"Documentation": "Documentation",
 	"Documents": "Documents",
 	"does not make any external connections, and your data stays securely on your locally hosted server.": "ne fait aucune connexion externe et garde vos données en sécurité sur votre serveur local.",
 	"Don't Allow": "Ne pas autoriser",
 	"Don't have an account?": "Vous n'avez pas de compte ?",
-	"don't install random functions from sources you don't trust.": "",
-	"don't install random tools from sources you don't trust.": "",
+	"don't install random functions from sources you don't trust.": "n'installez pas de fonctions aléatoires provenant de sources auxquelles vous ne faites pas confiance.",
+	"don't install random tools from sources you don't trust.": "n'installez pas d'outils aléatoires provenant de sources auxquelles vous ne faites pas confiance.",
 	"Don't like the style": "N'apprécie pas le style",
 	"Done": "Terminé",
 	"Download": "Télécharger",
@@ -220,18 +220,18 @@
 	"Edit Doc": "Modifier le document",
 	"Edit Memory": "Modifier la mémoire",
 	"Edit User": "Modifier l'utilisateur",
-	"ElevenLabs": "",
+	"ElevenLabs": "ElevenLabs",
 	"Email": "E-mail",
 	"Embedding Batch Size": "Taille du lot d'encodage",
 	"Embedding Model": "Modèle d'embedding",
 	"Embedding Model Engine": "Moteur de modèle d'encodage",
 	"Embedding model set to \"{{embedding_model}}\"": "Modèle d'encodage défini sur « {{embedding_model}} »",
 	"Enable Community Sharing": "Activer le partage communautaire",
-	"Enable Message Rating": "",
+	"Enable Message Rating": "Activer l'évaluation des messages",
 	"Enable New Sign Ups": "Activer les nouvelles inscriptions",
 	"Enable Web Search": "Activer la recherche web",
-	"Enabled": "",
-	"Engine": "",
+	"Enabled": "Activé",
+	"Engine": "Moteur",
 	"Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "Vérifiez que votre fichier CSV comprenne les 4 colonnes dans cet ordre : Name, Email, Password, Role.",
 	"Enter {{role}} message here": "Entrez le message {{role}} ici",
 	"Enter a detail about yourself for your LLMs to recall": "Saisissez un détail sur vous-même que vos LLMs pourront se rappeler",
@@ -244,7 +244,7 @@
 	"Enter Google PSE Engine Id": "Entrez l'identifiant du moteur Google PSE",
 	"Enter Image Size (e.g. 512x512)": "Entrez la taille de l'image (par ex. 512x512)",
 	"Enter language codes": "Entrez les codes de langue",
-	"Enter Model ID": "",
+	"Enter Model ID": "Entrez l'id du model",
 	"Enter model tag (e.g. {{modelTag}})": "Entrez l'étiquette du modèle (par ex. {{modelTag}})",
 	"Enter Number of Steps (e.g. 50)": "Entrez le nombre de pas (par ex. 50)",
 	"Enter Score": "Entrez votre score",
@@ -253,15 +253,15 @@
 	"Enter Serply API Key": "Entrez la clé API Serply",
 	"Enter Serpstack API Key": "Entrez la clé API Serpstack",
 	"Enter stop sequence": "Entrez la séquence d'arrêt",
-	"Enter system prompt": "",
+	"Enter system prompt": "Entrez le prompt du système",
 	"Enter Tavily API Key": "Entrez la clé API Tavily",
-	"Enter Tika Server URL": "",
+	"Enter Tika Server URL": "Entrez l'URL du serveur Tika",
 	"Enter Top K": "Entrez les Top K",
 	"Enter URL (e.g. http://127.0.0.1:7860/)": "Entrez l'URL (par ex. {http://127.0.0.1:7860/})",
 	"Enter URL (e.g. http://localhost:11434)": "Entrez l'URL (par ex. http://localhost:11434)",
 	"Enter Your Email": "Entrez votre adresse e-mail",
 	"Enter Your Full Name": "Entrez votre nom complet",
-	"Enter your message": "",
+	"Enter your message": "Entrez votre message",
 	"Enter Your Password": "Entrez votre mot de passe",
 	"Enter Your Role": "Entrez votre rôle",
 	"Error": "Erreur",
@@ -285,7 +285,8 @@
 	"File": "Fichier",
 	"File Mode": "Mode fichier",
 	"File not found.": "Fichier introuvable.",
-	"Files": "",
+	"File size should not exceed {{maxSize}} MB.": "",
+	"Files": "Fichiers",
 	"Filter is now globally disabled": "Le filtre est maintenant désactivé globalement",
 	"Filter is now globally enabled": "Le filtre est désormais activé globalement",
 	"Filters": "Filtres",
@@ -298,28 +299,28 @@
 	"Frequency Penalty": "Pénalité de fréquence",
 	"Function created successfully": "La fonction a été créée avec succès",
 	"Function deleted successfully": "Fonction supprimée avec succès",
-	"Function Description (e.g. A filter to remove profanity from text)": "",
-	"Function ID (e.g. my_filter)": "",
-	"Function is now globally disabled": "",
-	"Function is now globally enabled": "",
-	"Function Name (e.g. My Filter)": "",
+	"Function Description (e.g. A filter to remove profanity from text)": "Description de la fonction (par ex. Un filtre pour supprimer les grossièretés d'un texte)",
+	"Function ID (e.g. my_filter)": "ID de la fonction (par ex. mon_filtre)",
+	"Function is now globally disabled": "La fonction est désormais désactivée globalement",
+	"Function is now globally enabled": "La fonction est désormais activée globalement",
+	"Function Name (e.g. My Filter)": "Nom de la fonction (par ex. Mon Filtre)",
 	"Function updated successfully": "La fonction a été mise à jour avec succès",
 	"Functions": "Fonctions",
-	"Functions allow arbitrary code execution": "",
-	"Functions allow arbitrary code execution.": "",
+	"Functions allow arbitrary code execution": "Les fonctions permettent l'exécution de code arbitraire",
+	"Functions allow arbitrary code execution.": "Les fonctions permettent l'exécution de code arbitraire.",
 	"Functions imported successfully": "Fonctions importées avec succès",
 	"General": "Général",
 	"General Settings": "Paramètres Généraux",
 	"Generate Image": "Générer une image",
 	"Generating search query": "Génération d'une requête de recherche",
 	"Generation Info": "Informations sur la génération",
-	"Get up and running with": "",
+	"Get up and running with": "Démarrez avec",
 	"Global": "Mondial",
 	"Good Response": "Bonne réponse",
 	"Google PSE API Key": "Clé API Google PSE",
 	"Google PSE Engine Id": "ID du moteur de recherche personnalisé de Google",
 	"h:mm a": "h:mm a",
-	"Haptic Feedback": "",
+	"Haptic Feedback": "Retour haptique",
 	"has no conversations.": "n'a aucune conversation.",
 	"Hello, {{name}}": "Bonjour, {{name}}.",
 	"Help": "Aide",
@@ -327,7 +328,7 @@
 	"Hide Model": "Masquer le modèle",
 	"How can I help you today?": "Comment puis-je vous être utile aujourd'hui ?",
 	"Hybrid Search": "Recherche hybride",
-	"I acknowledge that I have read and I understand the implications of my action. I am aware of the risks associated with executing arbitrary code and I have verified the trustworthiness of the source.": "",
+	"I acknowledge that I have read and I understand the implications of my action. I am aware of the risks associated with executing arbitrary code and I have verified the trustworthiness of the source.": "Je reconnais avoir lu et compris les implications de mes actions. Je suis conscient des risques associés à l'exécution d'un code arbitraire et j'ai vérifié la fiabilité de la source.",
 	"Image Generation (Experimental)": "Génération d'images (expérimental)",
 	"Image Generation Engine": "Moteur de génération d'images",
 	"Image Settings": "Paramètres de l'image",
@@ -336,7 +337,7 @@
 	"Import Documents Mapping": "Import de la correspondance des documents",
 	"Import Functions": "Import de fonctions",
 	"Import Models": "Importer des modèles",
-	"Import Prompts": "Importer des Enseignes",
+	"Import Prompts": "Importer des prompts",
 	"Import Tools": "Outils d'importation",
 	"Include `--api-auth` flag when running stable-diffusion-webui": "Inclure le drapeau `--api-auth` lors de l'exécution de stable-diffusion-webui",
 	"Include `--api` flag when running stable-diffusion-webui": "Inclure le drapeau `--api` lorsque vous exécutez stable-diffusion-webui",
@@ -358,9 +359,10 @@
 	"Keyboard shortcuts": "Raccourcis clavier",
 	"Knowledge": "Connaissance",
 	"Language": "Langue",
-	"large language models, locally.": "",
+	"large language models, locally.": "grand modèle de langage, localement",
 	"Last Active": "Dernière activité",
 	"Last Modified": "Dernière modification",
+	"Leave empty for unlimited": "",
 	"Light": "Lumineux",
 	"Listening...": "En train d'écouter...",
 	"LLMs can make mistakes. Verify important information.": "Les LLM peuvent faire des erreurs. Vérifiez les informations importantes.",
@@ -368,13 +370,15 @@
 	"LTR": "LTR",
 	"Made by OpenWebUI Community": "Réalisé par la communauté OpenWebUI",
 	"Make sure to enclose them with": "Assurez-vous de les inclure dans",
-	"Make sure to export a workflow.json file as API format from ComfyUI.": "",
+	"Make sure to export a workflow.json file as API format from ComfyUI.": "Veillez à exporter un fichier workflow.json au format API depuis ComfyUI.",
 	"Manage": "Gérer",
 	"Manage Models": "Gérer les Modèles",
 	"Manage Ollama Models": "Gérer les modèles Ollama",
 	"Manage Pipelines": "Gérer les pipelines",
 	"March": "Mars",
 	"Max Tokens (num_predict)": "Tokens maximaux (num_predict)",
+	"Max Upload Count": "",
+	"Max Upload Size": "",
 	"Maximum of 3 models can be downloaded simultaneously. Please try again later.": "Un maximum de 3 modèles peut être téléchargé en même temps. Veuillez réessayer ultérieurement.",
 	"May": "Mai",
 	"Memories accessible by LLMs will be shown here.": "Les mémoires accessibles par les LLMs seront affichées ici.",
@@ -383,9 +387,9 @@
 	"Memory cleared successfully": "La mémoire a été effacée avec succès",
 	"Memory deleted successfully": "La mémoire a été supprimée avec succès",
 	"Memory updated successfully": "La mémoire a été mise à jour avec succès",
-	"Merge Responses": "",
+	"Merge Responses": "Fusionner les réponses",
 	"Messages you send after creating your link won't be shared. Users with the URL will be able to view the shared chat.": "Les messages que vous envoyez après avoir créé votre lien ne seront pas partagés. Les utilisateurs disposant de l'URL pourront voir le chat partagé.",
-	"Min P": "",
+	"Min P": "P min",
 	"Minimum Score": "Score minimal",
 	"Mirostat": "Mirostat",
 	"Mirostat Eta": "Mirostat Eta",
@@ -410,7 +414,7 @@
 	"Models": "Modèles",
 	"More": "Plus de",
 	"Name": "Nom",
-	"Name Tag": "Étiquette de nom",
+	"Name Tag": "Nom de l'étiquette",
 	"Name your model": "Nommez votre modèle",
 	"New Chat": "Nouvelle conversation",
 	"New Password": "Nouveau mot de passe",
@@ -426,7 +430,7 @@
 	"Note: If you set a minimum score, the search will only return documents with a score greater than or equal to the minimum score.": "Note : Si vous définissez un score minimum, seuls les documents ayant un score supérieur ou égal à ce score minimum seront retournés par la recherche.",
 	"Notifications": "Notifications",
 	"November": "Novembre",
-	"num_gpu (Ollama)": "",
+	"num_gpu (Ollama)": "num_gpu (Ollama)",
 	"num_thread (Ollama)": "num_thread (Ollama)",
 	"OAuth ID": "ID OAuth",
 	"October": "Octobre",
@@ -471,7 +475,7 @@
 	"Pipelines Valves": "Vannes de Pipelines",
 	"Plain text (.txt)": "Texte simple (.txt)",
 	"Playground": "Aire de jeux",
-	"Please carefully review the following warnings:": "",
+	"Please carefully review the following warnings:": "Veuillez lire attentivement les avertissements suivants :",
 	"Positive attitude": "Attitude positive",
 	"Previous 30 days": "30 derniers jours",
 	"Previous 7 days": "7 derniers jours",
@@ -505,22 +509,23 @@
 	"Reset Vector Storage": "Réinitialiser le stockage des vecteurs",
 	"Response AutoCopy to Clipboard": "Copie automatique de la réponse vers le presse-papiers",
 	"Response notifications cannot be activated as the website permissions have been denied. Please visit your browser settings to grant the necessary access.": "Les notifications de réponse ne peuvent pas être activées car les autorisations du site web ont été refusées. Veuillez visiter les paramètres de votre navigateur pour accorder l'accès nécessaire.",
+	"Response splitting": "Fractionnement de la réponse",
 	"Role": "Rôle",
 	"Rosé Pine": "Pin rosé",
 	"Rosé Pine Dawn": "Aube de Pin Rosé",
 	"RTL": "RTL",
-	"Run": "",
-	"Run Llama 2, Code Llama, and other models. Customize and create your own.": "",
+	"Run": "Exécuter",
+	"Run Llama 2, Code Llama, and other models. Customize and create your own.": "Exécutez Llama 2, Code Llama et d'autres modèles. Personnalisez et créez votre propre modèle.",
 	"Running": "Courir",
 	"Save": "Enregistrer",
 	"Save & Create": "Enregistrer & Créer",
 	"Save & Update": "Enregistrer & Mettre à jour",
-	"Save Tag": "",
+	"Save Tag": "Enregistrer l'étiquette",
 	"Saving chat logs directly to your browser's storage is no longer supported. Please take a moment to download and delete your chat logs by clicking the button below. Don't worry, you can easily re-import your chat logs to the backend through": "La sauvegarde des journaux de discussion directement dans le stockage de votre navigateur n'est plus prise en charge. Veuillez prendre un instant pour télécharger et supprimer vos journaux de discussion en cliquant sur le bouton ci-dessous. Pas de soucis, vous pouvez facilement les réimporter depuis le backend via l'interface ci-dessous",
 	"Scan": "Scanner",
 	"Scan complete!": "Scan terminé !",
 	"Scan for documents from {{path}}": "Scanner des documents depuis {{path}}",
-	"Scroll to bottom when switching between branches": "",
+	"Scroll to bottom when switching between branches": "Défiler vers le bas lors du passage d'une branche à l'autre",
 	"Search": "Recherche",
 	"Search a model": "Rechercher un modèle",
 	"Search Chats": "Rechercher des conversations",
@@ -536,6 +541,7 @@
 	"Searched {{count}} sites_many": "Recherché {{count}} sites_many",
 	"Searched {{count}} sites_other": "Recherché {{count}} sites_autres",
 	"Searching \"{{searchQuery}}\"": "Recherche de « {{searchQuery}} »",
+	"Searching Knowledge for \"{{searchQuery}}\"": "",
 	"Searxng Query URL": "URL de recherche Searxng",
 	"See readme.md for instructions": "Voir le fichier readme.md pour les instructions",
 	"See what's new": "Découvrez les nouvelles fonctionnalités",
@@ -549,7 +555,7 @@
 	"Select a tool": "Sélectionnez un outil",
 	"Select an Ollama instance": "Sélectionnez une instance Ollama",
 	"Select Documents": "Sélectionnez des documents",
-	"Select Engine": "",
+	"Select Engine": "Sélectionnez le moteur",
 	"Select model": "Sélectionnez un modèle",
 	"Select only one model to call": "Sélectionnez seulement un modèle pour appeler",
 	"Selected model(s) do not support image inputs": "Les modèle(s) sélectionné(s) ne prennent pas en charge les entrées d'images",
@@ -596,22 +602,24 @@
 	"Success": "Réussite",
 	"Successfully updated.": "Mise à jour réussie.",
 	"Suggested": "Sugéré",
-	"Support": "",
-	"Support this plugin:": "",
+	"Support": "Supporter",
+	"Support this plugin:": "Supporter ce module",
 	"System": "Système",
 	"System Prompt": "Prompt du système",
-	"Tags": "Balises",
+	"Tags": "Étiquettes",
 	"Tap to interrupt": "Appuyez pour interrompre",
 	"Tavily API Key": "Clé API Tavily",
 	"Tell us more:": "Dites-nous en plus à ce sujet : ",
 	"Temperature": "Température",
 	"Template": "Template",
-	"Temporary Chat": "",
+	"Temporary Chat": "Chat éphémère",
 	"Text Completion": "Complétion de texte",
 	"Text-to-Speech Engine": "Moteur de synthèse vocale",
 	"Tfs Z": "Tfs Z",
 	"Thanks for your feedback!": "Merci pour vos commentaires !",
-	"The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "",
+	"The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "Les développeurs de ce plugin sont des bénévoles passionnés issus de la communauté. Si vous trouvez ce plugin utile, merci de contribuer à son développement.",
+	"The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "",
+	"The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "",
 	"The score should be a value between 0.0 (0%) and 1.0 (100%).": "Le score doit être une valeur comprise entre 0,0 (0\u00a0%) et 1,0 (100\u00a0%).",
 	"Theme": "Thème",
 	"Thinking...": "En train de réfléchir...",
@@ -634,7 +642,7 @@
 	"To access the WebUI, please reach out to the administrator. Admins can manage user statuses from the Admin Panel.": "Pour accéder à l'interface Web, veuillez contacter l'administrateur. Les administrateurs peuvent gérer les statuts des utilisateurs depuis le panneau d'administration.",
 	"To add documents here, upload them to the \"Documents\" workspace first.": "Pour ajouter des documents ici, téléchargez-les d'abord dans l'espace de travail « Documents ». ",
 	"to chat input.": "à l'entrée de discussion.",
-	"To select actions here, add them to the \"Functions\" workspace first.": "",
+	"To select actions here, add them to the \"Functions\" workspace first.": "Pour sélectionner des actions ici, ajoutez-les d'abord à l'espace de travail « Fonctions ».",
 	"To select filters here, add them to the \"Functions\" workspace first.": "Pour sélectionner des filtres ici, ajoutez-les d'abord à l'espace de travail « Fonctions ». ",
 	"To select toolkits here, add them to the \"Tools\" workspace first.": "Pour sélectionner des toolkits ici, ajoutez-les d'abord à l'espace de travail « Outils ». ",
 	"Today": "Aujourd'hui",
@@ -645,13 +653,13 @@
 	"Tool deleted successfully": "Outil supprimé avec succès",
 	"Tool imported successfully": "Outil importé avec succès",
 	"Tool updated successfully": "L'outil a été mis à jour avec succès",
-	"Toolkit Description (e.g. A toolkit for performing various operations)": "",
-	"Toolkit ID (e.g. my_toolkit)": "",
-	"Toolkit Name (e.g. My ToolKit)": "",
+	"Toolkit Description (e.g. A toolkit for performing various operations)": "Description du toolkit (par ex. un toolkit permettant d'effectuer diverses opérations)",
+	"Toolkit ID (e.g. my_toolkit)": "ID du Toolkit (par ex. mon_toolkit)",
+	"Toolkit Name (e.g. My ToolKit)": "Nom du toolkit (par ex. Mon Toolkit)",
 	"Tools": "Outils",
-	"Tools are a function calling system with arbitrary code execution": "",
-	"Tools have a function calling system that allows arbitrary code execution": "",
-	"Tools have a function calling system that allows arbitrary code execution.": "",
+	"Tools are a function calling system with arbitrary code execution": "Les outils sont un système d'appel de fonction avec exécution de code arbitraire",
+	"Tools have a function calling system that allows arbitrary code execution": "Les outils ont un système d'appel de fonction qui permet l'exécution de code arbitraire",
+	"Tools have a function calling system that allows arbitrary code execution.": "Les outils ont un système d'appel de fonction qui permet l'exécution de code arbitraire.",
 	"Top K": "Top K",
 	"Top P": "Top P",
 	"Trouble accessing Ollama?": "Rencontrez-vous des difficultés pour accéder à Ollama ?",
@@ -663,7 +671,7 @@
 	"Uh-oh! There was an issue connecting to {{provider}}.": "Oh non ! Un problème est survenu lors de la connexion à {{provider}}.",
 	"UI": "Interface utilisateur",
 	"Unknown file type '{{file_type}}'. Proceeding with the file upload anyway.": "Type de fichier inconnu '{{file_type}}'. Continuons tout de même le téléchargement du fichier.",
-	"Unpin": "",
+	"Unpin": "Désépingler",
 	"Update": "Mise à jour",
 	"Update and Copy Link": "Mettre à jour et copier le lien",
 	"Update password": "Mettre à jour le mot de passe",
@@ -693,7 +701,7 @@
 	"Version": "Version améliorée",
 	"Voice": "Voix",
 	"Warning": "Avertissement !",
-	"Warning:": "",
+	"Warning:": "Avertissement :",
 	"Warning: If you update or change your embedding model, you will need to re-import all documents.": "Avertissement : Si vous mettez à jour ou modifiez votre modèle d'encodage, vous devrez réimporter tous les documents.",
 	"Web": "Web",
 	"Web API": "API Web",
@@ -711,6 +719,7 @@
 	"Write a summary in 50 words that summarizes [topic or keyword].": "Rédigez un résumé de 50 mots qui résume [sujet ou mot-clé].",
 	"Yesterday": "Hier",
 	"You": "Vous",
+	"You can only chat with a maximum of {{maxCount}} file(s) at a time.": "",
 	"You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "Vous pouvez personnaliser vos interactions avec les LLM en ajoutant des souvenirs via le bouton 'Gérer' ci-dessous, ce qui les rendra plus utiles et adaptés à vos besoins.",
 	"You cannot clone a base model": "Vous ne pouvez pas cloner un modèle de base",
 	"You have no archived conversations.": "Vous n'avez aucune conversation archivée",
@@ -718,7 +727,7 @@
 	"You're a helpful assistant.": "Vous êtes un assistant serviable.",
 	"You're now logged in.": "Vous êtes désormais connecté.",
 	"Your account status is currently pending activation.": "Votre statut de compte est actuellement en attente d'activation.",
-	"Your entire contribution will go directly to the plugin developer; Open WebUI does not take any percentage. However, the chosen funding platform might have its own fees.": "",
+	"Your entire contribution will go directly to the plugin developer; Open WebUI does not take any percentage. However, the chosen funding platform might have its own fees.": "L'intégralité de votre contribution ira directement au développeur du plugin ; Open WebUI ne prend aucun pourcentage. Cependant, la plateforme de financement choisie peut avoir ses propres frais.",
 	"Youtube": "YouTube",
 	"Youtube Loader Settings": "Paramètres de l'outil de téléchargement YouTube"
 }

+ 11 - 2
src/lib/i18n/locales/he-IL/translation.json

@@ -6,7 +6,6 @@
 	"(latest)": "(האחרון)",
 	"{{ models }}": "{{ דגמים }}",
 	"{{ owner }}: You cannot delete a base model": "{{ בעלים }}: לא ניתן למחוק מודל בסיס",
-	"{{modelName}} is thinking...": "{{modelName}} חושב...",
 	"{{user}}'s Chats": "צ'אטים של {{user}}",
 	"{{webUIName}} Backend Required": "נדרש Backend של {{webUIName}}",
 	"*Prompt node ID(s) are required for image generation": "",
@@ -68,7 +67,6 @@
 	"Attach file": "צרף קובץ",
 	"Attention to detail": "תשומת לב לפרטים",
 	"Audio": "אודיו",
-	"Audio settings updated successfully": "",
 	"August": "אוגוסט",
 	"Auto-playback response": "תגובת השמעה אוטומטית",
 	"Automatic1111": "",
@@ -138,9 +136,11 @@
 	"Context Length": "אורך הקשר",
 	"Continue Response": "המשך תגובה",
 	"Continue with {{provider}}": "",
+	"Control how message text is split for TTS requests. 'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "",
 	"Controls": "",
 	"Copied": "",
 	"Copied shared chat URL to clipboard!": "העתקת כתובת URL של צ'אט משותף ללוח!",
+	"Copied to clipboard": "",
 	"Copy": "העתק",
 	"Copy Code": "",
 	"Copy last code block": "העתק את בלוק הקוד האחרון",
@@ -285,6 +285,7 @@
 	"File": "",
 	"File Mode": "מצב קובץ",
 	"File not found.": "הקובץ לא נמצא.",
+	"File size should not exceed {{maxSize}} MB.": "",
 	"Files": "",
 	"Filter is now globally disabled": "",
 	"Filter is now globally enabled": "",
@@ -361,6 +362,7 @@
 	"large language models, locally.": "",
 	"Last Active": "פעיל לאחרונה",
 	"Last Modified": "",
+	"Leave empty for unlimited": "",
 	"Light": "בהיר",
 	"Listening...": "",
 	"LLMs can make mistakes. Verify important information.": "מודלים בשפה טבעית יכולים לטעות. אמת מידע חשוב.",
@@ -375,6 +377,8 @@
 	"Manage Pipelines": "ניהול צינורות",
 	"March": "מרץ",
 	"Max Tokens (num_predict)": "מקסימום אסימונים (num_predict)",
+	"Max Upload Count": "",
+	"Max Upload Size": "",
 	"Maximum of 3 models can be downloaded simultaneously. Please try again later.": "ניתן להוריד מקסימום 3 מודלים בו זמנית. אנא נסה שוב מאוחר יותר.",
 	"May": "מאי",
 	"Memories accessible by LLMs will be shown here.": "מזכירים נגישים על ידי LLMs יוצגו כאן.",
@@ -505,6 +509,7 @@
 	"Reset Vector Storage": "איפוס אחסון וקטורים",
 	"Response AutoCopy to Clipboard": "העתקה אוטומטית של תגובה ללוח",
 	"Response notifications cannot be activated as the website permissions have been denied. Please visit your browser settings to grant the necessary access.": "",
+	"Response splitting": "",
 	"Role": "תפקיד",
 	"Rosé Pine": "Rosé Pine",
 	"Rosé Pine Dawn": "Rosé Pine Dawn",
@@ -536,6 +541,7 @@
 	"Searched {{count}} sites_two": "חיפש {{count}} sites_two",
 	"Searched {{count}} sites_other": "חיפש {{count}} sites_other",
 	"Searching \"{{searchQuery}}\"": "",
+	"Searching Knowledge for \"{{searchQuery}}\"": "",
 	"Searxng Query URL": "כתובת URL של שאילתת Searxng",
 	"See readme.md for instructions": "ראה את readme.md להוראות",
 	"See what's new": "ראה מה חדש",
@@ -612,6 +618,8 @@
 	"Tfs Z": "Tfs Z",
 	"Thanks for your feedback!": "תודה על המשוב שלך!",
 	"The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "",
+	"The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "",
+	"The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "",
 	"The score should be a value between 0.0 (0%) and 1.0 (100%).": "ציון צריך להיות ערך בין 0.0 (0%) ל-1.0 (100%)",
 	"Theme": "נושא",
 	"Thinking...": "",
@@ -711,6 +719,7 @@
 	"Write a summary in 50 words that summarizes [topic or keyword].": "כתוב סיכום ב-50 מילים שמסכם [נושא או מילת מפתח].",
 	"Yesterday": "אתמול",
 	"You": "אתה",
+	"You can only chat with a maximum of {{maxCount}} file(s) at a time.": "",
 	"You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "",
 	"You cannot clone a base model": "לא ניתן לשכפל מודל בסיס",
 	"You have no archived conversations.": "אין לך שיחות בארכיון.",

+ 11 - 2
src/lib/i18n/locales/hi-IN/translation.json

@@ -6,7 +6,6 @@
 	"(latest)": "(latest)",
 	"{{ models }}": "{{ मॉडल }}",
 	"{{ owner }}: You cannot delete a base model": "{{ मालिक }}: आप बेस मॉडल को हटा नहीं सकते",
-	"{{modelName}} is thinking...": "{{modelName}} सोच रहा है...",
 	"{{user}}'s Chats": "{{user}} की चैट",
 	"{{webUIName}} Backend Required": "{{webUIName}} बैकएंड आवश्यक",
 	"*Prompt node ID(s) are required for image generation": "",
@@ -68,7 +67,6 @@
 	"Attach file": "फ़ाइल atta",
 	"Attention to detail": "विस्तार पर ध्यान",
 	"Audio": "ऑडियो",
-	"Audio settings updated successfully": "",
 	"August": "अगस्त",
 	"Auto-playback response": "ऑटो-प्लेबैक प्रतिक्रिया",
 	"Automatic1111": "",
@@ -138,9 +136,11 @@
 	"Context Length": "प्रसंग की लंबाई",
 	"Continue Response": "प्रतिक्रिया जारी रखें",
 	"Continue with {{provider}}": "",
+	"Control how message text is split for TTS requests. 'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "",
 	"Controls": "",
 	"Copied": "",
 	"Copied shared chat URL to clipboard!": "साझा चैट URL को क्लिपबोर्ड पर कॉपी किया गया!",
+	"Copied to clipboard": "",
 	"Copy": "कॉपी",
 	"Copy Code": "",
 	"Copy last code block": "अंतिम कोड ब्लॉक कॉपी करें",
@@ -285,6 +285,7 @@
 	"File": "",
 	"File Mode": "फ़ाइल मोड",
 	"File not found.": "फ़ाइल प्राप्त नहीं हुई।",
+	"File size should not exceed {{maxSize}} MB.": "",
 	"Files": "",
 	"Filter is now globally disabled": "",
 	"Filter is now globally enabled": "",
@@ -361,6 +362,7 @@
 	"large language models, locally.": "",
 	"Last Active": "पिछली बार सक्रिय",
 	"Last Modified": "",
+	"Leave empty for unlimited": "",
 	"Light": "सुन",
 	"Listening...": "",
 	"LLMs can make mistakes. Verify important information.": "एलएलएम गलतियाँ कर सकते हैं। महत्वपूर्ण जानकारी सत्यापित करें.",
@@ -375,6 +377,8 @@
 	"Manage Pipelines": "पाइपलाइनों का प्रबंधन करें",
 	"March": "मार्च",
 	"Max Tokens (num_predict)": "अधिकतम टोकन (num_predict)",
+	"Max Upload Count": "",
+	"Max Upload Size": "",
 	"Maximum of 3 models can be downloaded simultaneously. Please try again later.": "अधिकतम 3 मॉडल एक साथ डाउनलोड किये जा सकते हैं। कृपया बाद में पुन: प्रयास करें।",
 	"May": "मेई",
 	"Memories accessible by LLMs will be shown here.": "एलएलएम द्वारा सुलभ यादें यहां दिखाई जाएंगी।",
@@ -505,6 +509,7 @@
 	"Reset Vector Storage": "वेक्टर संग्रहण रीसेट करें",
 	"Response AutoCopy to Clipboard": "क्लिपबोर्ड पर प्रतिक्रिया ऑटोकॉपी",
 	"Response notifications cannot be activated as the website permissions have been denied. Please visit your browser settings to grant the necessary access.": "",
+	"Response splitting": "",
 	"Role": "भूमिका",
 	"Rosé Pine": "रोसे पिन",
 	"Rosé Pine Dawn": "रोसे पिन डेन",
@@ -535,6 +540,7 @@
 	"Searched {{count}} sites_one": "{{count}} sites_one खोजा गया",
 	"Searched {{count}} sites_other": "{{count}} sites_other खोजा गया",
 	"Searching \"{{searchQuery}}\"": "",
+	"Searching Knowledge for \"{{searchQuery}}\"": "",
 	"Searxng Query URL": "Searxng क्वेरी URL",
 	"See readme.md for instructions": "निर्देशों के लिए readme.md देखें",
 	"See what's new": "देखें, क्या नया है",
@@ -611,6 +617,8 @@
 	"Tfs Z": "टफ्स Z",
 	"Thanks for your feedback!": "आपकी प्रतिक्रिया के लिए धन्यवाद!",
 	"The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "",
+	"The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "",
+	"The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "",
 	"The score should be a value between 0.0 (0%) and 1.0 (100%).": "स्कोर का मान 0.0 (0%) और 1.0 (100%) के बीच होना चाहिए।",
 	"Theme": "थीम",
 	"Thinking...": "",
@@ -710,6 +718,7 @@
 	"Write a summary in 50 words that summarizes [topic or keyword].": "50 शब्दों में एक सारांश लिखें जो [विषय या कीवर्ड] का सारांश प्रस्तुत करता हो।",
 	"Yesterday": "कल",
 	"You": "आप",
+	"You can only chat with a maximum of {{maxCount}} file(s) at a time.": "",
 	"You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "",
 	"You cannot clone a base model": "आप बेस मॉडल का क्लोन नहीं बना सकते",
 	"You have no archived conversations.": "आपको कोई अंकित चैट नहीं है।",

+ 11 - 2
src/lib/i18n/locales/hr-HR/translation.json

@@ -6,7 +6,6 @@
 	"(latest)": "(najnovije)",
 	"{{ models }}": "{{ modeli }}",
 	"{{ owner }}: You cannot delete a base model": "{{ owner }}: Ne možete obrisati osnovni model",
-	"{{modelName}} is thinking...": "{{modelName}} razmišlja...",
 	"{{user}}'s Chats": "Razgovori korisnika {{user}}",
 	"{{webUIName}} Backend Required": "{{webUIName}} Backend je potreban",
 	"*Prompt node ID(s) are required for image generation": "",
@@ -68,7 +67,6 @@
 	"Attach file": "Priloži datoteku",
 	"Attention to detail": "Pažnja na detalje",
 	"Audio": "Audio",
-	"Audio settings updated successfully": "",
 	"August": "Kolovoz",
 	"Auto-playback response": "Automatska reprodukcija odgovora",
 	"Automatic1111": "",
@@ -138,9 +136,11 @@
 	"Context Length": "Dužina konteksta",
 	"Continue Response": "Nastavi odgovor",
 	"Continue with {{provider}}": "",
+	"Control how message text is split for TTS requests. 'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "",
 	"Controls": "",
 	"Copied": "",
 	"Copied shared chat URL to clipboard!": "URL dijeljenog razgovora kopiran u međuspremnik!",
+	"Copied to clipboard": "",
 	"Copy": "Kopiraj",
 	"Copy Code": "",
 	"Copy last code block": "Kopiraj zadnji blok koda",
@@ -285,6 +285,7 @@
 	"File": "",
 	"File Mode": "Način datoteke",
 	"File not found.": "Datoteka nije pronađena.",
+	"File size should not exceed {{maxSize}} MB.": "",
 	"Files": "",
 	"Filter is now globally disabled": "",
 	"Filter is now globally enabled": "",
@@ -361,6 +362,7 @@
 	"large language models, locally.": "",
 	"Last Active": "Zadnja aktivnost",
 	"Last Modified": "",
+	"Leave empty for unlimited": "",
 	"Light": "Svijetlo",
 	"Listening...": "Slušam...",
 	"LLMs can make mistakes. Verify important information.": "LLM-ovi mogu pogriješiti. Provjerite važne informacije.",
@@ -375,6 +377,8 @@
 	"Manage Pipelines": "Upravljanje cjevovodima",
 	"March": "Ožujak",
 	"Max Tokens (num_predict)": "Maksimalan broj tokena (num_predict)",
+	"Max Upload Count": "",
+	"Max Upload Size": "",
 	"Maximum of 3 models can be downloaded simultaneously. Please try again later.": "Maksimalno 3 modela se mogu preuzeti istovremeno. Pokušajte ponovo kasnije.",
 	"May": "Svibanj",
 	"Memories accessible by LLMs will be shown here.": "Ovdje će biti prikazana memorija kojoj mogu pristupiti LLM-ovi.",
@@ -505,6 +509,7 @@
 	"Reset Vector Storage": "Resetiraj pohranu vektora",
 	"Response AutoCopy to Clipboard": "Automatsko kopiranje odgovora u međuspremnik",
 	"Response notifications cannot be activated as the website permissions have been denied. Please visit your browser settings to grant the necessary access.": "",
+	"Response splitting": "",
 	"Role": "Uloga",
 	"Rosé Pine": "Rosé Pine",
 	"Rosé Pine Dawn": "Rosé Pine Dawn",
@@ -536,6 +541,7 @@
 	"Searched {{count}} sites_few": "Pretraženo {{count}} sites_few",
 	"Searched {{count}} sites_other": "Pretraženo {{count}} sites_other",
 	"Searching \"{{searchQuery}}\"": "",
+	"Searching Knowledge for \"{{searchQuery}}\"": "",
 	"Searxng Query URL": "Searxng URL upita",
 	"See readme.md for instructions": "Pogledajte readme.md za upute",
 	"See what's new": "Pogledajte što je novo",
@@ -612,6 +618,8 @@
 	"Tfs Z": "Tfs Z",
 	"Thanks for your feedback!": "Hvala na povratnim informacijama!",
 	"The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "",
+	"The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "",
+	"The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "",
 	"The score should be a value between 0.0 (0%) and 1.0 (100%).": "Ocjena treba biti vrijednost između 0,0 (0%) i 1,0 (100%).",
 	"Theme": "Tema",
 	"Thinking...": "Razmišljam",
@@ -711,6 +719,7 @@
 	"Write a summary in 50 words that summarizes [topic or keyword].": "Napišite sažetak u 50 riječi koji sažima [temu ili ključnu riječ].",
 	"Yesterday": "Jučer",
 	"You": "Vi",
+	"You can only chat with a maximum of {{maxCount}} file(s) at a time.": "",
 	"You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "Možete personalizirati svoje interakcije s LLM-ima dodavanjem uspomena putem gumba 'Upravljanje' u nastavku, čineći ih korisnijima i prilagođenijima vama.",
 	"You cannot clone a base model": "Ne možete klonirati osnovni model",
 	"You have no archived conversations.": "Nemate arhiviranih razgovora.",

+ 11 - 2
src/lib/i18n/locales/id-ID/translation.json

@@ -6,7 +6,6 @@
 	"(latest)": "(terbaru)",
 	"{{ models }}": "{{ models }}",
 	"{{ owner }}: You cannot delete a base model": "{{ owner }}: Anda tidak dapat menghapus model dasar",
-	"{{modelName}} is thinking...": "{{modelName}} sedang berpikir...",
 	"{{user}}'s Chats": "Obrolan {{user}}",
 	"{{webUIName}} Backend Required": "{{webUIName}} Diperlukan Backend",
 	"*Prompt node ID(s) are required for image generation": "",
@@ -68,7 +67,6 @@
 	"Attach file": "Lampirkan file",
 	"Attention to detail": "Perhatian terhadap detail",
 	"Audio": "Audio",
-	"Audio settings updated successfully": "Pengaturan audio berhasil diperbarui",
 	"August": "Agustus",
 	"Auto-playback response": "Respons pemutaran otomatis",
 	"Automatic1111": "",
@@ -138,9 +136,11 @@
 	"Context Length": "Panjang Konteks",
 	"Continue Response": "Lanjutkan Tanggapan",
 	"Continue with {{provider}}": "Lanjutkan dengan {{provider}}",
+	"Control how message text is split for TTS requests. 'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "",
 	"Controls": "",
 	"Copied": "",
 	"Copied shared chat URL to clipboard!": "Menyalin URL obrolan bersama ke papan klip!",
+	"Copied to clipboard": "",
 	"Copy": "Menyalin",
 	"Copy Code": "",
 	"Copy last code block": "Salin blok kode terakhir",
@@ -285,6 +285,7 @@
 	"File": "Berkas",
 	"File Mode": "Mode File",
 	"File not found.": "File tidak ditemukan.",
+	"File size should not exceed {{maxSize}} MB.": "",
 	"Files": "",
 	"Filter is now globally disabled": "Filter sekarang dinonaktifkan secara global",
 	"Filter is now globally enabled": "Filter sekarang diaktifkan secara global",
@@ -361,6 +362,7 @@
 	"large language models, locally.": "",
 	"Last Active": "Terakhir Aktif",
 	"Last Modified": "Terakhir Dimodifikasi",
+	"Leave empty for unlimited": "",
 	"Light": "Cahaya",
 	"Listening...": "Mendengarkan",
 	"LLMs can make mistakes. Verify important information.": "LLM dapat membuat kesalahan. Verifikasi informasi penting.",
@@ -375,6 +377,8 @@
 	"Manage Pipelines": "Mengelola Saluran Pipa",
 	"March": "Maret",
 	"Max Tokens (num_predict)": "Token Maksimal (num_prediksi)",
+	"Max Upload Count": "",
+	"Max Upload Size": "",
 	"Maximum of 3 models can be downloaded simultaneously. Please try again later.": "Maksimal 3 model dapat diunduh secara bersamaan. Silakan coba lagi nanti.",
 	"May": "Mei",
 	"Memories accessible by LLMs will be shown here.": "Memori yang dapat diakses oleh LLM akan ditampilkan di sini.",
@@ -505,6 +509,7 @@
 	"Reset Vector Storage": "Setel Ulang Penyimpanan Vektor",
 	"Response AutoCopy to Clipboard": "Tanggapan Salin Otomatis ke Papan Klip",
 	"Response notifications cannot be activated as the website permissions have been denied. Please visit your browser settings to grant the necessary access.": "Notifikasi respons tidak dapat diaktifkan karena izin situs web telah ditolak. Silakan kunjungi pengaturan browser Anda untuk memberikan akses yang diperlukan.",
+	"Response splitting": "",
 	"Role": "Peran",
 	"Rosé Pine": "Pinus Rosé",
 	"Rosé Pine Dawn": "Rosé Pine Fajar",
@@ -535,6 +540,7 @@
 	"Searched {{count}} sites_one": "Mencari {{count}} situs_satu",
 	"Searched {{count}} sites_other": "Mencari {{count}} situs_lain",
 	"Searching \"{{searchQuery}}\"": "Mencari \"{{searchQuery}}\"",
+	"Searching Knowledge for \"{{searchQuery}}\"": "",
 	"Searxng Query URL": "URL Kueri Pencarian Searxng",
 	"See readme.md for instructions": "Lihat readme.md untuk instruksi",
 	"See what's new": "Lihat apa yang baru",
@@ -611,6 +617,8 @@
 	"Tfs Z": "Tfs Z",
 	"Thanks for your feedback!": "Terima kasih atas umpan balik Anda!",
 	"The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "",
+	"The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "",
+	"The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "",
 	"The score should be a value between 0.0 (0%) and 1.0 (100%).": "Nilai yang diberikan haruslah nilai antara 0,0 (0%) dan 1,0 (100%).",
 	"Theme": "Tema",
 	"Thinking...": "Berpikir",
@@ -710,6 +718,7 @@
 	"Write a summary in 50 words that summarizes [topic or keyword].": "Tulis ringkasan dalam 50 kata yang merangkum [topik atau kata kunci].",
 	"Yesterday": "Kemarin",
 	"You": "Anda",
+	"You can only chat with a maximum of {{maxCount}} file(s) at a time.": "",
 	"You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "Anda dapat mempersonalisasi interaksi Anda dengan LLM dengan menambahkan kenangan melalui tombol 'Kelola' di bawah ini, sehingga lebih bermanfaat dan disesuaikan untuk Anda.",
 	"You cannot clone a base model": "Anda tidak dapat mengkloning model dasar",
 	"You have no archived conversations.": "Anda tidak memiliki percakapan yang diarsipkan.",

+ 11 - 2
src/lib/i18n/locales/it-IT/translation.json

@@ -6,7 +6,6 @@
 	"(latest)": "(ultima)",
 	"{{ models }}": "{{ models }}",
 	"{{ owner }}: You cannot delete a base model": "{{ owner }}: non è possibile eliminare un modello di base",
-	"{{modelName}} is thinking...": "{{modelName}} sta pensando...",
 	"{{user}}'s Chats": "{{user}} Chat",
 	"{{webUIName}} Backend Required": "{{webUIName}} Backend richiesto",
 	"*Prompt node ID(s) are required for image generation": "",
@@ -68,7 +67,6 @@
 	"Attach file": "Allega file",
 	"Attention to detail": "Attenzione ai dettagli",
 	"Audio": "Audio",
-	"Audio settings updated successfully": "",
 	"August": "Agosto",
 	"Auto-playback response": "Riproduzione automatica della risposta",
 	"Automatic1111": "",
@@ -138,9 +136,11 @@
 	"Context Length": "Lunghezza contesto",
 	"Continue Response": "Continua risposta",
 	"Continue with {{provider}}": "",
+	"Control how message text is split for TTS requests. 'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "",
 	"Controls": "",
 	"Copied": "",
 	"Copied shared chat URL to clipboard!": "URL della chat condivisa copiato negli appunti!",
+	"Copied to clipboard": "",
 	"Copy": "Copia",
 	"Copy Code": "",
 	"Copy last code block": "Copia ultimo blocco di codice",
@@ -285,6 +285,7 @@
 	"File": "",
 	"File Mode": "Modalità file",
 	"File not found.": "File non trovato.",
+	"File size should not exceed {{maxSize}} MB.": "",
 	"Files": "",
 	"Filter is now globally disabled": "",
 	"Filter is now globally enabled": "",
@@ -361,6 +362,7 @@
 	"large language models, locally.": "",
 	"Last Active": "Ultima attività",
 	"Last Modified": "",
+	"Leave empty for unlimited": "",
 	"Light": "Chiaro",
 	"Listening...": "",
 	"LLMs can make mistakes. Verify important information.": "Gli LLM possono commettere errori. Verifica le informazioni importanti.",
@@ -375,6 +377,8 @@
 	"Manage Pipelines": "Gestire le pipeline",
 	"March": "Marzo",
 	"Max Tokens (num_predict)": "Numero massimo di gettoni (num_predict)",
+	"Max Upload Count": "",
+	"Max Upload Size": "",
 	"Maximum of 3 models can be downloaded simultaneously. Please try again later.": "È possibile scaricare un massimo di 3 modelli contemporaneamente. Riprova più tardi.",
 	"May": "Maggio",
 	"Memories accessible by LLMs will be shown here.": "I memori accessibili ai LLM saranno mostrati qui.",
@@ -505,6 +509,7 @@
 	"Reset Vector Storage": "Reimposta archivio vettoriale",
 	"Response AutoCopy to Clipboard": "Copia automatica della risposta negli appunti",
 	"Response notifications cannot be activated as the website permissions have been denied. Please visit your browser settings to grant the necessary access.": "",
+	"Response splitting": "",
 	"Role": "Ruolo",
 	"Rosé Pine": "Rosé Pine",
 	"Rosé Pine Dawn": "Rosé Pine Dawn",
@@ -536,6 +541,7 @@
 	"Searched {{count}} sites_many": "Ricercato {{count}} sites_many",
 	"Searched {{count}} sites_other": "Ricercato {{count}} sites_other",
 	"Searching \"{{searchQuery}}\"": "",
+	"Searching Knowledge for \"{{searchQuery}}\"": "",
 	"Searxng Query URL": "Searxng Query URL",
 	"See readme.md for instructions": "Vedi readme.md per le istruzioni",
 	"See what's new": "Guarda le novità",
@@ -612,6 +618,8 @@
 	"Tfs Z": "Tfs Z",
 	"Thanks for your feedback!": "Grazie per il tuo feedback!",
 	"The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "",
+	"The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "",
+	"The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "",
 	"The score should be a value between 0.0 (0%) and 1.0 (100%).": "Il punteggio dovrebbe essere un valore compreso tra 0.0 (0%) e 1.0 (100%).",
 	"Theme": "Tema",
 	"Thinking...": "",
@@ -711,6 +719,7 @@
 	"Write a summary in 50 words that summarizes [topic or keyword].": "Scrivi un riassunto in 50 parole che riassume [argomento o parola chiave].",
 	"Yesterday": "Ieri",
 	"You": "Tu",
+	"You can only chat with a maximum of {{maxCount}} file(s) at a time.": "",
 	"You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "",
 	"You cannot clone a base model": "Non è possibile clonare un modello di base",
 	"You have no archived conversations.": "Non hai conversazioni archiviate.",

+ 11 - 2
src/lib/i18n/locales/ja-JP/translation.json

@@ -6,7 +6,6 @@
 	"(latest)": "(最新)",
 	"{{ models }}": "{{ models }}",
 	"{{ owner }}: You cannot delete a base model": "{{ owner }}: ベースモデルは削除できません",
-	"{{modelName}} is thinking...": "{{modelName}} は思考中です...",
 	"{{user}}'s Chats": "{{user}} のチャット",
 	"{{webUIName}} Backend Required": "{{webUIName}} バックエンドが必要です",
 	"*Prompt node ID(s) are required for image generation": "",
@@ -68,7 +67,6 @@
 	"Attach file": "ファイルを添付する",
 	"Attention to detail": "詳細に注意する",
 	"Audio": "オーディオ",
-	"Audio settings updated successfully": "",
 	"August": "8月",
 	"Auto-playback response": "応答の自動再生",
 	"Automatic1111": "",
@@ -138,9 +136,11 @@
 	"Context Length": "コンテキストの長さ",
 	"Continue Response": "続きの応答",
 	"Continue with {{provider}}": "",
+	"Control how message text is split for TTS requests. 'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "",
 	"Controls": "",
 	"Copied": "",
 	"Copied shared chat URL to clipboard!": "共有チャットURLをクリップボードにコピーしました!",
+	"Copied to clipboard": "",
 	"Copy": "コピー",
 	"Copy Code": "",
 	"Copy last code block": "最後のコードブロックをコピー",
@@ -285,6 +285,7 @@
 	"File": "",
 	"File Mode": "ファイルモード",
 	"File not found.": "ファイルが見つかりません。",
+	"File size should not exceed {{maxSize}} MB.": "",
 	"Files": "",
 	"Filter is now globally disabled": "",
 	"Filter is now globally enabled": "",
@@ -361,6 +362,7 @@
 	"large language models, locally.": "",
 	"Last Active": "最終アクティブ",
 	"Last Modified": "",
+	"Leave empty for unlimited": "",
 	"Light": "ライト",
 	"Listening...": "",
 	"LLMs can make mistakes. Verify important information.": "LLM は間違いを犯す可能性があります。重要な情報を検証してください。",
@@ -375,6 +377,8 @@
 	"Manage Pipelines": "パイプラインの管理",
 	"March": "3月",
 	"Max Tokens (num_predict)": "最大トークン数 (num_predict)",
+	"Max Upload Count": "",
+	"Max Upload Size": "",
 	"Maximum of 3 models can be downloaded simultaneously. Please try again later.": "同時にダウンロードできるモデルは最大 3 つです。後でもう一度お試しください。",
 	"May": "5月",
 	"Memories accessible by LLMs will be shown here.": "LLM がアクセスできるメモリはここに表示されます。",
@@ -505,6 +509,7 @@
 	"Reset Vector Storage": "ベクトルストレージをリセット",
 	"Response AutoCopy to Clipboard": "クリップボードへの応答の自動コピー",
 	"Response notifications cannot be activated as the website permissions have been denied. Please visit your browser settings to grant the necessary access.": "",
+	"Response splitting": "",
 	"Role": "役割",
 	"Rosé Pine": "Rosé Pine",
 	"Rosé Pine Dawn": "Rosé Pine Dawn",
@@ -534,6 +539,7 @@
 	"Search Tools": "",
 	"Searched {{count}} sites_other": "{{count}} sites_other検索",
 	"Searching \"{{searchQuery}}\"": "",
+	"Searching Knowledge for \"{{searchQuery}}\"": "",
 	"Searxng Query URL": "Searxng クエリ URL",
 	"See readme.md for instructions": "手順については readme.md を参照してください",
 	"See what's new": "新機能を見る",
@@ -610,6 +616,8 @@
 	"Tfs Z": "Tfs Z",
 	"Thanks for your feedback!": "ご意見ありがとうございます!",
 	"The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "",
+	"The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "",
+	"The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "",
 	"The score should be a value between 0.0 (0%) and 1.0 (100%).": "スコアは0.0(0%)から1.0(100%)の間の値にしてください。",
 	"Theme": "テーマ",
 	"Thinking...": "",
@@ -709,6 +717,7 @@
 	"Write a summary in 50 words that summarizes [topic or keyword].": "[トピックまたはキーワード] を要約する 50 語の概要を書いてください。",
 	"Yesterday": "昨日",
 	"You": "あなた",
+	"You can only chat with a maximum of {{maxCount}} file(s) at a time.": "",
 	"You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "",
 	"You cannot clone a base model": "基本モデルのクローンを作成できない",
 	"You have no archived conversations.": "これまでにアーカイブされた会話はありません。",

+ 11 - 2
src/lib/i18n/locales/ka-GE/translation.json

@@ -6,7 +6,6 @@
 	"(latest)": "(უახლესი)",
 	"{{ models }}": "{{ models }}",
 	"{{ owner }}: You cannot delete a base model": "{{ owner }}: თქვენ არ შეგიძლიათ წაშალოთ ბაზის მოდელი",
-	"{{modelName}} is thinking...": "{{modelName}} ფიქრობს...",
 	"{{user}}'s Chats": "{{user}}-ის ჩათები",
 	"{{webUIName}} Backend Required": "{{webUIName}} საჭიროა ბექენდი",
 	"*Prompt node ID(s) are required for image generation": "",
@@ -68,7 +67,6 @@
 	"Attach file": "ფაილის ჩაწერა",
 	"Attention to detail": "დეტალური მიმართვა",
 	"Audio": "ხმოვანი",
-	"Audio settings updated successfully": "",
 	"August": "აგვისტო",
 	"Auto-playback response": "ავტომატური დაკვრის პასუხი",
 	"Automatic1111": "",
@@ -138,9 +136,11 @@
 	"Context Length": "კონტექსტის სიგრძე",
 	"Continue Response": "პასუხის გაგრძელება",
 	"Continue with {{provider}}": "",
+	"Control how message text is split for TTS requests. 'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "",
 	"Controls": "",
 	"Copied": "",
 	"Copied shared chat URL to clipboard!": "ყავს ჩათის URL-ი კლიპბორდში!",
+	"Copied to clipboard": "",
 	"Copy": "კოპირება",
 	"Copy Code": "",
 	"Copy last code block": "ბოლო ბლოკის კოპირება",
@@ -285,6 +285,7 @@
 	"File": "",
 	"File Mode": "ფაილური რეჟიმი",
 	"File not found.": "ფაილი ვერ მოიძებნა",
+	"File size should not exceed {{maxSize}} MB.": "",
 	"Files": "",
 	"Filter is now globally disabled": "",
 	"Filter is now globally enabled": "",
@@ -361,6 +362,7 @@
 	"large language models, locally.": "",
 	"Last Active": "ბოლო აქტიური",
 	"Last Modified": "",
+	"Leave empty for unlimited": "",
 	"Light": "მსუბუქი",
 	"Listening...": "",
 	"LLMs can make mistakes. Verify important information.": "შესაძლოა LLM-ებმა შეცდომები დაუშვან. გადაამოწმეთ მნიშვნელოვანი ინფორმაცია.",
@@ -375,6 +377,8 @@
 	"Manage Pipelines": "მილსადენების მართვა",
 	"March": "მარტი",
 	"Max Tokens (num_predict)": "მაქს ტოკენსი (num_predict)",
+	"Max Upload Count": "",
+	"Max Upload Size": "",
 	"Maximum of 3 models can be downloaded simultaneously. Please try again later.": "მაქსიმუმ 3 მოდელის ჩამოტვირთვა შესაძლებელია ერთდროულად. Გთხოვთ სცადოთ მოგვიანებით.",
 	"May": "მაი",
 	"Memories accessible by LLMs will be shown here.": "ლლმ-ს აქვს ხელმისაწვდომი მემორიები აქ იქნება.",
@@ -505,6 +509,7 @@
 	"Reset Vector Storage": "ვექტორული მეხსიერების გადატვირთვა",
 	"Response AutoCopy to Clipboard": "პასუხის ავტომატური კოპირება ბუფერში",
 	"Response notifications cannot be activated as the website permissions have been denied. Please visit your browser settings to grant the necessary access.": "",
+	"Response splitting": "",
 	"Role": "როლი",
 	"Rosé Pine": "ვარდისფერი ფიჭვის ხე",
 	"Rosé Pine Dawn": "ვარდისფერი ფიჭვის გარიჟრაჟი",
@@ -535,6 +540,7 @@
 	"Searched {{count}} sites_one": "Searched {{count}} sites_one",
 	"Searched {{count}} sites_other": "Searched {{count}} sites_other",
 	"Searching \"{{searchQuery}}\"": "",
+	"Searching Knowledge for \"{{searchQuery}}\"": "",
 	"Searxng Query URL": "Searxng Query URL",
 	"See readme.md for instructions": "იხილეთ readme.md ინსტრუქციებისთვის",
 	"See what's new": "სიახლეების ნახვა",
@@ -611,6 +617,8 @@
 	"Tfs Z": "Tfs Z",
 	"Thanks for your feedback!": "მადლობა გამოხმაურებისთვის!",
 	"The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "",
+	"The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "",
+	"The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "",
 	"The score should be a value between 0.0 (0%) and 1.0 (100%).": "ქულა 0.0 (0%) და 1.0 (100%) ჩაშენებული უნდა იყოს.",
 	"Theme": "თემა",
 	"Thinking...": "",
@@ -710,6 +718,7 @@
 	"Write a summary in 50 words that summarizes [topic or keyword].": "დაწერეთ რეზიუმე 50 სიტყვით, რომელიც აჯამებს [თემას ან საკვანძო სიტყვას].",
 	"Yesterday": "გუშინ",
 	"You": "თქვენ",
+	"You can only chat with a maximum of {{maxCount}} file(s) at a time.": "",
 	"You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "",
 	"You cannot clone a base model": "თქვენ არ შეგიძლიათ ბაზის მოდელის კლონირება",
 	"You have no archived conversations.": "არ ხართ არქივირებული განხილვები.",

+ 11 - 2
src/lib/i18n/locales/ko-KR/translation.json

@@ -6,7 +6,6 @@
 	"(latest)": "(latest)",
 	"{{ models }}": "{{ models }}",
 	"{{ owner }}: You cannot delete a base model": "{{ owner }}: 기본 모델은 삭제할 수 없습니다.",
-	"{{modelName}} is thinking...": "{{modelName}} 모델이 생각 중입니다....",
 	"{{user}}'s Chats": "{{user}}의 채팅",
 	"{{webUIName}} Backend Required": "{{webUIName}} 백엔드가 필요합니다.",
 	"*Prompt node ID(s) are required for image generation": "",
@@ -68,7 +67,6 @@
 	"Attach file": "파일 첨부",
 	"Attention to detail": "세부 사항에 대한 주의",
 	"Audio": "오디오",
-	"Audio settings updated successfully": "",
 	"August": "8월",
 	"Auto-playback response": "응답 자동 재생",
 	"Automatic1111": "",
@@ -138,9 +136,11 @@
 	"Context Length": "내용 길이",
 	"Continue Response": "대화 계속",
 	"Continue with {{provider}}": "",
+	"Control how message text is split for TTS requests. 'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "",
 	"Controls": "",
 	"Copied": "",
 	"Copied shared chat URL to clipboard!": "공유 채팅 URL이 클립보드에 복사되었습니다!",
+	"Copied to clipboard": "",
 	"Copy": "복사",
 	"Copy Code": "",
 	"Copy last code block": "마지막 코드 블록 복사",
@@ -285,6 +285,7 @@
 	"File": "",
 	"File Mode": "파일 모드",
 	"File not found.": "파일을 찾을 수 없습니다.",
+	"File size should not exceed {{maxSize}} MB.": "",
 	"Files": "",
 	"Filter is now globally disabled": "",
 	"Filter is now globally enabled": "",
@@ -361,6 +362,7 @@
 	"large language models, locally.": "",
 	"Last Active": "최근 활동",
 	"Last Modified": "마지막 수정",
+	"Leave empty for unlimited": "",
 	"Light": "Light",
 	"Listening...": "듣는 중...",
 	"LLMs can make mistakes. Verify important information.": "LLM은 실수를 할 수 있습니다. 중요한 정보는 확인이 필요합니다.",
@@ -375,6 +377,8 @@
 	"Manage Pipelines": "파이프라인 관리",
 	"March": "3월",
 	"Max Tokens (num_predict)": "최대 토큰(num_predict)",
+	"Max Upload Count": "",
+	"Max Upload Size": "",
 	"Maximum of 3 models can be downloaded simultaneously. Please try again later.": "최대 3개의 모델을 동시에 다운로드할 수 있습니다. 나중에 다시 시도하세요.",
 	"May": "5월",
 	"Memories accessible by LLMs will be shown here.": "LLM에서 액세스할 수 있는 메모리는 여기에 표시됩니다.",
@@ -505,6 +509,7 @@
 	"Reset Vector Storage": "벡터 스토리지 초기화",
 	"Response AutoCopy to Clipboard": "응답을 클립보드에 자동 복사",
 	"Response notifications cannot be activated as the website permissions have been denied. Please visit your browser settings to grant the necessary access.": "",
+	"Response splitting": "",
 	"Role": "역할",
 	"Rosé Pine": "Rosé Pine",
 	"Rosé Pine Dawn": "Rosé Pine Dawn",
@@ -535,6 +540,7 @@
 	"Searched {{count}} sites_one": "sites_one {{count}} 검색됨",
 	"Searched {{count}} sites_other": "sites_other {{count}} 검색됨",
 	"Searching \"{{searchQuery}}\"": "\"{{searchQuery}}\" 검색 중",
+	"Searching Knowledge for \"{{searchQuery}}\"": "",
 	"Searxng Query URL": "Searxng 쿼리 URL",
 	"See readme.md for instructions": "설명은 readme.md를 참조하세요.",
 	"See what's new": "새로운 기능 보기",
@@ -611,6 +617,8 @@
 	"Tfs Z": "Tfs Z",
 	"Thanks for your feedback!": "피드백 감사합니다!",
 	"The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "",
+	"The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "",
+	"The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "",
 	"The score should be a value between 0.0 (0%) and 1.0 (100%).": "점수는 0.0(0%)에서 1.0(100%) 사이의 값이어야 합니다.",
 	"Theme": "테마",
 	"Thinking...": "생각 중...",
@@ -710,6 +718,7 @@
 	"Write a summary in 50 words that summarizes [topic or keyword].": "[주제 또는 키워드]에 대한 50단어 요약문 작성.",
 	"Yesterday": "어제",
 	"You": "당신",
+	"You can only chat with a maximum of {{maxCount}} file(s) at a time.": "",
 	"You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "아래 '관리' 버튼으로 메모리를 추가하여 LLM들과의 상호작용을 개인화할 수 있습니다. 이를 통해 더 유용하고 맞춤화된 경험을 제공합니다.",
 	"You cannot clone a base model": "기본 모델은 복제할 수 없습니다",
 	"You have no archived conversations.": "채팅을 아카이브한 적이 없습니다.",

+ 11 - 2
src/lib/i18n/locales/lt-LT/translation.json

@@ -6,7 +6,6 @@
 	"(latest)": "(naujausias)",
 	"{{ models }}": "{{ models }}",
 	"{{ owner }}: You cannot delete a base model": "{{ owner }}: Negalite ištrinti bazinio modelio",
-	"{{modelName}} is thinking...": "{{modelName}} mąsto...",
 	"{{user}}'s Chats": "{{user}} susirašinėjimai",
 	"{{webUIName}} Backend Required": "{{webUIName}} būtinas serveris",
 	"*Prompt node ID(s) are required for image generation": "",
@@ -68,7 +67,6 @@
 	"Attach file": "Pridėti failą",
 	"Attention to detail": "Dėmesys detalėms",
 	"Audio": "Audio įrašas",
-	"Audio settings updated successfully": "Audio nustatymai sėkmingai išsaugoti",
 	"August": "Rugpjūtis",
 	"Auto-playback response": "Automatinis atsakymo skaitymas",
 	"Automatic1111": "",
@@ -138,9 +136,11 @@
 	"Context Length": "Konteksto ilgis",
 	"Continue Response": "Tęsti atsakymą",
 	"Continue with {{provider}}": "Tęsti su {{provider}}",
+	"Control how message text is split for TTS requests. 'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "",
 	"Controls": "Valdymas",
 	"Copied": "Nukopijuota",
 	"Copied shared chat URL to clipboard!": "Nukopijavote pokalbio nuorodą",
+	"Copied to clipboard": "",
 	"Copy": "Kopijuoti",
 	"Copy Code": "Kopijuoti kodą",
 	"Copy last code block": "Kopijuoti paskutinį kodo bloką",
@@ -285,6 +285,7 @@
 	"File": "Rinkmena",
 	"File Mode": "Rinkmenų rėžimas",
 	"File not found.": "Failas nerastas.",
+	"File size should not exceed {{maxSize}} MB.": "",
 	"Files": "Rinkmenos",
 	"Filter is now globally disabled": "Filtrai nėra leidžiami globaliai",
 	"Filter is now globally enabled": "Filtrai globaliai leidžiami",
@@ -361,6 +362,7 @@
 	"large language models, locally.": "dideli kalbos modeliai, lokaliai",
 	"Last Active": "Paskutinį kartą aktyvus",
 	"Last Modified": "Paskutinis pakeitimas",
+	"Leave empty for unlimited": "",
 	"Light": "Šviesus",
 	"Listening...": "Klausoma...",
 	"LLMs can make mistakes. Verify important information.": "Dideli kalbos modeliai gali klysti. Patikrinkite atsakymų teisingumą.",
@@ -375,6 +377,8 @@
 	"Manage Pipelines": "Tvarkyti procesus",
 	"March": "Kovas",
 	"Max Tokens (num_predict)": "Maksimalus žetonų kiekis (num_predict)",
+	"Max Upload Count": "",
+	"Max Upload Size": "",
 	"Maximum of 3 models can be downloaded simultaneously. Please try again later.": "Daugiausiai trys modeliai gali būti parsisiunčiami vienu metu.",
 	"May": "gegužė",
 	"Memories accessible by LLMs will be shown here.": "Atminitis prieinama kalbos modelio bus rodoma čia.",
@@ -505,6 +509,7 @@
 	"Reset Vector Storage": "Reinicializuoti vektorių atmintį",
 	"Response AutoCopy to Clipboard": "Automatiškai nukopijuoti atsakymą",
 	"Response notifications cannot be activated as the website permissions have been denied. Please visit your browser settings to grant the necessary access.": "Naršyklė neleidžia siųsti pranešimų",
+	"Response splitting": "",
 	"Role": "Rolė",
 	"Rosé Pine": "Rosé Pine",
 	"Rosé Pine Dawn": "Rosé Pine Dawn",
@@ -537,6 +542,7 @@
 	"Searched {{count}} sites_many": "Ieškota {{count}} sites_many",
 	"Searched {{count}} sites_other": "Ieškota {{count}} sites_other",
 	"Searching \"{{searchQuery}}\"": "Ieškoma \"{{searchQuery}}\"",
+	"Searching Knowledge for \"{{searchQuery}}\"": "",
 	"Searxng Query URL": "Searxng užklausos URL",
 	"See readme.md for instructions": "Žiūrėti readme.md papildomoms instrukcijoms",
 	"See what's new": "Žiūrėti naujoves",
@@ -613,6 +619,8 @@
 	"Tfs Z": "Tfs Z",
 	"Thanks for your feedback!": "Ačiū už atsiliepimus",
 	"The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "Šis modulis kuriamas savanorių. Palaikykite jų darbus finansiškai arba prisidėdami kodu.",
+	"The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "",
+	"The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "",
 	"The score should be a value between 0.0 (0%) and 1.0 (100%).": "Rezultatas turėtų būti tarp 0.0 (0%) ir 1.0 (100%)",
 	"Theme": "Tema",
 	"Thinking...": "Mąsto...",
@@ -712,6 +720,7 @@
 	"Write a summary in 50 words that summarizes [topic or keyword].": "Parašyk santrumpą trumpesnę nei 50 žodžių šiam tekstui: [tekstas]",
 	"Yesterday": "Vakar",
 	"You": "Jūs",
+	"You can only chat with a maximum of {{maxCount}} file(s) at a time.": "",
 	"You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "Galite pagerinti modelių darbą suteikdami jiems atminties funkcionalumą.",
 	"You cannot clone a base model": "Negalite klonuoti bazinio modelio",
 	"You have no archived conversations.": "Jūs neturite archyvuotų pokalbių",

+ 11 - 2
src/lib/i18n/locales/ms-MY/translation.json

@@ -6,7 +6,6 @@
 	"(latest)": "(terkini)",
 	"{{ models }}": "{{ models }}",
 	"{{ owner }}: You cannot delete a base model": "{{ owner }}: Anda tidak boleh memadamkan model asas",
-	"{{modelName}} is thinking...": "{{modelName}} sedang berfikir...",
 	"{{user}}'s Chats": "Perbualan {{user}}",
 	"{{webUIName}} Backend Required": "{{webUIName}} Backend diperlukan",
 	"*Prompt node ID(s) are required for image generation": "",
@@ -68,7 +67,6 @@
 	"Attach file": "Kepilkan Fail",
 	"Attention to detail": "Perincian",
 	"Audio": "Audio",
-	"Audio settings updated successfully": "Tetapan audio berjaya dikemas kini",
 	"August": "Ogos",
 	"Auto-playback response": "Main semula respons secara automatik",
 	"Automatic1111": "",
@@ -138,9 +136,11 @@
 	"Context Length": "Panjang Konteks",
 	"Continue Response": "Teruskan Respons",
 	"Continue with {{provider}}": "Teruskan dengan {{provider}}",
+	"Control how message text is split for TTS requests. 'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "",
 	"Controls": "Kawalan",
 	"Copied": "Disalin",
 	"Copied shared chat URL to clipboard!": "Menyalin URL sembang kongsi ke papan klip",
+	"Copied to clipboard": "",
 	"Copy": "Salin",
 	"Copy Code": "Salin Kod",
 	"Copy last code block": "Salin Blok Kod Terakhir",
@@ -285,6 +285,7 @@
 	"File": "Fail",
 	"File Mode": "Mod Fail",
 	"File not found.": "Fail tidak dijumpai",
+	"File size should not exceed {{maxSize}} MB.": "",
 	"Files": "Fail-Fail",
 	"Filter is now globally disabled": "Tapisan kini dilumpuhkan secara global",
 	"Filter is now globally enabled": "Tapisan kini dibenarkan secara global",
@@ -361,6 +362,7 @@
 	"large language models, locally.": "model bahasa besar, tempatan.",
 	"Last Active": "Dilihat aktif terakhir pada",
 	"Last Modified": "Kemaskini terakhir pada",
+	"Leave empty for unlimited": "",
 	"Light": "Cerah",
 	"Listening...": "Mendengar...",
 	"LLMs can make mistakes. Verify important information.": "LLM boleh membuat kesilapan. Sahkan maklumat penting",
@@ -375,6 +377,8 @@
 	"Manage Pipelines": "Urus 'Pipelines'",
 	"March": "Mac",
 	"Max Tokens (num_predict)": "Token Maksimum ( num_predict )",
+	"Max Upload Count": "",
+	"Max Upload Size": "",
 	"Maximum of 3 models can be downloaded simultaneously. Please try again later.": "Maksimum 3 model boleh dimuat turun serentak. Sila cuba sebentar lagi.",
 	"May": "Mei",
 	"Memories accessible by LLMs will be shown here.": "Memori yang boleh diakses oleh LLM akan ditunjukkan di sini.",
@@ -505,6 +509,7 @@
 	"Reset Vector Storage": "Tetapkan Semula Storan Vektor",
 	"Response AutoCopy to Clipboard": "Salin Response secara Automatik ke Papan Klip",
 	"Response notifications cannot be activated as the website permissions have been denied. Please visit your browser settings to grant the necessary access.": "Pemberitahuan respons tidak boleh diaktifkan kerana kebenaran tapak web tidak diberi. Sila lawati tetapan pelayar web anda untuk memberikan akses yang diperlukan.",
+	"Response splitting": "",
 	"Role": "Peranan",
 	"Rosé Pine": "Rosé Pine",
 	"Rosé Pine Dawn": "Rosé Pine Dawn",
@@ -535,6 +540,7 @@
 	"Searched {{count}} sites_one": "Mencari {{count}} sites_one",
 	"Searched {{count}} sites_other": "Mencari {{count}} tapak_lain",
 	"Searching \"{{searchQuery}}\"": "Mencari \"{{searchQuery}}\"",
+	"Searching Knowledge for \"{{searchQuery}}\"": "",
 	"Searxng Query URL": "URL Pertanyaan Searxng",
 	"See readme.md for instructions": "Lihat readme.md untuk arahan",
 	"See what's new": "Lihat apa yang terbaru",
@@ -611,6 +617,8 @@
 	"Tfs Z": "Tfs Z",
 	"Thanks for your feedback!": "Terima kasih atas maklum balas anda!",
 	"The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "Pembangun di sebalik 'plugin' ini adalah sukarelawan yang bersemangat daripada komuniti. Jika anda mendapati 'plugin' ini membantu, sila pertimbangkan untuk menyumbang kepada pembangunannya.",
+	"The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "",
+	"The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "",
 	"The score should be a value between 0.0 (0%) and 1.0 (100%).": "Skor hendaklah berada diantara 0.0 (0%) dan 1.0 (100%).",
 	"Theme": "Tema",
 	"Thinking...": "Berfikir...",
@@ -710,6 +718,7 @@
 	"Write a summary in 50 words that summarizes [topic or keyword].": "Tulis ringkasan dalam 50 patah perkataan yang meringkaskan [topik atau kata kunci].",
 	"Yesterday": "Semalam",
 	"You": "Anda",
+	"You can only chat with a maximum of {{maxCount}} file(s) at a time.": "",
 	"You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "Anda boleh memperibadikan interaksi anda dengan LLM dengan menambahkan memori melalui butang 'Urus' di bawah, menjadikannya lebih membantu dan disesuaikan dengan anda.",
 	"You cannot clone a base model": "Anda tidak boleh mengklon model asas",
 	"You have no archived conversations.": "Anda tidak mempunyai perbualan yang diarkibkan",

+ 11 - 2
src/lib/i18n/locales/nb-NO/translation.json

@@ -6,7 +6,6 @@
 	"(latest)": "(siste)",
 	"{{ models }}": "{{ modeller }}",
 	"{{ owner }}: You cannot delete a base model": "{{ eier }}: Du kan ikke slette en grunnmodell",
-	"{{modelName}} is thinking...": "{{modelName}} tenker...",
 	"{{user}}'s Chats": "{{user}}s samtaler",
 	"{{webUIName}} Backend Required": "{{webUIName}} Backend kreves",
 	"*Prompt node ID(s) are required for image generation": "",
@@ -68,7 +67,6 @@
 	"Attach file": "Legg ved fil",
 	"Attention to detail": "Sans for detaljer",
 	"Audio": "Lyd",
-	"Audio settings updated successfully": "Lydinnstillingene ble oppdatert",
 	"August": "august",
 	"Auto-playback response": "Automatisk avspilling av svar",
 	"Automatic1111": "",
@@ -138,9 +136,11 @@
 	"Context Length": "Kontekstlengde",
 	"Continue Response": "Fortsett svar",
 	"Continue with {{provider}}": "Fortsett med {{provider}}",
+	"Control how message text is split for TTS requests. 'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "",
 	"Controls": "Kontroller",
 	"Copied": "",
 	"Copied shared chat URL to clipboard!": "Kopiert delt chat-URL til utklippstavlen!",
+	"Copied to clipboard": "",
 	"Copy": "Kopier",
 	"Copy Code": "",
 	"Copy last code block": "Kopier siste kodeblokk",
@@ -285,6 +285,7 @@
 	"File": "Fil",
 	"File Mode": "Filmodus",
 	"File not found.": "Fil ikke funnet.",
+	"File size should not exceed {{maxSize}} MB.": "",
 	"Files": "Filer",
 	"Filter is now globally disabled": "Filteret er nå deaktivert på systemnivå",
 	"Filter is now globally enabled": "Filteret er nå aktivert på systemnivå",
@@ -361,6 +362,7 @@
 	"large language models, locally.": "Store språkmodeller, lokalt.",
 	"Last Active": "Sist aktiv",
 	"Last Modified": "Sist endret",
+	"Leave empty for unlimited": "",
 	"Light": "Lys",
 	"Listening...": "Lytter ...",
 	"LLMs can make mistakes. Verify important information.": "Språkmodeller kan gjøre feil. Verifiser viktige opplysninger.",
@@ -375,6 +377,8 @@
 	"Manage Pipelines": "Administrer pipelines",
 	"March": "mars",
 	"Max Tokens (num_predict)": "Maks antall tokens (num_predict)",
+	"Max Upload Count": "",
+	"Max Upload Size": "",
 	"Maximum of 3 models can be downloaded simultaneously. Please try again later.": "Maksimalt 3 modeller kan lastes ned samtidig. Vennligst prøv igjen senere.",
 	"May": "mai",
 	"Memories accessible by LLMs will be shown here.": "Minner tilgjengelige for språkmodeller vil vises her.",
@@ -505,6 +509,7 @@
 	"Reset Vector Storage": "Tilbakestill vektorlagring",
 	"Response AutoCopy to Clipboard": "Respons auto-kopi til utklippstavle",
 	"Response notifications cannot be activated as the website permissions have been denied. Please visit your browser settings to grant the necessary access.": "Respons-varsler kan ikke aktiveres da nettstedsrettighetene er nektet. Vennligst se nettleserinnstillingene dine for å gi nødvendig tilgang.",
+	"Response splitting": "",
 	"Role": "Rolle",
 	"Rosé Pine": "Rosé Pine",
 	"Rosé Pine Dawn": "Rosé Pine Dawn",
@@ -535,6 +540,7 @@
 	"Searched {{count}} sites_one": "Søkte på {{count}} side",
 	"Searched {{count}} sites_other": "Søkte på {{count}} sider",
 	"Searching \"{{searchQuery}}\"": "Søker etter \"{{searchQuery}}\"",
+	"Searching Knowledge for \"{{searchQuery}}\"": "",
 	"Searxng Query URL": "Searxng forespørsels-URL",
 	"See readme.md for instructions": "Se readme.md for instruksjoner",
 	"See what's new": "Se hva som er nytt",
@@ -611,6 +617,8 @@
 	"Tfs Z": "Tfs Z",
 	"Thanks for your feedback!": "Takk for tilbakemeldingen!",
 	"The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "Utviklerne bak denne utvidelsen er lidenskapelige frivillige fra fellesskapet. Hvis du finner denne utvidelsen nyttig, vennligst vurder å bidra til utviklingen.",
+	"The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "",
+	"The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "",
 	"The score should be a value between 0.0 (0%) and 1.0 (100%).": "Poengsummen skal være en verdi mellom 0,0 (0%) og 1,0 (100%).",
 	"Theme": "Tema",
 	"Thinking...": "Tenker ...",
@@ -710,6 +718,7 @@
 	"Write a summary in 50 words that summarizes [topic or keyword].": "Skriv et sammendrag på 50 ord som oppsummerer [emne eller nøkkelord].",
 	"Yesterday": "I går",
 	"You": "Du",
+	"You can only chat with a maximum of {{maxCount}} file(s) at a time.": "",
 	"You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "Du kan tilpasse interaksjonene dine med språkmodeller ved å legge til minner gjennom 'Administrer'-knappen nedenfor, slik at de blir mer hjelpsomme og tilpasset deg.",
 	"You cannot clone a base model": "Du kan ikke klone en grunnmodell",
 	"You have no archived conversations.": "Du har ingen arkiverte samtaler.",

+ 11 - 2
src/lib/i18n/locales/nl-NL/translation.json

@@ -6,7 +6,6 @@
 	"(latest)": "(nieuwste)",
 	"{{ models }}": "{{ modellen }}",
 	"{{ owner }}: You cannot delete a base model": "{{ owner }}: U kunt een basismodel niet verwijderen",
-	"{{modelName}} is thinking...": "{{modelName}} is aan het denken...",
 	"{{user}}'s Chats": "{{user}}'s Chats",
 	"{{webUIName}} Backend Required": "{{webUIName}} Backend Verlpicht",
 	"*Prompt node ID(s) are required for image generation": "",
@@ -68,7 +67,6 @@
 	"Attach file": "Voeg een bestand toe",
 	"Attention to detail": "Attention to detail",
 	"Audio": "Audio",
-	"Audio settings updated successfully": "",
 	"August": "Augustus",
 	"Auto-playback response": "Automatisch afspelen van antwoord",
 	"Automatic1111": "",
@@ -138,9 +136,11 @@
 	"Context Length": "Context Lengte",
 	"Continue Response": "Doorgaan met Antwoord",
 	"Continue with {{provider}}": "",
+	"Control how message text is split for TTS requests. 'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "",
 	"Controls": "",
 	"Copied": "",
 	"Copied shared chat URL to clipboard!": "URL van gedeelde gesprekspagina gekopieerd naar klembord!",
+	"Copied to clipboard": "",
 	"Copy": "Kopieer",
 	"Copy Code": "",
 	"Copy last code block": "Kopieer laatste code blok",
@@ -285,6 +285,7 @@
 	"File": "",
 	"File Mode": "Bestandsmodus",
 	"File not found.": "Bestand niet gevonden.",
+	"File size should not exceed {{maxSize}} MB.": "",
 	"Files": "",
 	"Filter is now globally disabled": "",
 	"Filter is now globally enabled": "",
@@ -361,6 +362,7 @@
 	"large language models, locally.": "",
 	"Last Active": "Laatst Actief",
 	"Last Modified": "",
+	"Leave empty for unlimited": "",
 	"Light": "Licht",
 	"Listening...": "",
 	"LLMs can make mistakes. Verify important information.": "LLMs kunnen fouten maken. Verifieer belangrijke informatie.",
@@ -375,6 +377,8 @@
 	"Manage Pipelines": "Pijplijnen beheren",
 	"March": "Maart",
 	"Max Tokens (num_predict)": "Max Tokens (num_predict)",
+	"Max Upload Count": "",
+	"Max Upload Size": "",
 	"Maximum of 3 models can be downloaded simultaneously. Please try again later.": "Maximaal 3 modellen kunnen tegelijkertijd worden gedownload. Probeer het later opnieuw.",
 	"May": "Mei",
 	"Memories accessible by LLMs will be shown here.": "Geheugen toegankelijk voor LLMs wordt hier getoond.",
@@ -505,6 +509,7 @@
 	"Reset Vector Storage": "Reset Vector Opslag",
 	"Response AutoCopy to Clipboard": "Antwoord Automatisch Kopiëren naar Klembord",
 	"Response notifications cannot be activated as the website permissions have been denied. Please visit your browser settings to grant the necessary access.": "",
+	"Response splitting": "",
 	"Role": "Rol",
 	"Rosé Pine": "Rosé Pine",
 	"Rosé Pine Dawn": "Rosé Pine Dawn",
@@ -535,6 +540,7 @@
 	"Searched {{count}} sites_one": "Gezocht op {{count}} sites_one",
 	"Searched {{count}} sites_other": "Gezocht op {{count}} sites_other",
 	"Searching \"{{searchQuery}}\"": "",
+	"Searching Knowledge for \"{{searchQuery}}\"": "",
 	"Searxng Query URL": "Searxng Query URL",
 	"See readme.md for instructions": "Zie readme.md voor instructies",
 	"See what's new": "Zie wat er nieuw is",
@@ -611,6 +617,8 @@
 	"Tfs Z": "Tfs Z",
 	"Thanks for your feedback!": "Bedankt voor uw feedback!",
 	"The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "",
+	"The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "",
+	"The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "",
 	"The score should be a value between 0.0 (0%) and 1.0 (100%).": "Het score moet een waarde zijn tussen 0.0 (0%) en 1.0 (100%).",
 	"Theme": "Thema",
 	"Thinking...": "",
@@ -710,6 +718,7 @@
 	"Write a summary in 50 words that summarizes [topic or keyword].": "Schrijf een samenvatting in 50 woorden die [onderwerp of trefwoord] samenvat.",
 	"Yesterday": "gisteren",
 	"You": "U",
+	"You can only chat with a maximum of {{maxCount}} file(s) at a time.": "",
 	"You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "",
 	"You cannot clone a base model": "U kunt een basismodel niet klonen",
 	"You have no archived conversations.": "U heeft geen gearchiveerde gesprekken.",

+ 11 - 2
src/lib/i18n/locales/pa-IN/translation.json

@@ -6,7 +6,6 @@
 	"(latest)": "(ਤਾਜ਼ਾ)",
 	"{{ models }}": "{{ ਮਾਡਲ }}",
 	"{{ owner }}: You cannot delete a base model": "{{ ਮਾਲਕ }}: ਤੁਸੀਂ ਬੇਸ ਮਾਡਲ ਨੂੰ ਮਿਟਾ ਨਹੀਂ ਸਕਦੇ",
-	"{{modelName}} is thinking...": "{{modelName}} ਸੋਚ ਰਿਹਾ ਹੈ...",
 	"{{user}}'s Chats": "{{user}} ਦੀਆਂ ਗੱਲਾਂ",
 	"{{webUIName}} Backend Required": "{{webUIName}} ਬੈਕਐਂਡ ਲੋੜੀਂਦਾ ਹੈ",
 	"*Prompt node ID(s) are required for image generation": "",
@@ -68,7 +67,6 @@
 	"Attach file": "ਫਾਈਲ ਜੋੜੋ",
 	"Attention to detail": "ਵੇਰਵੇ 'ਤੇ ਧਿਆਨ",
 	"Audio": "ਆਡੀਓ",
-	"Audio settings updated successfully": "",
 	"August": "ਅਗਸਤ",
 	"Auto-playback response": "ਆਟੋ-ਪਲੇਬੈਕ ਜਵਾਬ",
 	"Automatic1111": "",
@@ -138,9 +136,11 @@
 	"Context Length": "ਸੰਦਰਭ ਲੰਬਾਈ",
 	"Continue Response": "ਜਵਾਬ ਜਾਰੀ ਰੱਖੋ",
 	"Continue with {{provider}}": "",
+	"Control how message text is split for TTS requests. 'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "",
 	"Controls": "",
 	"Copied": "",
 	"Copied shared chat URL to clipboard!": "ਸਾਂਝੇ ਕੀਤੇ ਗੱਲਬਾਤ URL ਨੂੰ ਕਲਿੱਪਬੋਰਡ 'ਤੇ ਕਾਪੀ ਕਰ ਦਿੱਤਾ!",
+	"Copied to clipboard": "",
 	"Copy": "ਕਾਪੀ ਕਰੋ",
 	"Copy Code": "",
 	"Copy last code block": "ਆਖਰੀ ਕੋਡ ਬਲਾਕ ਨੂੰ ਕਾਪੀ ਕਰੋ",
@@ -285,6 +285,7 @@
 	"File": "",
 	"File Mode": "ਫਾਈਲ ਮੋਡ",
 	"File not found.": "ਫਾਈਲ ਨਹੀਂ ਮਿਲੀ।",
+	"File size should not exceed {{maxSize}} MB.": "",
 	"Files": "",
 	"Filter is now globally disabled": "",
 	"Filter is now globally enabled": "",
@@ -361,6 +362,7 @@
 	"large language models, locally.": "",
 	"Last Active": "ਆਖਰੀ ਸਰਗਰਮ",
 	"Last Modified": "",
+	"Leave empty for unlimited": "",
 	"Light": "ਹਲਕਾ",
 	"Listening...": "",
 	"LLMs can make mistakes. Verify important information.": "LLMs ਗਲਤੀਆਂ ਕਰ ਸਕਦੇ ਹਨ। ਮਹੱਤਵਪੂਰਨ ਜਾਣਕਾਰੀ ਦੀ ਪੁਸ਼ਟੀ ਕਰੋ।",
@@ -375,6 +377,8 @@
 	"Manage Pipelines": "ਪਾਈਪਲਾਈਨਾਂ ਦਾ ਪ੍ਰਬੰਧਨ ਕਰੋ",
 	"March": "ਮਾਰਚ",
 	"Max Tokens (num_predict)": "ਮੈਕਸ ਟੋਕਨ (num_predict)",
+	"Max Upload Count": "",
+	"Max Upload Size": "",
 	"Maximum of 3 models can be downloaded simultaneously. Please try again later.": "ਇੱਕ ਸਮੇਂ ਵਿੱਚ ਵੱਧ ਤੋਂ ਵੱਧ 3 ਮਾਡਲ ਡਾਊਨਲੋਡ ਕੀਤੇ ਜਾ ਸਕਦੇ ਹਨ। ਕਿਰਪਾ ਕਰਕੇ ਬਾਅਦ ਵਿੱਚ ਦੁਬਾਰਾ ਕੋਸ਼ਿਸ਼ ਕਰੋ।",
 	"May": "ਮਈ",
 	"Memories accessible by LLMs will be shown here.": "LLMs ਲਈ ਸਮਰੱਥ ਕਾਰਨ ਇੱਕ ਸੂਚਨਾ ਨੂੰ ਸ਼ਾਮਲ ਕੀਤਾ ਗਿਆ ਹੈ।",
@@ -505,6 +509,7 @@
 	"Reset Vector Storage": "ਵੈਕਟਰ ਸਟੋਰੇਜ ਨੂੰ ਰੀਸੈਟ ਕਰੋ",
 	"Response AutoCopy to Clipboard": "ਜਵਾਬ ਆਟੋ ਕਾਪੀ ਕਲਿੱਪਬੋਰਡ 'ਤੇ",
 	"Response notifications cannot be activated as the website permissions have been denied. Please visit your browser settings to grant the necessary access.": "",
+	"Response splitting": "",
 	"Role": "ਭੂਮਿਕਾ",
 	"Rosé Pine": "ਰੋਜ਼ ਪਾਈਨ",
 	"Rosé Pine Dawn": "ਰੋਜ਼ ਪਾਈਨ ਡਾਨ",
@@ -535,6 +540,7 @@
 	"Searched {{count}} sites_one": "ਖੋਜਿਆ {{count}} sites_one",
 	"Searched {{count}} sites_other": "ਖੋਜਿਆ {{count}} sites_other",
 	"Searching \"{{searchQuery}}\"": "",
+	"Searching Knowledge for \"{{searchQuery}}\"": "",
 	"Searxng Query URL": "Searxng Query URL",
 	"See readme.md for instructions": "ਹਦਾਇਤਾਂ ਲਈ readme.md ਵੇਖੋ",
 	"See what's new": "ਨਵਾਂ ਕੀ ਹੈ ਵੇਖੋ",
@@ -611,6 +617,8 @@
 	"Tfs Z": "Tfs Z",
 	"Thanks for your feedback!": "ਤੁਹਾਡੇ ਫੀਡਬੈਕ ਲਈ ਧੰਨਵਾਦ!",
 	"The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "",
+	"The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "",
+	"The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "",
 	"The score should be a value between 0.0 (0%) and 1.0 (100%).": "ਸਕੋਰ 0.0 (0%) ਅਤੇ 1.0 (100%) ਦੇ ਵਿਚਕਾਰ ਹੋਣਾ ਚਾਹੀਦਾ ਹੈ।",
 	"Theme": "ਥੀਮ",
 	"Thinking...": "",
@@ -710,6 +718,7 @@
 	"Write a summary in 50 words that summarizes [topic or keyword].": "50 ਸ਼ਬਦਾਂ ਵਿੱਚ ਇੱਕ ਸੰਖੇਪ ਲਿਖੋ ਜੋ [ਵਿਸ਼ਾ ਜਾਂ ਕੁੰਜੀ ਸ਼ਬਦ] ਨੂੰ ਸੰਖੇਪ ਕਰਦਾ ਹੈ।",
 	"Yesterday": "ਕੱਲ੍ਹ",
 	"You": "ਤੁਸੀਂ",
+	"You can only chat with a maximum of {{maxCount}} file(s) at a time.": "",
 	"You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "",
 	"You cannot clone a base model": "ਤੁਸੀਂ ਆਧਾਰ ਮਾਡਲ ਨੂੰ ਕਲੋਨ ਨਹੀਂ ਕਰ ਸਕਦੇ",
 	"You have no archived conversations.": "ਤੁਹਾਡੇ ਕੋਲ ਕੋਈ ਆਰਕਾਈਵ ਕੀਤੀਆਂ ਗੱਲਾਂ ਨਹੀਂ ਹਨ।",

+ 11 - 2
src/lib/i18n/locales/pl-PL/translation.json

@@ -6,7 +6,6 @@
 	"(latest)": "(najnowszy)",
 	"{{ models }}": "{{ modele }}",
 	"{{ owner }}: You cannot delete a base model": "{{ owner }}: Nie można usunąć modelu podstawowego",
-	"{{modelName}} is thinking...": "{{modelName}} myśli...",
 	"{{user}}'s Chats": "{{user}} - czaty",
 	"{{webUIName}} Backend Required": "Backend {{webUIName}} wymagane",
 	"*Prompt node ID(s) are required for image generation": "",
@@ -68,7 +67,6 @@
 	"Attach file": "Dołącz plik",
 	"Attention to detail": "Dbałość o szczegóły",
 	"Audio": "Dźwięk",
-	"Audio settings updated successfully": "",
 	"August": "Sierpień",
 	"Auto-playback response": "Odtwarzanie automatyczne odpowiedzi",
 	"Automatic1111": "",
@@ -138,9 +136,11 @@
 	"Context Length": "Długość kontekstu",
 	"Continue Response": "Kontynuuj odpowiedź",
 	"Continue with {{provider}}": "",
+	"Control how message text is split for TTS requests. 'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "",
 	"Controls": "",
 	"Copied": "",
 	"Copied shared chat URL to clipboard!": "Skopiowano URL czatu do schowka!",
+	"Copied to clipboard": "",
 	"Copy": "Kopiuj",
 	"Copy Code": "",
 	"Copy last code block": "Skopiuj ostatni blok kodu",
@@ -285,6 +285,7 @@
 	"File": "",
 	"File Mode": "Tryb pliku",
 	"File not found.": "Plik nie został znaleziony.",
+	"File size should not exceed {{maxSize}} MB.": "",
 	"Files": "",
 	"Filter is now globally disabled": "",
 	"Filter is now globally enabled": "",
@@ -361,6 +362,7 @@
 	"large language models, locally.": "",
 	"Last Active": "Ostatnio aktywny",
 	"Last Modified": "",
+	"Leave empty for unlimited": "",
 	"Light": "Jasny",
 	"Listening...": "",
 	"LLMs can make mistakes. Verify important information.": "LLMy mogą popełniać błędy. Zweryfikuj ważne informacje.",
@@ -375,6 +377,8 @@
 	"Manage Pipelines": "Zarządzanie potokami",
 	"March": "Marzec",
 	"Max Tokens (num_predict)": "Maksymalna liczba żetonów (num_predict)",
+	"Max Upload Count": "",
+	"Max Upload Size": "",
 	"Maximum of 3 models can be downloaded simultaneously. Please try again later.": "Maksymalnie 3 modele można pobierać jednocześnie. Spróbuj ponownie później.",
 	"May": "Maj",
 	"Memories accessible by LLMs will be shown here.": "Pamięci używane przez LLM będą tutaj widoczne.",
@@ -505,6 +509,7 @@
 	"Reset Vector Storage": "Resetuj przechowywanie wektorów",
 	"Response AutoCopy to Clipboard": "Automatyczne kopiowanie odpowiedzi do schowka",
 	"Response notifications cannot be activated as the website permissions have been denied. Please visit your browser settings to grant the necessary access.": "",
+	"Response splitting": "",
 	"Role": "Rola",
 	"Rosé Pine": "Rosé Pine",
 	"Rosé Pine Dawn": "Rosé Pine Dawn",
@@ -537,6 +542,7 @@
 	"Searched {{count}} sites_many": "Wyszukiwano {{count}} sites_many",
 	"Searched {{count}} sites_other": "Wyszukiwano {{count}} sites_other",
 	"Searching \"{{searchQuery}}\"": "",
+	"Searching Knowledge for \"{{searchQuery}}\"": "",
 	"Searxng Query URL": "Adres URL zapytania Searxng",
 	"See readme.md for instructions": "Zajrzyj do readme.md po instrukcje",
 	"See what's new": "Zobacz co nowego",
@@ -613,6 +619,8 @@
 	"Tfs Z": "Tfs Z",
 	"Thanks for your feedback!": "Dzięki za informację zwrotną!",
 	"The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "",
+	"The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "",
+	"The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "",
 	"The score should be a value between 0.0 (0%) and 1.0 (100%).": "Wynik powinien być wartością pomiędzy 0.0 (0%) a 1.0 (100%).",
 	"Theme": "Motyw",
 	"Thinking...": "",
@@ -712,6 +720,7 @@
 	"Write a summary in 50 words that summarizes [topic or keyword].": "Napisz podsumowanie w 50 słowach, które podsumowuje [temat lub słowo kluczowe].",
 	"Yesterday": "Wczoraj",
 	"You": "Ty",
+	"You can only chat with a maximum of {{maxCount}} file(s) at a time.": "",
 	"You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "",
 	"You cannot clone a base model": "Nie można sklonować modelu podstawowego",
 	"You have no archived conversations.": "Nie masz zarchiwizowanych rozmów.",

+ 11 - 2
src/lib/i18n/locales/pt-BR/translation.json

@@ -6,7 +6,6 @@
 	"(latest)": "(último)",
 	"{{ models }}": "{{ models }}",
 	"{{ owner }}: You cannot delete a base model": "{{ owner }}: Você não pode deletar um modelo base",
-	"{{modelName}} is thinking...": "{{modelName}} está pensando...",
 	"{{user}}'s Chats": "Chats de {{user}}",
 	"{{webUIName}} Backend Required": "Backend {{webUIName}} necessário",
 	"*Prompt node ID(s) are required for image generation": "",
@@ -68,7 +67,6 @@
 	"Attach file": "Anexar arquivo",
 	"Attention to detail": "Atenção aos detalhes",
 	"Audio": "Áudio",
-	"Audio settings updated successfully": "Configurações de áudio atualizadas com sucesso",
 	"August": "Agosto",
 	"Auto-playback response": "Resposta de reprodução automática",
 	"Automatic1111": "",
@@ -138,9 +136,11 @@
 	"Context Length": "Context Length",
 	"Continue Response": "Continuar Resposta",
 	"Continue with {{provider}}": "Continuar com {{provider}}",
+	"Control how message text is split for TTS requests. 'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "",
 	"Controls": "Controles",
 	"Copied": "",
 	"Copied shared chat URL to clipboard!": "URL de chat compartilhado copiado para a área de transferência!",
+	"Copied to clipboard": "",
 	"Copy": "Copiar",
 	"Copy Code": "",
 	"Copy last code block": "Copiar último bloco de código",
@@ -285,6 +285,7 @@
 	"File": "Arquivo",
 	"File Mode": "Modo de Arquivo",
 	"File not found.": "Arquivo não encontrado.",
+	"File size should not exceed {{maxSize}} MB.": "",
 	"Files": "Arquivos",
 	"Filter is now globally disabled": "O filtro está agora desativado globalmente",
 	"Filter is now globally enabled": "O filtro está agora ativado globalmente",
@@ -361,6 +362,7 @@
 	"large language models, locally.": "grandes modelos de linguagem, localmente.",
 	"Last Active": "Última Atividade",
 	"Last Modified": "Última Modificação",
+	"Leave empty for unlimited": "",
 	"Light": "Claro",
 	"Listening...": "Escutando...",
 	"LLMs can make mistakes. Verify important information.": "LLMs podem cometer erros. Verifique informações importantes.",
@@ -375,6 +377,8 @@
 	"Manage Pipelines": "Gerenciar Pipelines",
 	"March": "Março",
 	"Max Tokens (num_predict)": "Máximo de Tokens (num_predict)",
+	"Max Upload Count": "",
+	"Max Upload Size": "",
 	"Maximum of 3 models can be downloaded simultaneously. Please try again later.": "Máximo de 3 modelos podem ser baixados simultaneamente. Por favor, tente novamente mais tarde.",
 	"May": "Maio",
 	"Memories accessible by LLMs will be shown here.": "Memórias acessíveis por LLMs serão mostradas aqui.",
@@ -505,6 +509,7 @@
 	"Reset Vector Storage": "Redefinir Armazenamento de Vetores",
 	"Response AutoCopy to Clipboard": "Cópia Automática da Resposta para a Área de Transferência",
 	"Response notifications cannot be activated as the website permissions have been denied. Please visit your browser settings to grant the necessary access.": "Notificações de resposta não podem ser ativadas pois as permissões do site foram negadas. Por favor, visite as configurações do seu navegador para conceder o acesso necessário.",
+	"Response splitting": "",
 	"Role": "Função",
 	"Rosé Pine": "Rosé Pine",
 	"Rosé Pine Dawn": "Rosé Pine Dawn",
@@ -536,6 +541,7 @@
 	"Searched {{count}} sites_many": "Pesquisou {{count}} sites_many",
 	"Searched {{count}} sites_other": "Pesquisou {{count}} sites_other",
 	"Searching \"{{searchQuery}}\"": "Pesquisando \"{{searchQuery}}\"",
+	"Searching Knowledge for \"{{searchQuery}}\"": "",
 	"Searxng Query URL": "URL da Consulta Searxng",
 	"See readme.md for instructions": "Veja readme.md para instruções",
 	"See what's new": "Veja o que há de novo",
@@ -612,6 +618,8 @@
 	"Tfs Z": "Tfs Z",
 	"Thanks for your feedback!": "Obrigado pelo seu feedback!",
 	"The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "Os desenvolvedores por trás deste plugin são voluntários apaixonados da comunidade. Se você achar este plugin útil, considere contribuir para o seu desenvolvimento.",
+	"The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "",
+	"The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "",
 	"The score should be a value between 0.0 (0%) and 1.0 (100%).": "A pontuação deve ser um valor entre 0.0 (0%) e 1.0 (100%).",
 	"Theme": "Tema",
 	"Thinking...": "Pensando...",
@@ -711,6 +719,7 @@
 	"Write a summary in 50 words that summarizes [topic or keyword].": "Escreva um resumo em 50 palavras que resuma [tópico ou palavra-chave].",
 	"Yesterday": "Ontem",
 	"You": "Você",
+	"You can only chat with a maximum of {{maxCount}} file(s) at a time.": "",
 	"You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "Você pode personalizar suas interações com LLMs adicionando memórias através do botão 'Gerenciar' abaixo, tornando-as mais úteis e adaptadas a você.",
 	"You cannot clone a base model": "Você não pode clonar um modelo base",
 	"You have no archived conversations.": "Você não tem conversas arquivadas.",

+ 11 - 2
src/lib/i18n/locales/pt-PT/translation.json

@@ -6,7 +6,6 @@
 	"(latest)": "(mais recente)",
 	"{{ models }}": "{{ modelos }}",
 	"{{ owner }}: You cannot delete a base model": "{{ owner }}: Não é possível excluir um modelo base",
-	"{{modelName}} is thinking...": "{{modelName}} está a pensar...",
 	"{{user}}'s Chats": "{{user}}'s Chats",
 	"{{webUIName}} Backend Required": "{{webUIName}} Backend Necessário",
 	"*Prompt node ID(s) are required for image generation": "",
@@ -68,7 +67,6 @@
 	"Attach file": "Anexar ficheiro",
 	"Attention to detail": "Detalhado",
 	"Audio": "Áudio",
-	"Audio settings updated successfully": "",
 	"August": "Agosto",
 	"Auto-playback response": "Reprodução automática da resposta",
 	"Automatic1111": "",
@@ -138,9 +136,11 @@
 	"Context Length": "Comprimento do Contexto",
 	"Continue Response": "Continuar resposta",
 	"Continue with {{provider}}": "",
+	"Control how message text is split for TTS requests. 'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "",
 	"Controls": "",
 	"Copied": "",
 	"Copied shared chat URL to clipboard!": "URL de Conversa partilhado copiada com sucesso!",
+	"Copied to clipboard": "",
 	"Copy": "Copiar",
 	"Copy Code": "",
 	"Copy last code block": "Copiar último bloco de código",
@@ -285,6 +285,7 @@
 	"File": "",
 	"File Mode": "Modo de Ficheiro",
 	"File not found.": "Ficheiro não encontrado.",
+	"File size should not exceed {{maxSize}} MB.": "",
 	"Files": "",
 	"Filter is now globally disabled": "",
 	"Filter is now globally enabled": "",
@@ -361,6 +362,7 @@
 	"large language models, locally.": "",
 	"Last Active": "Último Ativo",
 	"Last Modified": "",
+	"Leave empty for unlimited": "",
 	"Light": "Claro",
 	"Listening...": "A escutar...",
 	"LLMs can make mistakes. Verify important information.": "LLMs podem cometer erros. Verifique informações importantes.",
@@ -375,6 +377,8 @@
 	"Manage Pipelines": "Gerir pipelines",
 	"March": "Março",
 	"Max Tokens (num_predict)": "Máx Tokens (num_predict)",
+	"Max Upload Count": "",
+	"Max Upload Size": "",
 	"Maximum of 3 models can be downloaded simultaneously. Please try again later.": "O máximo de 3 modelos podem ser descarregados simultaneamente. Tente novamente mais tarde.",
 	"May": "Maio",
 	"Memories accessible by LLMs will be shown here.": "Memórias acessíveis por LLMs serão mostradas aqui.",
@@ -505,6 +509,7 @@
 	"Reset Vector Storage": "Redefinir Armazenamento de Vetor",
 	"Response AutoCopy to Clipboard": "Cópia Automática da Resposta para a Área de Transferência",
 	"Response notifications cannot be activated as the website permissions have been denied. Please visit your browser settings to grant the necessary access.": "",
+	"Response splitting": "",
 	"Role": "Função",
 	"Rosé Pine": "Rosé Pine",
 	"Rosé Pine Dawn": "Rosé Pine Dawn",
@@ -536,6 +541,7 @@
 	"Searched {{count}} sites_many": "Pesquisado {{count}} sites_many",
 	"Searched {{count}} sites_other": "Pesquisado {{count}} sites_other",
 	"Searching \"{{searchQuery}}\"": "",
+	"Searching Knowledge for \"{{searchQuery}}\"": "",
 	"Searxng Query URL": "URL de consulta Searxng",
 	"See readme.md for instructions": "Consulte readme.md para obter instruções",
 	"See what's new": "Veja o que há de novo",
@@ -612,6 +618,8 @@
 	"Tfs Z": "Tfs Z",
 	"Thanks for your feedback!": "Obrigado pelo seu feedback!",
 	"The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "",
+	"The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "",
+	"The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "",
 	"The score should be a value between 0.0 (0%) and 1.0 (100%).": "A pontuação deve ser um valor entre 0.0 (0%) e 1.0 (100%).",
 	"Theme": "Tema",
 	"Thinking...": "A pensar...",
@@ -711,6 +719,7 @@
 	"Write a summary in 50 words that summarizes [topic or keyword].": "Escreva um resumo em 50 palavras que resuma [tópico ou palavra-chave].",
 	"Yesterday": "Ontem",
 	"You": "Você",
+	"You can only chat with a maximum of {{maxCount}} file(s) at a time.": "",
 	"You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "Você pode personalizar as suas interações com LLMs adicionando memórias através do botão ‘Gerir’ abaixo, tornando-as mais úteis e personalizadas para você.",
 	"You cannot clone a base model": "Não é possível clonar um modelo base",
 	"You have no archived conversations.": "Você não tem conversas arquivadas.",

+ 11 - 2
src/lib/i18n/locales/ro-RO/translation.json

@@ -6,7 +6,6 @@
 	"(latest)": "(ultimul)",
 	"{{ models }}": "{{ modele }}",
 	"{{ owner }}: You cannot delete a base model": "{{ owner }}: Nu puteți șterge un model de bază",
-	"{{modelName}} is thinking...": "{{modelName}} gândește...",
 	"{{user}}'s Chats": "Conversațiile lui {{user}}",
 	"{{webUIName}} Backend Required": "Este necesar backend-ul {{webUIName}}",
 	"*Prompt node ID(s) are required for image generation": "",
@@ -68,7 +67,6 @@
 	"Attach file": "Atașează fișier",
 	"Attention to detail": "Atenție la detalii",
 	"Audio": "Audio",
-	"Audio settings updated successfully": "Setările audio au fost actualizate cu succes",
 	"August": "August",
 	"Auto-playback response": "Redare automată a răspunsului",
 	"Automatic1111": "",
@@ -138,9 +136,11 @@
 	"Context Length": "Lungime Context",
 	"Continue Response": "Continuă Răspunsul",
 	"Continue with {{provider}}": "Continuă cu {{provider}}",
+	"Control how message text is split for TTS requests. 'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "",
 	"Controls": "Controale",
 	"Copied": "",
 	"Copied shared chat URL to clipboard!": "URL-ul conversației partajate a fost copiat în clipboard!",
+	"Copied to clipboard": "",
 	"Copy": "Copiază",
 	"Copy Code": "",
 	"Copy last code block": "Copiază ultimul bloc de cod",
@@ -285,6 +285,7 @@
 	"File": "Fișier",
 	"File Mode": "Mod Fișier",
 	"File not found.": "Fișierul nu a fost găsit.",
+	"File size should not exceed {{maxSize}} MB.": "",
 	"Files": "Fișiere",
 	"Filter is now globally disabled": "Filtrul este acum dezactivat global",
 	"Filter is now globally enabled": "Filtrul este acum activat global",
@@ -361,6 +362,7 @@
 	"large language models, locally.": "modele mari de limbaj, local.",
 	"Last Active": "Ultima Activitate",
 	"Last Modified": "Ultima Modificare",
+	"Leave empty for unlimited": "",
 	"Light": "Luminos",
 	"Listening...": "Ascult...",
 	"LLMs can make mistakes. Verify important information.": "LLM-urile pot face greșeli. Verificați informațiile importante.",
@@ -375,6 +377,8 @@
 	"Manage Pipelines": "Gestionează Conductele",
 	"March": "Martie",
 	"Max Tokens (num_predict)": "Număr Maxim de Tokeni (num_predict)",
+	"Max Upload Count": "",
+	"Max Upload Size": "",
 	"Maximum of 3 models can be downloaded simultaneously. Please try again later.": "Maxim 3 modele pot fi descărcate simultan. Vă rugăm să încercați din nou mai târziu.",
 	"May": "Mai",
 	"Memories accessible by LLMs will be shown here.": "Memoriile accesibile de LLM-uri vor fi afișate aici.",
@@ -505,6 +509,7 @@
 	"Reset Vector Storage": "Resetează Stocarea Vectorilor",
 	"Response AutoCopy to Clipboard": "Copiere Automată a Răspunsului în Clipboard",
 	"Response notifications cannot be activated as the website permissions have been denied. Please visit your browser settings to grant the necessary access.": "Notificările de răspuns nu pot fi activate deoarece permisiunile site-ului au fost refuzate. Vă rugăm să vizitați setările browserului pentru a acorda accesul necesar.",
+	"Response splitting": "",
 	"Role": "Rol",
 	"Rosé Pine": "Rosé Pine",
 	"Rosé Pine Dawn": "Rosé Pine Dawn",
@@ -536,6 +541,7 @@
 	"Searched {{count}} sites_few": "",
 	"Searched {{count}} sites_other": "{{count}} alte site-uri căutate",
 	"Searching \"{{searchQuery}}\"": "Căutare \"{{searchQuery}}\"",
+	"Searching Knowledge for \"{{searchQuery}}\"": "",
 	"Searxng Query URL": "URL Interogare Searxng",
 	"See readme.md for instructions": "Consultați readme.md pentru instrucțiuni",
 	"See what's new": "Vezi ce e nou",
@@ -612,6 +618,8 @@
 	"Tfs Z": "Tfs Z",
 	"Thanks for your feedback!": "Mulțumim pentru feedback!",
 	"The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "Dezvoltatorii din spatele acestui plugin sunt voluntari pasionați din comunitate. Dacă considerați acest plugin util, vă rugăm să luați în considerare contribuția la dezvoltarea sa.",
+	"The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "",
+	"The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "",
 	"The score should be a value between 0.0 (0%) and 1.0 (100%).": "Scorul ar trebui să fie o valoare între 0.0 (0%) și 1.0 (100%).",
 	"Theme": "Temă",
 	"Thinking...": "Gândește...",
@@ -711,6 +719,7 @@
 	"Write a summary in 50 words that summarizes [topic or keyword].": "Scrieți un rezumat în 50 de cuvinte care rezumă [subiect sau cuvânt cheie].",
 	"Yesterday": "Ieri",
 	"You": "Tu",
+	"You can only chat with a maximum of {{maxCount}} file(s) at a time.": "",
 	"You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "Puteți personaliza interacțiunile dvs. cu LLM-urile adăugând amintiri prin butonul 'Gestionează' de mai jos, făcându-le mai utile și adaptate la dvs.",
 	"You cannot clone a base model": "Nu puteți clona un model de bază",
 	"You have no archived conversations.": "Nu aveți conversații arhivate.",

+ 11 - 2
src/lib/i18n/locales/ru-RU/translation.json

@@ -6,7 +6,6 @@
 	"(latest)": "(последняя)",
 	"{{ models }}": "{{ модели }}",
 	"{{ owner }}: You cannot delete a base model": "{{ owner }}: Вы не можете удалить базовую модель",
-	"{{modelName}} is thinking...": "{{modelName}} думает...",
 	"{{user}}'s Chats": "Чаты {{user}}'а",
 	"{{webUIName}} Backend Required": "Необходимо подключение к серверу {{webUIName}}",
 	"*Prompt node ID(s) are required for image generation": "",
@@ -68,7 +67,6 @@
 	"Attach file": "Прикрепить файл",
 	"Attention to detail": "Внимание к деталям",
 	"Audio": "Аудио",
-	"Audio settings updated successfully": "Настройки звука успешно обновлены",
 	"August": "Август",
 	"Auto-playback response": "Автоматическое воспроизведение ответа",
 	"Automatic1111": "",
@@ -138,9 +136,11 @@
 	"Context Length": "Длина контекста",
 	"Continue Response": "Продолжить ответ",
 	"Continue with {{provider}}": "Продолжить с {{provider}}",
+	"Control how message text is split for TTS requests. 'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "",
 	"Controls": "Управление",
 	"Copied": "Скопировано",
 	"Copied shared chat URL to clipboard!": "Копирование в буфер обмена выполнено успешно!",
+	"Copied to clipboard": "",
 	"Copy": "Копировать",
 	"Copy Code": "Скопировать код",
 	"Copy last code block": "Копировать последний блок кода",
@@ -285,6 +285,7 @@
 	"File": "Файл",
 	"File Mode": "Режим файла",
 	"File not found.": "Файл не найден.",
+	"File size should not exceed {{maxSize}} MB.": "",
 	"Files": "Файлы",
 	"Filter is now globally disabled": "Фильтр теперь отключен глобально",
 	"Filter is now globally enabled": "Фильтр теперь включен глобально",
@@ -361,6 +362,7 @@
 	"large language models, locally.": "большими языковыми моделями, локально.",
 	"Last Active": "Последний активный",
 	"Last Modified": "Последнее изменение",
+	"Leave empty for unlimited": "",
 	"Light": "Светлый",
 	"Listening...": "Слушаю...",
 	"LLMs can make mistakes. Verify important information.": "LLMs могут допускать ошибки. Проверяйте важную информацию.",
@@ -375,6 +377,8 @@
 	"Manage Pipelines": "Управление конвейерами",
 	"March": "Март",
 	"Max Tokens (num_predict)": "Максимальное количество токенов (num_predict)",
+	"Max Upload Count": "",
+	"Max Upload Size": "",
 	"Maximum of 3 models can be downloaded simultaneously. Please try again later.": "Максимальное количество моделей для загрузки одновременно - 3. Пожалуйста, попробуйте позже.",
 	"May": "Май",
 	"Memories accessible by LLMs will be shown here.": "Воспоминания, доступные LLMs, будут отображаться здесь.",
@@ -505,6 +509,7 @@
 	"Reset Vector Storage": "Сбросить векторное хранилище",
 	"Response AutoCopy to Clipboard": "Автоматическое копирование ответа в буфер обмена",
 	"Response notifications cannot be activated as the website permissions have been denied. Please visit your browser settings to grant the necessary access.": "Уведомления об ответах не могут быть активированы, поскольку доступ к веб-сайту был заблокирован. Пожалуйста, перейдите к настройкам своего браузера, чтобы предоставить необходимый доступ.",
+	"Response splitting": "",
 	"Role": "Роль",
 	"Rosé Pine": "Rosé Pine",
 	"Rosé Pine Dawn": "Rosé Pine Dawn",
@@ -537,6 +542,7 @@
 	"Searched {{count}} sites_many": "Просмотрено {{count}} sites_many",
 	"Searched {{count}} sites_other": "Просмотрено {{count}} sites_other",
 	"Searching \"{{searchQuery}}\"": "Поиск \"{{searchQuery}}\"",
+	"Searching Knowledge for \"{{searchQuery}}\"": "",
 	"Searxng Query URL": "URL-адрес запроса Searxng",
 	"See readme.md for instructions": "Смотрите readme.md для инструкций",
 	"See what's new": "Посмотреть, что нового",
@@ -613,6 +619,8 @@
 	"Tfs Z": "Tfs Z",
 	"Thanks for your feedback!": "Спасибо за вашу обратную связь!",
 	"The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "Разработчики этого плагина - увлеченные волонтеры из сообщества. Если вы считаете этот плагин полезным, пожалуйста, подумайте о том, чтобы внести свой вклад в его разработку.",
+	"The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "",
+	"The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "",
 	"The score should be a value between 0.0 (0%) and 1.0 (100%).": "Оценка должна быть значением между 0,0 (0%) и 1,0 (100%).",
 	"Theme": "Тема",
 	"Thinking...": "Думаю...",
@@ -712,6 +720,7 @@
 	"Write a summary in 50 words that summarizes [topic or keyword].": "Напишите резюме в 50 словах, которое кратко описывает [тему или ключевое слово].",
 	"Yesterday": "Вчера",
 	"You": "Вы",
+	"You can only chat with a maximum of {{maxCount}} file(s) at a time.": "",
 	"You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "Вы можете персонализировать свое взаимодействие с LLMs, добавив воспоминания с помощью кнопки \"Управлять\" ниже, что сделает их более полезными и адаптированными для вас.",
 	"You cannot clone a base model": "Клонировать базовую модель невозможно",
 	"You have no archived conversations.": "У вас нет архивированных бесед.",

+ 11 - 2
src/lib/i18n/locales/sr-RS/translation.json

@@ -6,7 +6,6 @@
 	"(latest)": "(најновије)",
 	"{{ models }}": "{{ модели }}",
 	"{{ owner }}: You cannot delete a base model": "{{ оwнер }}: Не можете избрисати основни модел",
-	"{{modelName}} is thinking...": "{{modelName}} размишља...",
 	"{{user}}'s Chats": "Ћаскања корисника {{user}}",
 	"{{webUIName}} Backend Required": "Захтева се {{webUIName}} позадинац",
 	"*Prompt node ID(s) are required for image generation": "",
@@ -68,7 +67,6 @@
 	"Attach file": "Приложи датотеку",
 	"Attention to detail": "Пажња на детаље",
 	"Audio": "Звук",
-	"Audio settings updated successfully": "",
 	"August": "Август",
 	"Auto-playback response": "Самостално пуштање одговора",
 	"Automatic1111": "",
@@ -138,9 +136,11 @@
 	"Context Length": "Дужина контекста",
 	"Continue Response": "Настави одговор",
 	"Continue with {{provider}}": "",
+	"Control how message text is split for TTS requests. 'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "",
 	"Controls": "",
 	"Copied": "",
 	"Copied shared chat URL to clipboard!": "Адреса дељеног ћаскања ископирана у оставу!",
+	"Copied to clipboard": "",
 	"Copy": "Копирај",
 	"Copy Code": "",
 	"Copy last code block": "Копирај последњи блок кода",
@@ -285,6 +285,7 @@
 	"File": "",
 	"File Mode": "Режим датотеке",
 	"File not found.": "Датотека није пронађена.",
+	"File size should not exceed {{maxSize}} MB.": "",
 	"Files": "",
 	"Filter is now globally disabled": "",
 	"Filter is now globally enabled": "",
@@ -361,6 +362,7 @@
 	"large language models, locally.": "",
 	"Last Active": "Последња активност",
 	"Last Modified": "",
+	"Leave empty for unlimited": "",
 	"Light": "Светла",
 	"Listening...": "",
 	"LLMs can make mistakes. Verify important information.": "ВЈМ-ови (LLM-ови) могу правити грешке. Проверите важне податке.",
@@ -375,6 +377,8 @@
 	"Manage Pipelines": "Управљање цевоводима",
 	"March": "Март",
 	"Max Tokens (num_predict)": "Маx Токенс (нум_предицт)",
+	"Max Upload Count": "",
+	"Max Upload Size": "",
 	"Maximum of 3 models can be downloaded simultaneously. Please try again later.": "Највише 3 модела могу бити преузета истовремено. Покушајте поново касније.",
 	"May": "Мај",
 	"Memories accessible by LLMs will be shown here.": "Памћења које ће бити појављена од овог LLM-а ће бити приказана овде.",
@@ -505,6 +509,7 @@
 	"Reset Vector Storage": "Ресетуј складиште вектора",
 	"Response AutoCopy to Clipboard": "Самостално копирање одговора у оставу",
 	"Response notifications cannot be activated as the website permissions have been denied. Please visit your browser settings to grant the necessary access.": "",
+	"Response splitting": "",
 	"Role": "Улога",
 	"Rosé Pine": "Rosé Pine",
 	"Rosé Pine Dawn": "Rosé Pine Dawn",
@@ -536,6 +541,7 @@
 	"Searched {{count}} sites_few": "Претражио {{цоунт}} ситес_феw",
 	"Searched {{count}} sites_other": "Претражио {{цоунт}} ситес_отхер",
 	"Searching \"{{searchQuery}}\"": "",
+	"Searching Knowledge for \"{{searchQuery}}\"": "",
 	"Searxng Query URL": "УРЛ адреса Сеарxнг упита",
 	"See readme.md for instructions": "Погледај readme.md за упутства",
 	"See what's new": "Погледај шта је ново",
@@ -612,6 +618,8 @@
 	"Tfs Z": "Tfs Z",
 	"Thanks for your feedback!": "Хвала на вашем коментару!",
 	"The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "",
+	"The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "",
+	"The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "",
 	"The score should be a value between 0.0 (0%) and 1.0 (100%).": "Резултат треба да буде вредност између 0.0 (0%) и 1.0 (100%).",
 	"Theme": "Тема",
 	"Thinking...": "",
@@ -711,6 +719,7 @@
 	"Write a summary in 50 words that summarizes [topic or keyword].": "Напишите сажетак у 50 речи који резимира [тему или кључну реч].",
 	"Yesterday": "Јуче",
 	"You": "Ти",
+	"You can only chat with a maximum of {{maxCount}} file(s) at a time.": "",
 	"You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "",
 	"You cannot clone a base model": "Не можеш клонирати основни модел",
 	"You have no archived conversations.": "Немате архивиране разговоре.",

+ 11 - 2
src/lib/i18n/locales/sv-SE/translation.json

@@ -6,7 +6,6 @@
 	"(latest)": "(senaste)",
 	"{{ models }}": "{{ modeller }}",
 	"{{ owner }}: You cannot delete a base model": "{{ owner }}: Du kan inte ta bort en basmodell",
-	"{{modelName}} is thinking...": "{{modelName}} tänker...",
 	"{{user}}'s Chats": "{{user}}s Chats",
 	"{{webUIName}} Backend Required": "{{webUIName}} Backend krävs",
 	"*Prompt node ID(s) are required for image generation": "",
@@ -68,7 +67,6 @@
 	"Attach file": "Bifoga fil",
 	"Attention to detail": "Detaljerad uppmärksamhet",
 	"Audio": "Ljud",
-	"Audio settings updated successfully": "",
 	"August": "augusti",
 	"Auto-playback response": "Automatisk uppspelning",
 	"Automatic1111": "",
@@ -138,9 +136,11 @@
 	"Context Length": "Kontextlängd",
 	"Continue Response": "Fortsätt svar",
 	"Continue with {{provider}}": "",
+	"Control how message text is split for TTS requests. 'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "",
 	"Controls": "",
 	"Copied": "",
 	"Copied shared chat URL to clipboard!": "Kopierad delad chatt-URL till urklipp!",
+	"Copied to clipboard": "",
 	"Copy": "Kopiera",
 	"Copy Code": "",
 	"Copy last code block": "Kopiera sista kodblock",
@@ -285,6 +285,7 @@
 	"File": "",
 	"File Mode": "Fil-läge",
 	"File not found.": "Fil hittades inte.",
+	"File size should not exceed {{maxSize}} MB.": "",
 	"Files": "",
 	"Filter is now globally disabled": "",
 	"Filter is now globally enabled": "",
@@ -361,6 +362,7 @@
 	"large language models, locally.": "",
 	"Last Active": "Senast aktiv",
 	"Last Modified": "",
+	"Leave empty for unlimited": "",
 	"Light": "Ljus",
 	"Listening...": "Lyssnar...",
 	"LLMs can make mistakes. Verify important information.": "LLM:er kan göra misstag. Granska viktig information.",
@@ -375,6 +377,8 @@
 	"Manage Pipelines": "Hantera rörledningar",
 	"March": "mars",
 	"Max Tokens (num_predict)": "Maximalt antal tokens (num_predict)",
+	"Max Upload Count": "",
+	"Max Upload Size": "",
 	"Maximum of 3 models can be downloaded simultaneously. Please try again later.": "Högst 3 modeller kan laddas ner samtidigt. Vänligen försök igen senare.",
 	"May": "maj",
 	"Memories accessible by LLMs will be shown here.": "Minnen som LLM:er kan komma åt visas här.",
@@ -505,6 +509,7 @@
 	"Reset Vector Storage": "Återställ vektorlager",
 	"Response AutoCopy to Clipboard": "Svara AutoCopy till urklipp",
 	"Response notifications cannot be activated as the website permissions have been denied. Please visit your browser settings to grant the necessary access.": "",
+	"Response splitting": "",
 	"Role": "Roll",
 	"Rosé Pine": "Rosé Pine",
 	"Rosé Pine Dawn": "Rosé Pine Dawn",
@@ -535,6 +540,7 @@
 	"Searched {{count}} sites_one": "Sökte på {{count}} sites_one",
 	"Searched {{count}} sites_other": "Sökte på {{count}} sites_other",
 	"Searching \"{{searchQuery}}\"": "Söker \"{{searchQuery}}\"",
+	"Searching Knowledge for \"{{searchQuery}}\"": "",
 	"Searxng Query URL": "Searxng Query URL",
 	"See readme.md for instructions": "Se readme.md för instruktioner",
 	"See what's new": "Se vad som är nytt",
@@ -611,6 +617,8 @@
 	"Tfs Z": "Tfs Z",
 	"Thanks for your feedback!": "Tack för din feedback!",
 	"The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "",
+	"The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "",
+	"The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "",
 	"The score should be a value between 0.0 (0%) and 1.0 (100%).": "Betyget ska vara ett värde mellan 0.0 (0%) och 1.0 (100%).",
 	"Theme": "Tema",
 	"Thinking...": "",
@@ -710,6 +718,7 @@
 	"Write a summary in 50 words that summarizes [topic or keyword].": "Skriv en sammanfattning på 50 ord som sammanfattar [ämne eller nyckelord].",
 	"Yesterday": "Igår",
 	"You": "Dig",
+	"You can only chat with a maximum of {{maxCount}} file(s) at a time.": "",
 	"You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "Du kan anpassa dina interaktioner med stora språkmodeller genom att lägga till minnen via knappen 'Hantera' nedan, så att de blir mer användbara och skräddarsydda för dig.",
 	"You cannot clone a base model": "Du kan inte klona en basmodell",
 	"You have no archived conversations.": "Du har inga arkiverade samtal.",

+ 11 - 2
src/lib/i18n/locales/th-TH/translation.json

@@ -6,7 +6,6 @@
 	"(latest)": "(ล่าสุด)",
 	"{{ models }}": "{{ models }}",
 	"{{ owner }}: You cannot delete a base model": "{{ owner }}: คุณไม่สามารถลบโมเดลพื้นฐานได้",
-	"{{modelName}} is thinking...": "{{modelName}} กำลังคิด...",
 	"{{user}}'s Chats": "การสนทนาของ {{user}}",
 	"{{webUIName}} Backend Required": "ต้องการ Backend ของ {{webUIName}}",
 	"*Prompt node ID(s) are required for image generation": "",
@@ -68,7 +67,6 @@
 	"Attach file": "แนบไฟล์",
 	"Attention to detail": "ใส่ใจในรายละเอียด",
 	"Audio": "เสียง",
-	"Audio settings updated successfully": "อัปเดตการตั้งค่าเสียงสำเร็จแล้ว",
 	"August": "สิงหาคม",
 	"Auto-playback response": "ตอบสนองการเล่นอัตโนมัติ",
 	"Automatic1111": "",
@@ -138,9 +136,11 @@
 	"Context Length": "ความยาวของบริบท",
 	"Continue Response": "ตอบสนองต่อไป",
 	"Continue with {{provider}}": "ดำเนินการต่อด้วย {{provider}}",
+	"Control how message text is split for TTS requests. 'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "",
 	"Controls": "การควบคุม",
 	"Copied": "",
 	"Copied shared chat URL to clipboard!": "คัดลอก URL แชทที่แชร์ไปยังคลิปบอร์ดแล้ว!",
+	"Copied to clipboard": "",
 	"Copy": "คัดลอก",
 	"Copy Code": "",
 	"Copy last code block": "คัดลอกบล็อกโค้ดสุดท้าย",
@@ -285,6 +285,7 @@
 	"File": "ไฟล์",
 	"File Mode": "โหมดไฟล์",
 	"File not found.": "ไม่พบไฟล์",
+	"File size should not exceed {{maxSize}} MB.": "",
 	"Files": "ไฟล์",
 	"Filter is now globally disabled": "การกรองถูกปิดใช้งานทั่วโลกแล้ว",
 	"Filter is now globally enabled": "การกรองถูกเปิดใช้งานทั่วโลกแล้ว",
@@ -361,6 +362,7 @@
 	"large language models, locally.": "โมเดลภาษาขนาดใหญ่ในเครื่อง",
 	"Last Active": "ใช้งานล่าสุด",
 	"Last Modified": "แก้ไขล่าสุด",
+	"Leave empty for unlimited": "",
 	"Light": "แสง",
 	"Listening...": "กำลังฟัง...",
 	"LLMs can make mistakes. Verify important information.": "LLMs สามารถทำผิดพลาดได้ ตรวจสอบข้อมูลสำคัญ",
@@ -375,6 +377,8 @@
 	"Manage Pipelines": "จัดการไปป์ไลน์",
 	"March": "มีนาคม",
 	"Max Tokens (num_predict)": "โทเค็นสูงสุด (num_predict)",
+	"Max Upload Count": "",
+	"Max Upload Size": "",
 	"Maximum of 3 models can be downloaded simultaneously. Please try again later.": "สามารถดาวน์โหลดโมเดลได้สูงสุด 3 โมเดลในเวลาเดียวกัน โปรดลองอีกครั้งในภายหลัง",
 	"May": "พฤษภาคม",
 	"Memories accessible by LLMs will be shown here.": "",
@@ -505,6 +509,7 @@
 	"Reset Vector Storage": "รีเซ็ตการจัดเก็บเวกเตอร์",
 	"Response AutoCopy to Clipboard": "ตอบสนองการคัดลอกอัตโนมัติไปยังคลิปบอร์ด",
 	"Response notifications cannot be activated as the website permissions have been denied. Please visit your browser settings to grant the necessary access.": "ไม่สามารถเปิดการแจ้งเตือนการตอบสนองได้เนื่องจากเว็บไซต์ปฏิเสธ กรุณาเข้าการตั้งค่าเบราว์เซอร์ของคุณเพื่อให้สิทธิ์การเข้าถึงที่จำเป็น",
+	"Response splitting": "",
 	"Role": "บทบาท",
 	"Rosé Pine": "Rosé Pine",
 	"Rosé Pine Dawn": "Rosé Pine Dawn",
@@ -535,6 +540,7 @@
 	"Searched {{count}} sites_one": "ค้นหา {{count}} เว็บไซต์",
 	"Searched {{count}} sites_other": "ค้นหา {{count}} เว็บไซต์",
 	"Searching \"{{searchQuery}}\"": "กำลังค้นหา \"{{searchQuery}}\"",
+	"Searching Knowledge for \"{{searchQuery}}\"": "",
 	"Searxng Query URL": "URL คำค้นหา",
 	"See readme.md for instructions": "ดู readme.md สำหรับคำแนะนำ",
 	"See what's new": "ดูสิ่งที่ใหม่",
@@ -611,6 +617,8 @@
 	"Tfs Z": "Tfs Z",
 	"Thanks for your feedback!": "ขอบคุณสำหรับความคิดเห็นของคุณ!",
 	"The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "นักพัฒนาที่อยู่เบื้องหลังปลั๊กอินนี้เป็นอาสาสมัครที่มีชื่นชอบการแบ่งบัน หากคุณพบว่าปลั๊กอินนี้มีประโยชน์ โปรดพิจารณาสนับสนุนการพัฒนาของเขา",
+	"The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "",
+	"The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "",
 	"The score should be a value between 0.0 (0%) and 1.0 (100%).": "คะแนนควรอยู่ระหว่าง 0.0 (0%) ถึง 1.0 (100%)",
 	"Theme": "ธีม",
 	"Thinking...": "กำลังคิด...",
@@ -710,6 +718,7 @@
 	"Write a summary in 50 words that summarizes [topic or keyword].": "เขียนสรุปใน 50 คำที่สรุป [หัวข้อหรือคำสำคัญ]",
 	"Yesterday": "เมื่อวาน",
 	"You": "คุณ",
+	"You can only chat with a maximum of {{maxCount}} file(s) at a time.": "",
 	"You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "คุณสามารถปรับแต่งการโต้ตอบของคุณกับ LLMs โดยเพิ่มความทรงจำผ่านปุ่ม 'จัดการ' ด้านล่าง ทำให้มันมีประโยชน์และเหมาะกับคุณมากขึ้น",
 	"You cannot clone a base model": "คุณไม่สามารถโคลนโมเดลฐานได้",
 	"You have no archived conversations.": "คุณไม่มีการสนทนาที่เก็บถาวร",

+ 3 - 0
src/lib/i18n/locales/tk-TM/transaltion.json

@@ -40,6 +40,7 @@
 	"alphanumeric characters and hyphens": "harply-sanjy belgiler we defisler",
 	"Already have an account?": "Hasabyňyz barmy?",
 	"an assistant": "kömekçi",
+	"An error occurred while processing files.": "",
 	"and": "we",
 	"and create a new shared link.": "we täze paýlaşylan baglanyşyk dörediň.",
 	"API Base URL": "API Esasy URL",
@@ -324,6 +325,8 @@
 	"Management": "Dolandyryş",
 	"Manual Input": "El bilen Girdi",
 	"March": "Mart",
+	"Max File Count": "",
+	"Max File Size(MB)": "",
 	"Mark as Read": "Okalan hökmünde belläň",
 	"Match": "Gab",
 	"May": "Maý",

+ 11 - 2
src/lib/i18n/locales/tk-TW/translation.json

@@ -6,7 +6,6 @@
 	"(latest)": "",
 	"{{ models }}": "",
 	"{{ owner }}: You cannot delete a base model": "",
-	"{{modelName}} is thinking...": "",
 	"{{user}}'s Chats": "",
 	"{{webUIName}} Backend Required": "",
 	"*Prompt node ID(s) are required for image generation": "",
@@ -68,7 +67,6 @@
 	"Attach file": "",
 	"Attention to detail": "",
 	"Audio": "",
-	"Audio settings updated successfully": "",
 	"August": "",
 	"Auto-playback response": "",
 	"Automatic1111": "",
@@ -138,9 +136,11 @@
 	"Context Length": "",
 	"Continue Response": "",
 	"Continue with {{provider}}": "",
+	"Control how message text is split for TTS requests. 'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "",
 	"Controls": "",
 	"Copied": "",
 	"Copied shared chat URL to clipboard!": "",
+	"Copied to clipboard": "",
 	"Copy": "",
 	"Copy Code": "",
 	"Copy last code block": "",
@@ -285,6 +285,7 @@
 	"File": "",
 	"File Mode": "",
 	"File not found.": "",
+	"File size should not exceed {{maxSize}} MB.": "",
 	"Files": "",
 	"Filter is now globally disabled": "",
 	"Filter is now globally enabled": "",
@@ -361,6 +362,7 @@
 	"large language models, locally.": "",
 	"Last Active": "",
 	"Last Modified": "",
+	"Leave empty for unlimited": "",
 	"Light": "",
 	"Listening...": "",
 	"LLMs can make mistakes. Verify important information.": "",
@@ -375,6 +377,8 @@
 	"Manage Pipelines": "",
 	"March": "",
 	"Max Tokens (num_predict)": "",
+	"Max Upload Count": "",
+	"Max Upload Size": "",
 	"Maximum of 3 models can be downloaded simultaneously. Please try again later.": "",
 	"May": "",
 	"Memories accessible by LLMs will be shown here.": "",
@@ -505,6 +509,7 @@
 	"Reset Vector Storage": "",
 	"Response AutoCopy to Clipboard": "",
 	"Response notifications cannot be activated as the website permissions have been denied. Please visit your browser settings to grant the necessary access.": "",
+	"Response splitting": "",
 	"Role": "",
 	"Rosé Pine": "",
 	"Rosé Pine Dawn": "",
@@ -535,6 +540,7 @@
 	"Searched {{count}} sites_one": "",
 	"Searched {{count}} sites_other": "",
 	"Searching \"{{searchQuery}}\"": "",
+	"Searching Knowledge for \"{{searchQuery}}\"": "",
 	"Searxng Query URL": "",
 	"See readme.md for instructions": "",
 	"See what's new": "",
@@ -611,6 +617,8 @@
 	"Tfs Z": "",
 	"Thanks for your feedback!": "",
 	"The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "",
+	"The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "",
+	"The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "",
 	"The score should be a value between 0.0 (0%) and 1.0 (100%).": "",
 	"Theme": "",
 	"Thinking...": "",
@@ -710,6 +718,7 @@
 	"Write a summary in 50 words that summarizes [topic or keyword].": "",
 	"Yesterday": "",
 	"You": "",
+	"You can only chat with a maximum of {{maxCount}} file(s) at a time.": "",
 	"You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "",
 	"You cannot clone a base model": "",
 	"You have no archived conversations.": "",

+ 11 - 2
src/lib/i18n/locales/tr-TR/translation.json

@@ -6,7 +6,6 @@
 	"(latest)": "(en son)",
 	"{{ models }}": "{{ models }}",
 	"{{ owner }}: You cannot delete a base model": "{{ owner }}: Temel modeli silemezsiniz",
-	"{{modelName}} is thinking...": "{{modelName}} düşünüyor...",
 	"{{user}}'s Chats": "{{user}} Sohbetleri",
 	"{{webUIName}} Backend Required": "{{webUIName}} Arkayüz Gerekli",
 	"*Prompt node ID(s) are required for image generation": "",
@@ -68,7 +67,6 @@
 	"Attach file": "Dosya ekle",
 	"Attention to detail": "Ayrıntılara dikkat",
 	"Audio": "Ses",
-	"Audio settings updated successfully": "Ses ayarları başarıyla güncellendi",
 	"August": "Ağustos",
 	"Auto-playback response": "Yanıtı otomatik oynatma",
 	"Automatic1111": "",
@@ -138,9 +136,11 @@
 	"Context Length": "Bağlam Uzunluğu",
 	"Continue Response": "Yanıta Devam Et",
 	"Continue with {{provider}}": "{{provider}} ile devam et",
+	"Control how message text is split for TTS requests. 'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "",
 	"Controls": "",
 	"Copied": "",
 	"Copied shared chat URL to clipboard!": "Paylaşılan sohbet URL'si panoya kopyalandı!",
+	"Copied to clipboard": "",
 	"Copy": "Kopyala",
 	"Copy Code": "",
 	"Copy last code block": "Son kod bloğunu kopyala",
@@ -285,6 +285,7 @@
 	"File": "Dosya",
 	"File Mode": "Dosya Modu",
 	"File not found.": "Dosya bulunamadı.",
+	"File size should not exceed {{maxSize}} MB.": "",
 	"Files": "",
 	"Filter is now globally disabled": "Filtre artık global olarak devre dışı",
 	"Filter is now globally enabled": "Filtre artık global olarak devrede",
@@ -361,6 +362,7 @@
 	"large language models, locally.": "",
 	"Last Active": "Son Aktivite",
 	"Last Modified": "Son Düzenleme",
+	"Leave empty for unlimited": "",
 	"Light": "Açık",
 	"Listening...": "Dinleniyor...",
 	"LLMs can make mistakes. Verify important information.": "LLM'ler hata yapabilir. Önemli bilgileri doğrulayın.",
@@ -375,6 +377,8 @@
 	"Manage Pipelines": "Pipelineları Yönet",
 	"March": "Mart",
 	"Max Tokens (num_predict)": "Maksimum Token (num_predict)",
+	"Max Upload Count": "",
+	"Max Upload Size": "",
 	"Maximum of 3 models can be downloaded simultaneously. Please try again later.": "Aynı anda en fazla 3 model indirilebilir. Lütfen daha sonra tekrar deneyin.",
 	"May": "Mayıs",
 	"Memories accessible by LLMs will be shown here.": "LLM'ler tarafından erişilebilen bellekler burada gösterilecektir.",
@@ -505,6 +509,7 @@
 	"Reset Vector Storage": "Vektör Depolamayı Sıfırla",
 	"Response AutoCopy to Clipboard": "Yanıtı Panoya Otomatik Kopyala",
 	"Response notifications cannot be activated as the website permissions have been denied. Please visit your browser settings to grant the necessary access.": "Web sitesi izinleri reddedildiğinden yanıt bildirimleri etkinleştirilemiyor. Gerekli erişimi sağlamak için lütfen tarayıcı ayarlarınızı ziyaret edin.",
+	"Response splitting": "",
 	"Role": "Rol",
 	"Rosé Pine": "Rosé Pine",
 	"Rosé Pine Dawn": "Rosé Pine Dawn",
@@ -535,6 +540,7 @@
 	"Searched {{count}} sites_one": "Arandı {{count}} sites_one",
 	"Searched {{count}} sites_other": "Arandı {{count}} sites_other",
 	"Searching \"{{searchQuery}}\"": "\"{{searchQuery}}\" aranıyor",
+	"Searching Knowledge for \"{{searchQuery}}\"": "",
 	"Searxng Query URL": "Searxng Sorgu URL'si",
 	"See readme.md for instructions": "Yönergeler için readme.md dosyasına bakın",
 	"See what's new": "Yeniliklere göz atın",
@@ -611,6 +617,8 @@
 	"Tfs Z": "Tfs Z",
 	"Thanks for your feedback!": "Geri bildiriminiz için teşekkürler!",
 	"The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "",
+	"The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "",
+	"The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "",
 	"The score should be a value between 0.0 (0%) and 1.0 (100%).": "Puan 0.0 (%0) ile 1.0 (%100) arasında bir değer olmalıdır.",
 	"Theme": "Tema",
 	"Thinking...": "Düşünüyor...",
@@ -710,6 +718,7 @@
 	"Write a summary in 50 words that summarizes [topic or keyword].": "[Konuyu veya anahtar kelimeyi] özetleyen 50 kelimelik bir özet yazın.",
 	"Yesterday": "Dün",
 	"You": "Sen",
+	"You can only chat with a maximum of {{maxCount}} file(s) at a time.": "",
 	"You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "Aşağıdaki 'Yönet' düğmesi aracılığıyla bellekler ekleyerek LLM'lerle etkileşimlerinizi kişiselleştirebilir, onları daha yararlı ve size özel hale getirebilirsiniz.",
 	"You cannot clone a base model": "Bir temel modeli klonlayamazsınız",
 	"You have no archived conversations.": "Arşivlenmiş sohbetleriniz yok.",

+ 11 - 2
src/lib/i18n/locales/uk-UA/translation.json

@@ -6,7 +6,6 @@
 	"(latest)": "(остання)",
 	"{{ models }}": "{{ models }}",
 	"{{ owner }}: You cannot delete a base model": "{{ owner }}: Ви не можете видалити базову модель.",
-	"{{modelName}} is thinking...": "{{modelName}} думає...",
 	"{{user}}'s Chats": "Чати {{user}}а",
 	"{{webUIName}} Backend Required": "Необхідно підключення бекенду {{webUIName}}",
 	"*Prompt node ID(s) are required for image generation": "",
@@ -68,7 +67,6 @@
 	"Attach file": "Прикріпити файл",
 	"Attention to detail": "Увага до деталей",
 	"Audio": "Аудіо",
-	"Audio settings updated successfully": "Налаштування звуку успішно оновлено",
 	"August": "Серпень",
 	"Auto-playback response": "Автоматичне відтворення відповіді",
 	"Automatic1111": "",
@@ -138,9 +136,11 @@
 	"Context Length": "Довжина контексту",
 	"Continue Response": "Продовжити відповідь",
 	"Continue with {{provider}}": "Продовжити з {{provider}}",
+	"Control how message text is split for TTS requests. 'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "",
 	"Controls": "Керування",
 	"Copied": "Скопійовано",
 	"Copied shared chat URL to clipboard!": "Скопійовано URL-адресу спільного чату в буфер обміну!",
+	"Copied to clipboard": "",
 	"Copy": "Копіювати",
 	"Copy Code": "Копіювати код",
 	"Copy last code block": "Копіювати останній блок коду",
@@ -285,6 +285,7 @@
 	"File": "Файл",
 	"File Mode": "Файловий режим",
 	"File not found.": "Файл не знайдено.",
+	"File size should not exceed {{maxSize}} MB.": "",
 	"Files": "Файли",
 	"Filter is now globally disabled": "Фільтр глобально вимкнено",
 	"Filter is now globally enabled": "Фільтр увімкнено глобально",
@@ -361,6 +362,7 @@
 	"large language models, locally.": "великими мовними моделями, локально.",
 	"Last Active": "Остання активність",
 	"Last Modified": "Востаннє змінено",
+	"Leave empty for unlimited": "",
 	"Light": "Світла",
 	"Listening...": "Слухаю...",
 	"LLMs can make mistakes. Verify important information.": "LLMs можуть помилятися. Перевірте важливу інформацію.",
@@ -375,6 +377,8 @@
 	"Manage Pipelines": "Керування конвеєрами",
 	"March": "Березень",
 	"Max Tokens (num_predict)": "Макс токенів (num_predict)",
+	"Max Upload Count": "",
+	"Max Upload Size": "",
 	"Maximum of 3 models can be downloaded simultaneously. Please try again later.": "Максимум 3 моделі можна завантажити одночасно. Будь ласка, спробуйте пізніше.",
 	"May": "Травень",
 	"Memories accessible by LLMs will be shown here.": "Пам'ять, яка доступна LLM, буде показана тут.",
@@ -505,6 +509,7 @@
 	"Reset Vector Storage": "Скинути векторне сховище",
 	"Response AutoCopy to Clipboard": "Автокопіювання відповіді в буфер обміну",
 	"Response notifications cannot be activated as the website permissions have been denied. Please visit your browser settings to grant the necessary access.": "Сповіщення про відповіді не можуть бути активовані, оскільки вам було відмовлено в доступі до веб-сайту. Будь ласка, відвідайте налаштування вашого браузера, щоб надати необхідний доступ.",
+	"Response splitting": "",
 	"Role": "Роль",
 	"Rosé Pine": "Rosé Pine",
 	"Rosé Pine Dawn": "Rosé Pine Dawn",
@@ -537,6 +542,7 @@
 	"Searched {{count}} sites_many": "Переглянуто {{count}} сайтів",
 	"Searched {{count}} sites_other": "Переглянуто {{count}} сайтів",
 	"Searching \"{{searchQuery}}\"": "Шукаю \"{{searchQuery}}\"",
+	"Searching Knowledge for \"{{searchQuery}}\"": "",
 	"Searxng Query URL": "URL-адреса запиту Searxng",
 	"See readme.md for instructions": "Див. readme.md для інструкцій",
 	"See what's new": "Подивіться, що нового",
@@ -613,6 +619,8 @@
 	"Tfs Z": "Tfs Z",
 	"Thanks for your feedback!": "Дякуємо за ваш відгук!",
 	"The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "Розробники цього плагіна - пристрасні волонтери зі спільноти. Якщо ви вважаєте цей плагін корисним, будь ласка, зробіть свій внесок у його розвиток.",
+	"The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "",
+	"The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "",
 	"The score should be a value between 0.0 (0%) and 1.0 (100%).": "Оцінка повинна бути в діапазоні від 0.0 (0%) до 1.0 (100%).",
 	"Theme": "Тема",
 	"Thinking...": "Думаю...",
@@ -712,6 +720,7 @@
 	"Write a summary in 50 words that summarizes [topic or keyword].": "Напишіть стислий зміст у 50 слів, який узагальнює [тема або ключове слово].",
 	"Yesterday": "Вчора",
 	"You": "Ви",
+	"You can only chat with a maximum of {{maxCount}} file(s) at a time.": "",
 	"You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "Ви можете налаштувати ваші взаємодії з мовними моделями, додавши спогади через кнопку 'Керувати' внизу, що зробить їх більш корисними та персоналізованими для вас.",
 	"You cannot clone a base model": "Базову модель не можна клонувати",
 	"You have no archived conversations.": "У вас немає архівованих розмов.",

+ 28 - 19
src/lib/i18n/locales/vi-VN/translation.json

@@ -6,7 +6,6 @@
 	"(latest)": "(mới nhất)",
 	"{{ models }}": "{{ mô hình }}",
 	"{{ owner }}: You cannot delete a base model": "{{ owner }}: Bạn không thể xóa base model",
-	"{{modelName}} is thinking...": "{{modelName}} đang suy nghĩ...",
 	"{{user}}'s Chats": "{{user}}'s Chats",
 	"{{webUIName}} Backend Required": "{{webUIName}} Yêu cầu Backend",
 	"*Prompt node ID(s) are required for image generation": "",
@@ -45,9 +44,9 @@
 	"All Users": "Danh sách người sử dụng",
 	"Allow": "Cho phép",
 	"Allow Chat Deletion": "Cho phép Xóa nội dung chat",
-	"Allow Chat Editing": "",
+	"Allow Chat Editing": "Cho phép Sửa nội dung chat",
 	"Allow non-local voices": "Cho phép giọng nói không bản xứ",
-	"Allow Temporary Chat": "",
+	"Allow Temporary Chat": "Cho phép Chat nháp",
 	"Allow User Location": "Cho phép sử dụng vị trí người dùng",
 	"Allow Voice Interruption in Call": "Cho phép gián đoạn giọng nói trong cuộc gọi",
 	"alphanumeric characters and hyphens": "ký tự số và gạch nối",
@@ -68,7 +67,6 @@
 	"Attach file": "Đính kèm file",
 	"Attention to detail": "Có sự chú ý đến chi tiết của vấn đề",
 	"Audio": "Âm thanh",
-	"Audio settings updated successfully": "Đã cập nhật cài đặt âm thanh thành công",
 	"August": "Tháng 8",
 	"Auto-playback response": "Tự động phát lại phản hồi (Auto-playback)",
 	"Automatic1111": "",
@@ -94,7 +92,7 @@
 	"Chat": "Trò chuyện",
 	"Chat Background Image": "Hình nền trò chuyện",
 	"Chat Bubble UI": "Bảng chat",
-	"Chat Controls": "",
+	"Chat Controls": "Điều khiển Chats",
 	"Chat direction": "Hướng chat",
 	"Chats": "Chat",
 	"Check Again": "Kiểm tra Lại",
@@ -113,7 +111,7 @@
 	"Click here to select a csv file.": "Nhấn vào đây để chọn tệp csv",
 	"Click here to select a py file.": "Nhấn vào đây để chọn tệp py",
 	"Click here to select documents.": "Bấm vào đây để chọn tài liệu.",
-	"Click here to upload a workflow.json file.": "",
+	"Click here to upload a workflow.json file.": "Bấm vào đây để upload file workflow.json",
 	"click here.": "bấm vào đây.",
 	"Click on the user role button to change a user's role.": "Bấm vào nút trong cột VAI TRÒ để thay đổi quyền của người sử dụng.",
 	"Clipboard write permission denied. Please check your browser settings to grant the necessary access.": "Quyền ghi vào clipboard bị từ chối. Vui lòng kiểm tra cài đặt trên trình duyệt của bạn để được cấp quyền truy cập cần thiết.",
@@ -138,11 +136,13 @@
 	"Context Length": "Độ dài ngữ cảnh (Context Length)",
 	"Continue Response": "Tiếp tục trả lời",
 	"Continue with {{provider}}": "Tiếp tục với {{provider}}",
+	"Control how message text is split for TTS requests. 'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "",
 	"Controls": "",
-	"Copied": "",
+	"Copied": "Đã sao chép",
 	"Copied shared chat URL to clipboard!": "Đã sao chép link chia sẻ trò chuyện vào clipboard!",
+	"Copied to clipboard": "",
 	"Copy": "Sao chép",
-	"Copy Code": "",
+	"Copy Code": "Sao chép Code",
 	"Copy last code block": "Sao chép khối mã cuối cùng",
 	"Copy last response": "Sao chép phản hồi cuối cùng",
 	"Copy Link": "Sao chép link",
@@ -226,10 +226,10 @@
 	"Embedding Model": "Mô hình embedding",
 	"Embedding Model Engine": "Trình xử lý embedding",
 	"Embedding model set to \"{{embedding_model}}\"": "Mô hình embedding đã được thiết lập thành \"{{embedding_model}}\"",
-	"Enable Community Sharing": "Kích hoạt Chia sẻ Cộng đồng",
-	"Enable Message Rating": "",
+	"Enable Community Sharing": "Cho phép Chia sẻ Cộng đồng",
+	"Enable Message Rating": "Cho phép phản hồi, đánh giá",
 	"Enable New Sign Ups": "Cho phép đăng ký mới",
-	"Enable Web Search": "Kích hoạt tìm kiếm Web",
+	"Enable Web Search": "Cho phép tìm kiếm Web",
 	"Enabled": "Đã bật",
 	"Engine": "",
 	"Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "Đảm bảo tệp CSV của bạn bao gồm 4 cột theo thứ tự sau: Name, Email, Password, Role.",
@@ -285,6 +285,7 @@
 	"File": "Tệp",
 	"File Mode": "Chế độ Tệp văn bản",
 	"File not found.": "Không tìm thấy tệp.",
+	"File size should not exceed {{maxSize}} MB.": "",
 	"Files": "Tệp",
 	"Filter is now globally disabled": "Bộ lọc hiện đã bị vô hiệu hóa trên toàn hệ thống",
 	"Filter is now globally enabled": "Bộ lọc hiện được kích hoạt trên toàn hệ thống",
@@ -319,7 +320,7 @@
 	"Google PSE API Key": "Khóa API Google PSE",
 	"Google PSE Engine Id": "ID công cụ Google PSE",
 	"h:mm a": "h:mm a",
-	"Haptic Feedback": "",
+	"Haptic Feedback": "Phản hồi xúc giác",
 	"has no conversations.": "không có hội thoại",
 	"Hello, {{name}}": "Xin chào {{name}}",
 	"Help": "Trợ giúp",
@@ -337,7 +338,7 @@
 	"Import Functions": "Nạp Functions",
 	"Import Models": "Nạp model",
 	"Import Prompts": "Nạp các prompt lên hệ thống",
-	"Import Tools": "Tạp Tools",
+	"Import Tools": "Nạp Tools",
 	"Include `--api-auth` flag when running stable-diffusion-webui": "",
 	"Include `--api` flag when running stable-diffusion-webui": "Bao gồm flag `--api` khi chạy stable-diffusion-webui",
 	"Info": "Thông tin",
@@ -361,6 +362,7 @@
 	"large language models, locally.": "các mô hình ngôn ngữ lớn, mang tính địa phương",
 	"Last Active": "Truy cập gần nhất",
 	"Last Modified": "Lần sửa gần nhất",
+	"Leave empty for unlimited": "",
 	"Light": "Sáng",
 	"Listening...": "Đang nghe...",
 	"LLMs can make mistakes. Verify important information.": "Hệ thống có thể tạo ra nội dung không chính xác hoặc sai. Hãy kiểm chứng kỹ lưỡng thông tin trước khi tiếp nhận và sử dụng.",
@@ -368,13 +370,15 @@
 	"LTR": "LTR",
 	"Made by OpenWebUI Community": "Được tạo bởi Cộng đồng OpenWebUI",
 	"Make sure to enclose them with": "Hãy chắc chắn bao quanh chúng bằng",
-	"Make sure to export a workflow.json file as API format from ComfyUI.": "",
+	"Make sure to export a workflow.json file as API format from ComfyUI.": "Đảm bảo xuất tệp workflow.json đúng format API của ComfyUI.",
 	"Manage": "Quản lý",
 	"Manage Models": "Quản lý mô hình",
 	"Manage Ollama Models": "Quản lý mô hình với Ollama",
 	"Manage Pipelines": "Quản lý Pipelines",
 	"March": "Tháng 3",
 	"Max Tokens (num_predict)": "Tokens tối đa (num_predict)",
+	"Max Upload Count": "",
+	"Max Upload Size": "",
 	"Maximum of 3 models can be downloaded simultaneously. Please try again later.": "Tối đa 3 mô hình có thể được tải xuống cùng lúc. Vui lòng thử lại sau.",
 	"May": "Tháng 5",
 	"Memories accessible by LLMs will be shown here.": "Memory có thể truy cập bởi LLMs sẽ hiển thị ở đây.",
@@ -383,7 +387,7 @@
 	"Memory cleared successfully": "Memory đã bị xóa",
 	"Memory deleted successfully": "Memory đã bị loại bỏ",
 	"Memory updated successfully": "Memory đã cập nhật thành công",
-	"Merge Responses": "",
+	"Merge Responses": "Hợp nhất các phản hồi",
 	"Messages you send after creating your link won't be shared. Users with the URL will be able to view the shared chat.": "Tin nhắn bạn gửi sau khi tạo liên kết sẽ không được chia sẻ. Người dùng có URL sẽ có thể xem cuộc trò chuyện được chia sẻ.",
 	"Min P": "",
 	"Minimum Score": "Score tối thiểu",
@@ -505,11 +509,12 @@
 	"Reset Vector Storage": "Cài đặt lại Vector Storage",
 	"Response AutoCopy to Clipboard": "Tự động Sao chép Phản hồi vào clipboard",
 	"Response notifications cannot be activated as the website permissions have been denied. Please visit your browser settings to grant the necessary access.": "Không thể kích hoạt thông báo vì trang web không cấp quyền. Vui lòng truy cập cài đặt trình duyệt của bạn để cấp quyền cần thiết.",
+	"Response splitting": "",
 	"Role": "Vai trò",
 	"Rosé Pine": "Rosé Pine",
 	"Rosé Pine Dawn": "Rosé Pine Dawn",
 	"RTL": "RTL",
-	"Run": "",
+	"Run": "Thực hiện",
 	"Run Llama 2, Code Llama, and other models. Customize and create your own.": "Chạy Llama 2, Code Llama và các mô hình khác. Tùy chỉnh hoặc mô hình riêng của bạn.",
 	"Running": "Đang chạy",
 	"Save": "Lưu",
@@ -520,7 +525,7 @@
 	"Scan": "Quét tài liệu",
 	"Scan complete!": "Quét hoàn tất!",
 	"Scan for documents from {{path}}": "Quét tài liệu từ đường dẫn: {{path}}",
-	"Scroll to bottom when switching between branches": "",
+	"Scroll to bottom when switching between branches": "Cuộn xuống dưới cùng khi chuyển đổi giữa các nhánh",
 	"Search": "Tìm kiếm",
 	"Search a model": "Tìm model",
 	"Search Chats": "Tìm kiếm các cuộc Chat",
@@ -534,6 +539,7 @@
 	"Search Tools": "Tìm kiếm Tools",
 	"Searched {{count}} sites_other": "Đã tìm thấy {{count}} trang web",
 	"Searching \"{{searchQuery}}\"": "Đang tìm \"{{searchQuery}}\"",
+	"Searching Knowledge for \"{{searchQuery}}\"": "",
 	"Searxng Query URL": "URL truy vấn Searxng",
 	"See readme.md for instructions": "Xem readme.md để biết hướng dẫn",
 	"See what's new": "Xem những cập nhật mới",
@@ -547,7 +553,7 @@
 	"Select a tool": "Chọn tool",
 	"Select an Ollama instance": "Chọn một thực thể Ollama",
 	"Select Documents": "Chọn tài liệu",
-	"Select Engine": "",
+	"Select Engine": "Chọn Engine",
 	"Select model": "Chọn model",
 	"Select only one model to call": "Chọn model để gọi",
 	"Selected model(s) do not support image inputs": "Model được lựa chọn không hỗ trợ đầu vào là hình ảnh",
@@ -604,12 +610,14 @@
 	"Tell us more:": "Hãy cho chúng tôi hiểu thêm về chất lượng của câu trả lời:",
 	"Temperature": "Mức độ sáng tạo",
 	"Template": "Mẫu",
-	"Temporary Chat": "",
+	"Temporary Chat": "Chat nháp",
 	"Text Completion": "Hoàn tất Văn bản",
 	"Text-to-Speech Engine": "Công cụ Chuyển Văn bản thành Giọng nói",
 	"Tfs Z": "Tfs Z",
 	"Thanks for your feedback!": "Cám ơn bạn đã gửi phản hồi!",
 	"The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "Các nhà phát triển đằng sau plugin này là những tình nguyện viên nhiệt huyết của cộng đồng. Nếu bạn thấy plugin này hữu ích, vui lòng cân nhắc đóng góp cho sự phát triển của nó.",
+	"The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "",
+	"The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "",
 	"The score should be a value between 0.0 (0%) and 1.0 (100%).": "Điểm (score) phải có giá trị từ 0,0 (0%) đến 1,0 (100%).",
 	"Theme": "Chủ đề",
 	"Thinking...": "Đang suy luận...",
@@ -709,6 +717,7 @@
 	"Write a summary in 50 words that summarizes [topic or keyword].": "Viết một tóm tắt trong vòng 50 từ cho [chủ đề hoặc từ khóa].",
 	"Yesterday": "Hôm qua",
 	"You": "Bạn",
+	"You can only chat with a maximum of {{maxCount}} file(s) at a time.": "",
 	"You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "Bạn có thể cá nhân hóa các tương tác của mình với LLM bằng cách thêm bộ nhớ thông qua nút 'Quản lý' bên dưới, làm cho chúng hữu ích hơn và phù hợp với bạn hơn.",
 	"You cannot clone a base model": "Bạn không thể nhân bản base model",
 	"You have no archived conversations.": "Bạn chưa lưu trữ một nội dung chat nào",

+ 27 - 18
src/lib/i18n/locales/zh-CN/translation.json

@@ -6,10 +6,9 @@
 	"(latest)": "(最新版)",
 	"{{ models }}": "{{ models }}",
 	"{{ owner }}: You cannot delete a base model": "{{ owner }}:您不能删除基础模型",
-	"{{modelName}} is thinking...": "{{modelName}} 正在思考...",
 	"{{user}}'s Chats": "{{user}} 的对话记录",
 	"{{webUIName}} Backend Required": "需要 {{webUIName}} 后端",
-	"*Prompt node ID(s) are required for image generation": "",
+	"*Prompt node ID(s) are required for image generation": "*图片生成需要 Prompt node ID",
 	"A task model is used when performing tasks such as generating titles for chats and web search queries": "任务模型用于执行生成对话标题和网络搜索查询等任务",
 	"a user": "用户",
 	"About": "关于",
@@ -44,10 +43,10 @@
 	"All Documents": "所有文档",
 	"All Users": "所有用户",
 	"Allow": "允许",
-	"Allow Chat Deletion": "允许删除聊天记录",
-	"Allow Chat Editing": "",
+	"Allow Chat Deletion": "允许删除对话记录",
+	"Allow Chat Editing": "允许编辑对话记录",
 	"Allow non-local voices": "允许调用非本地音色",
-	"Allow Temporary Chat": "",
+	"Allow Temporary Chat": "允许临时对话",
 	"Allow User Location": "允许获取您的位置",
 	"Allow Voice Interruption in Call": "允许通话中的打断语音",
 	"alphanumeric characters and hyphens": "字母数字字符和连字符",
@@ -68,11 +67,10 @@
 	"Attach file": "添加文件",
 	"Attention to detail": "注重细节",
 	"Audio": "语音",
-	"Audio settings updated successfully": "语音设置更新成功",
 	"August": "八月",
 	"Auto-playback response": "自动念出回复内容",
-	"Automatic1111": "",
-	"AUTOMATIC1111 Api Auth String": "AUTOMATIC1111 Api鉴权字符串",
+	"Automatic1111": "Automatic1111",
+	"AUTOMATIC1111 Api Auth String": "AUTOMATIC1111 Api 鉴权字符串",
 	"AUTOMATIC1111 Base URL": "AUTOMATIC1111 基础地址",
 	"AUTOMATIC1111 Base URL is required.": "需要 AUTOMATIC1111 基础地址。",
 	"available!": "版本可用!",
@@ -113,7 +111,7 @@
 	"Click here to select a csv file.": "单击此处选择 csv 文件。",
 	"Click here to select a py file.": "单击此处选择 py 文件。",
 	"Click here to select documents.": "单击选择文档",
-	"Click here to upload a workflow.json file.": "",
+	"Click here to upload a workflow.json file.": "单击此处上传 workflow.json 文件。",
 	"click here.": "点击这里。",
 	"Click on the user role button to change a user's role.": "点击角色前方的组别按钮以更改用户所属权限组。",
 	"Clipboard write permission denied. Please check your browser settings to grant the necessary access.": "写入剪贴板时被拒绝。请检查浏览器设置,授予必要权限。",
@@ -124,8 +122,8 @@
 	"ComfyUI": "ComfyUI",
 	"ComfyUI Base URL": "ComfyUI 基础地址",
 	"ComfyUI Base URL is required.": "ComfyUI 基础地址为必需填写。",
-	"ComfyUI Workflow": "",
-	"ComfyUI Workflow Nodes": "",
+	"ComfyUI Workflow": "ComfyUI Workflow",
+	"ComfyUI Workflow Nodes": "ComfyUI Workflow Nodes",
 	"Command": "命令",
 	"Concurrent Requests": "并发请求",
 	"Confirm": "确认",
@@ -138,9 +136,11 @@
 	"Context Length": "上下文长度",
 	"Continue Response": "继续生成",
 	"Continue with {{provider}}": "使用 {{provider}} 继续",
+	"Control how message text is split for TTS requests. 'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "控制消息文本如何拆分以用于 TTS 请求。“Punctuation”拆分为句子,“paragraphs”拆分为段落,“none”将消息保留为单个字符串。",
 	"Controls": "对话高级设置",
 	"Copied": "已复制",
 	"Copied shared chat URL to clipboard!": "已复制此对话分享链接至剪贴板!",
+	"Copied to clipboard": "已复制到剪贴板",
 	"Copy": "复制",
 	"Copy Code": "复制代码",
 	"Copy last code block": "复制最后一个代码块中的代码",
@@ -164,7 +164,7 @@
 	"Database": "数据库",
 	"December": "十二月",
 	"Default": "默认",
-	"Default (Open AI)": "",
+	"Default (Open AI)": "默认 (OpenAI)",
 	"Default (SentenceTransformers)": "默认(SentenceTransformers)",
 	"Default Model": "默认模型",
 	"Default model updated": "默认模型已更新",
@@ -227,13 +227,13 @@
 	"Embedding Model Engine": "语义向量模型引擎",
 	"Embedding model set to \"{{embedding_model}}\"": "语义向量模型设置为 \"{{embedding_model}}\"",
 	"Enable Community Sharing": "启用分享至社区",
-	"Enable Message Rating": "",
+	"Enable Message Rating": "启用消息评价",
 	"Enable New Sign Ups": "允许新用户注册",
 	"Enable Web Search": "启用网络搜索",
 	"Enabled": "启用",
 	"Engine": "引擎",
 	"Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "确保您的 CSV 文件按以下顺序包含 4 列: 姓名、电子邮箱、密码、角色。",
-	"Enter {{role}} message here": "在此处输入 {{role}} 息",
+	"Enter {{role}} message here": "在此处输入 {{role}} 消息",
 	"Enter a detail about yourself for your LLMs to recall": "输入一个关于你自己的详细信息,方便你的大语言模型记住这些内容",
 	"Enter api auth string (e.g. username:password)": "输入api鉴权路径 (例如:username:password)",
 	"Enter Brave Search API Key": "输入 Brave Search API 密钥",
@@ -244,7 +244,7 @@
 	"Enter Google PSE Engine Id": "输入 Google PSE 引擎 ID",
 	"Enter Image Size (e.g. 512x512)": "输入图像分辨率 (例如:512x512)",
 	"Enter language codes": "输入语言代码",
-	"Enter Model ID": "",
+	"Enter Model ID": "输入模型 ID",
 	"Enter model tag (e.g. {{modelTag}})": "输入模型标签 (例如:{{modelTag}})",
 	"Enter Number of Steps (e.g. 50)": "输入步骤数 (Steps) (例如:50)",
 	"Enter Score": "输入评分",
@@ -285,6 +285,7 @@
 	"File": "文件",
 	"File Mode": "文件模式",
 	"File not found.": "文件未找到。",
+	"File size should not exceed {{maxSize}} MB.": "",
 	"Files": "文件",
 	"Filter is now globally disabled": "过滤器已全局禁用",
 	"Filter is now globally enabled": "过滤器已全局启用",
@@ -361,6 +362,7 @@
 	"large language models, locally.": "本地大语言模型",
 	"Last Active": "最后在线时间",
 	"Last Modified": "最后修改时间",
+	"Leave empty for unlimited": "",
 	"Light": "浅色",
 	"Listening...": "正在倾听...",
 	"LLMs can make mistakes. Verify important information.": "大语言模型可能会生成误导性错误信息,请对关键信息加以验证。",
@@ -368,13 +370,15 @@
 	"LTR": "从左至右",
 	"Made by OpenWebUI Community": "由 OpenWebUI 社区制作",
 	"Make sure to enclose them with": "确保将它们包含在内",
-	"Make sure to export a workflow.json file as API format from ComfyUI.": "",
+	"Make sure to export a workflow.json file as API format from ComfyUI.": "确保从 ComfyUI 导出 API 格式的 workflow.json 文件。",
 	"Manage": "管理",
 	"Manage Models": "管理模型",
 	"Manage Ollama Models": "管理 Ollama 模型",
 	"Manage Pipelines": "管理 Pipeline",
 	"March": "三月",
 	"Max Tokens (num_predict)": "最多 Token (num_predict)",
+	"Max Upload Count": "",
+	"Max Upload Size": "",
 	"Maximum of 3 models can be downloaded simultaneously. Please try again later.": "最多可以同时下载 3 个模型,请稍后重试。",
 	"May": "五月",
 	"Memories accessible by LLMs will be shown here.": "大语言模型可访问的记忆将在此显示。",
@@ -383,7 +387,7 @@
 	"Memory cleared successfully": "记忆清除成功",
 	"Memory deleted successfully": "记忆删除成功",
 	"Memory updated successfully": "记忆更新成功",
-	"Merge Responses": "",
+	"Merge Responses": "合并回复",
 	"Messages you send after creating your link won't be shared. Users with the URL will be able to view the shared chat.": "创建链接后发送的消息不会被共享。具有 URL 的用户将能够查看共享对话。",
 	"Min P": "Min P",
 	"Minimum Score": "最低分",
@@ -505,6 +509,7 @@
 	"Reset Vector Storage": "重置向量存储",
 	"Response AutoCopy to Clipboard": "自动复制回复到剪贴板",
 	"Response notifications cannot be activated as the website permissions have been denied. Please visit your browser settings to grant the necessary access.": "无法激活回复时发送通知。请检查浏览器设置,并授予必要的访问权限。",
+	"Response splitting": "拆分回复",
 	"Role": "权限组",
 	"Rosé Pine": "Rosé Pine",
 	"Rosé Pine Dawn": "Rosé Pine Dawn",
@@ -534,6 +539,7 @@
 	"Search Tools": "搜索工具",
 	"Searched {{count}} sites_other": "搜索到 {{count}} 个结果",
 	"Searching \"{{searchQuery}}\"": "搜索 \"{{searchQuery}}\" 中",
+	"Searching Knowledge for \"{{searchQuery}}\"": "检索有关 \"{{searchQuery}}\" 的知识中",
 	"Searxng Query URL": "Searxng 查询 URL",
 	"See readme.md for instructions": "查看 readme.md 以获取说明",
 	"See what's new": "查阅最新更新内容",
@@ -547,7 +553,7 @@
 	"Select a tool": "选择一个工具",
 	"Select an Ollama instance": "选择一个 Ollama 实例",
 	"Select Documents": "选择文档",
-	"Select Engine": "",
+	"Select Engine": "选择引擎",
 	"Select model": "选择模型",
 	"Select only one model to call": "请仅选择一个模型来呼叫",
 	"Selected model(s) do not support image inputs": "已选择的模型不支持发送图像",
@@ -610,6 +616,8 @@
 	"Tfs Z": "Tfs Z",
 	"Thanks for your feedback!": "感谢您的反馈!",
 	"The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "本插件的背后开发者是社区中热情的志愿者。如果此插件有帮助到您,烦请考虑一下为它的开发做出贡献。",
+	"The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "",
+	"The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "",
 	"The score should be a value between 0.0 (0%) and 1.0 (100%).": "分值应介于 0.0(0%)和 1.0(100%)之间。",
 	"Theme": "主题",
 	"Thinking...": "正在思考...",
@@ -709,6 +717,7 @@
 	"Write a summary in 50 words that summarizes [topic or keyword].": "用 50 个字写一个总结 [主题或关键词]。",
 	"Yesterday": "昨天",
 	"You": "你",
+	"You can only chat with a maximum of {{maxCount}} file(s) at a time.": "",
 	"You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "通过点击下方的“管理”按钮,你可以添加记忆,以个性化大语言模型的互动,使其更有用,更符合你的需求。",
 	"You cannot clone a base model": "你不能复制基础模型",
 	"You have no archived conversations.": "你没有已归档的对话。",

+ 11 - 2
src/lib/i18n/locales/zh-TW/translation.json

@@ -6,7 +6,6 @@
 	"(latest)": "(最新版)",
 	"{{ models }}": "{{ models }}",
 	"{{ owner }}: You cannot delete a base model": "{{ owner }}:您無法刪除基礎模型",
-	"{{modelName}} is thinking...": "{{modelName}} 正在思考...",
 	"{{user}}'s Chats": "{{user}} 的對話",
 	"{{webUIName}} Backend Required": "需要 {{webUIName}} 後端",
 	"*Prompt node ID(s) are required for image generation": "",
@@ -68,7 +67,6 @@
 	"Attach file": "附加檔案",
 	"Attention to detail": "注重細節",
 	"Audio": "音訊",
-	"Audio settings updated successfully": "成功更新音訊設定",
 	"August": "8 月",
 	"Auto-playback response": "自動播放回應",
 	"Automatic1111": "",
@@ -138,9 +136,11 @@
 	"Context Length": "上下文長度",
 	"Continue Response": "繼續回應",
 	"Continue with {{provider}}": "使用 {{provider}} 繼續",
+	"Control how message text is split for TTS requests. 'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "",
 	"Controls": "控制項",
 	"Copied": "",
 	"Copied shared chat URL to clipboard!": "已複製共用對話 URL 到剪貼簿!",
+	"Copied to clipboard": "",
 	"Copy": "複製",
 	"Copy Code": "",
 	"Copy last code block": "複製最後一個程式碼區塊",
@@ -285,6 +285,7 @@
 	"File": "檔案",
 	"File Mode": "檔案模式",
 	"File not found.": "找不到檔案。",
+	"File size should not exceed {{maxSize}} MB.": "",
 	"Files": "檔案",
 	"Filter is now globally disabled": "篩選器現在已全域停用",
 	"Filter is now globally enabled": "篩選器現在已全域啟用",
@@ -361,6 +362,7 @@
 	"large language models, locally.": "在本機執行大型語言模型。",
 	"Last Active": "上次活動時間",
 	"Last Modified": "上次修改時間",
+	"Leave empty for unlimited": "",
 	"Light": "淺色",
 	"Listening...": "正在聆聽...",
 	"LLMs can make mistakes. Verify important information.": "大型語言模型可能會出錯。請驗證重要資訊。",
@@ -375,6 +377,8 @@
 	"Manage Pipelines": "管理管線",
 	"March": "3 月",
 	"Max Tokens (num_predict)": "最大 token 數(num_predict)",
+	"Max Upload Count": "",
+	"Max Upload Size": "",
 	"Maximum of 3 models can be downloaded simultaneously. Please try again later.": "最多可同時下載 3 個模型。請稍後再試。",
 	"May": "5 月",
 	"Memories accessible by LLMs will be shown here.": "可被大型語言模型存取的記憶將顯示在這裡。",
@@ -505,6 +509,7 @@
 	"Reset Vector Storage": "重設向量儲存空間",
 	"Response AutoCopy to Clipboard": "自動將回應複製到剪貼簿",
 	"Response notifications cannot be activated as the website permissions have been denied. Please visit your browser settings to grant the necessary access.": "無法啟用回應通知,因為網站權限已遭拒。請前往瀏覽器設定以授予必要存取權限。",
+	"Response splitting": "",
 	"Role": "角色",
 	"Rosé Pine": "玫瑰松",
 	"Rosé Pine Dawn": "黎明玫瑰松",
@@ -535,6 +540,7 @@
 	"Searched {{count}} sites_one": "已搜尋 {{count}} 個網站",
 	"Searched {{count}} sites_other": "已搜尋 {{count}} 個網站",
 	"Searching \"{{searchQuery}}\"": "正在搜尋 \"{{searchQuery}}\"",
+	"Searching Knowledge for \"{{searchQuery}}\"": "",
 	"Searxng Query URL": "Searxng 查詢 URL",
 	"See readme.md for instructions": "檢視 readme.md 以取得說明",
 	"See what's new": "查看新功能",
@@ -611,6 +617,8 @@
 	"Tfs Z": "Tfs Z",
 	"Thanks for your feedback!": "感謝您的回饋!",
 	"The developers behind this plugin are passionate volunteers from the community. If you find this plugin helpful, please consider contributing to its development.": "這個外掛背後的開發者是來自社群的熱情志願者。如果您覺得這個外掛很有幫助,請考慮為其開發做出貢獻。",
+	"The maximum file size in MB. If the file size exceeds this limit, the file will not be uploaded.": "",
+	"The maximum number of files that can be used at once in chat. If the number of files exceeds this limit, the files will not be uploaded.": "",
 	"The score should be a value between 0.0 (0%) and 1.0 (100%).": "分數應該是介於 0.0(0%)和 1.0(100%)之間的值。",
 	"Theme": "主題",
 	"Thinking...": "正在思考...",
@@ -710,6 +718,7 @@
 	"Write a summary in 50 words that summarizes [topic or keyword].": "用 50 字寫一篇總結 [主題或關鍵字] 的摘要。",
 	"Yesterday": "昨天",
 	"You": "您",
+	"You can only chat with a maximum of {{maxCount}} file(s) at a time.": "",
 	"You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "您可以透過下方的「管理」按鈕新增記憶,將您與大型語言模型的互動個人化,讓它們更有幫助並更符合您的需求。",
 	"You cannot clone a base model": "您無法複製基礎模型",
 	"You have no archived conversations.": "您沒有已封存的對話。",

+ 6 - 0
src/lib/types/index.ts

@@ -7,3 +7,9 @@ export type Banner = {
 	dismissible?: boolean;
 	timestamp: number;
 };
+
+export enum TTS_RESPONSE_SPLIT {
+	PUNCTUATION = 'punctuation',
+	PARAGRAPHS = 'paragraphs',
+	NONE = 'none'
+}

Alguns ficheiros não foram mostrados porque muitos ficheiros mudaram neste diff