
Merge pull request #10469 from open-webui/dev

0.5.17
Timothy Jaeryang Baek, 2 months ago
Parent
Current commit
15485e7c5d
100 files changed, with 3,278 insertions and 1,694 deletions
1. CHANGELOG.md (+21 -0)
2. backend/open_webui/config.py (+63 -23)
3. backend/open_webui/env.py (+22 -0)
4. backend/open_webui/functions.py (+6 -3)
5. backend/open_webui/main.py (+38 -4)
6. backend/open_webui/models/chats.py (+9 -5)
7. backend/open_webui/models/feedbacks.py (+1 -1)
8. backend/open_webui/models/files.py (+1 -1)
9. backend/open_webui/models/folders.py (+1 -1)
10. backend/open_webui/models/functions.py (+8 -4)
11. backend/open_webui/models/models.py (+2 -3)
12. backend/open_webui/models/tags.py (+1 -1)
13. backend/open_webui/models/tools.py (+8 -4)
14. backend/open_webui/retrieval/loaders/main.py (+23 -0)
15. backend/open_webui/retrieval/models/colbert.py (+7 -1)
16. backend/open_webui/retrieval/utils.py (+79 -35)
17. backend/open_webui/retrieval/vector/dbs/chroma.py (+6 -2)
18. backend/open_webui/retrieval/vector/dbs/milvus.py (+10 -4)
19. backend/open_webui/retrieval/vector/dbs/pgvector.py (+23 -15)
20. backend/open_webui/retrieval/vector/dbs/qdrant.py (+7 -2)
21. backend/open_webui/routers/audio.py (+50 -13)
22. backend/open_webui/routers/auths.py (+6 -20)
23. backend/open_webui/routers/files.py (+18 -2)
24. backend/open_webui/routers/functions.py (+10 -4)
25. backend/open_webui/routers/groups.py (+11 -5)
26. backend/open_webui/routers/images.py (+12 -2)
27. backend/open_webui/routers/knowledge.py (+1 -1)
28. backend/open_webui/routers/ollama.py (+130 -16)
29. backend/open_webui/routers/openai.py (+44 -11)
30. backend/open_webui/routers/pipelines.py (+11 -11)
31. backend/open_webui/routers/retrieval.py (+98 -39)
32. backend/open_webui/routers/tasks.py (+46 -0)
33. backend/open_webui/routers/tools.py (+9 -4)
34. backend/open_webui/routers/utils.py (+6 -1)
35. backend/open_webui/storage/provider.py (+18 -3)
36. backend/open_webui/utils/audit.py (+249 -0)
37. backend/open_webui/utils/auth.py (+19 -7)
38. backend/open_webui/utils/chat.py (+5 -5)
39. backend/open_webui/utils/filter.py (+14 -3)
40. backend/open_webui/utils/logger.py (+140 -0)
41. backend/open_webui/utils/middleware.py (+210 -131)
42. backend/open_webui/utils/misc.py (+6 -1)
43. backend/open_webui/utils/models.py (+6 -5)
44. backend/open_webui/utils/oauth.py (+1 -10)
45. backend/open_webui/utils/payload.py (+7 -1)
46. backend/open_webui/utils/plugin.py (+1 -1)
47. backend/open_webui/utils/response.py (+2 -2)
48. backend/requirements.txt (+4 -0)
49. package-lock.json (+2 -2)
50. package.json (+1 -1)
51. pyproject.toml (+4 -0)
52. src/lib/apis/retrieval/index.ts (+7 -0)
53. src/lib/components/admin/Functions/FunctionEditor.svelte (+7 -6)
54. src/lib/components/admin/Settings/Documents.svelte (+461 -430)
55. src/lib/components/admin/Settings/Evaluations.svelte (+47 -41)
56. src/lib/components/admin/Settings/Models/ConfigureModelsModal.svelte (+46 -2)
57. src/lib/components/admin/Settings/Models/ModelList.svelte (+18 -8)
58. src/lib/components/admin/Settings/WebSearch.svelte (+321 -289)
59. src/lib/components/channel/MessageInput.svelte (+0 -16)
60. src/lib/components/chat/Artifacts.svelte (+2 -0)
61. src/lib/components/chat/Chat.svelte (+21 -4)
62. src/lib/components/chat/ChatPlaceholder.svelte (+3 -1)
63. src/lib/components/chat/MessageInput.svelte (+36 -25)
64. src/lib/components/chat/MessageInput/Commands/Prompts.svelte (+7 -1)
65. src/lib/components/chat/MessageInput/InputMenu.svelte (+93 -0)
66. src/lib/components/chat/Messages.svelte (+2 -0)
67. src/lib/components/chat/Messages/Citations.svelte (+2 -1)
68. src/lib/components/chat/Messages/CodeBlock.svelte (+11 -18)
69. src/lib/components/chat/Messages/Markdown/MarkdownTokens.svelte (+4 -4)
70. src/lib/components/chat/Messages/ResponseMessage.svelte (+20 -6)
71. src/lib/components/chat/Placeholder.svelte (+2 -1)
72. src/lib/components/chat/Settings/Personalization/ManageModal.svelte (+31 -9)
73. src/lib/components/chat/ShortcutsModal.svelte (+40 -2)
74. src/lib/components/common/CodeEditor.svelte (+12 -3)
75. src/lib/components/common/FileItemModal.svelte (+1 -1)
76. src/lib/components/workspace/Knowledge/KnowledgeBase.svelte (+0 -14)
77. src/lib/components/workspace/Models/ModelEditor.svelte (+0 -1)
78. src/lib/components/workspace/Prompts/PromptMenu.svelte (+12 -9)
79. src/lib/components/workspace/Tools/ToolMenu.svelte (+12 -9)
80. src/lib/components/workspace/Tools/ToolkitEditor.svelte (+7 -7)
81. src/lib/i18n/locales/ar-BH/translation.json (+23 -13)
82. src/lib/i18n/locales/bg-BG/translation.json (+23 -13)
83. src/lib/i18n/locales/bn-BD/translation.json (+23 -13)
84. src/lib/i18n/locales/ca-ES/translation.json (+73 -63)
85. src/lib/i18n/locales/ceb-PH/translation.json (+23 -13)
86. src/lib/i18n/locales/cs-CZ/translation.json (+23 -13)
87. src/lib/i18n/locales/da-DK/translation.json (+23 -13)
88. src/lib/i18n/locales/de-DE/translation.json (+23 -13)
89. src/lib/i18n/locales/dg-DG/translation.json (+23 -13)
90. src/lib/i18n/locales/el-GR/translation.json (+23 -13)
91. src/lib/i18n/locales/en-GB/translation.json (+23 -13)
92. src/lib/i18n/locales/en-US/translation.json (+23 -13)
93. src/lib/i18n/locales/es-ES/translation.json (+23 -13)
94. src/lib/i18n/locales/eu-ES/translation.json (+23 -13)
95. src/lib/i18n/locales/fa-IR/translation.json (+23 -13)
96. src/lib/i18n/locales/fi-FI/translation.json (+89 -79)
97. src/lib/i18n/locales/fr-CA/translation.json (+23 -13)
98. src/lib/i18n/locales/fr-FR/translation.json (+23 -13)
99. src/lib/i18n/locales/he-IL/translation.json (+23 -13)
100. src/lib/i18n/locales/hi-IN/translation.json (+23 -13)

+ 21 - 0
CHANGELOG.md

@@ -5,6 +5,27 @@ All notable changes to this project will be documented in this file.
 The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/),
 and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
 
+## [0.5.17] - 2025-02-27
+
+### Added
+
+- **🚀 Instant Document Upload with Bypass Embedding & Retrieval**: Admins can now enable "Bypass Embedding & Retrieval" in Admin Settings > Documents, significantly speeding up document uploads and ensuring full document context is retained without chunking.
+- **🔎 "Stream" Hook for Real-Time Filtering**: The new "stream" hook allows dynamic real-time message filtering. Learn more in our documentation (https://docs.openwebui.com/features/plugin/functions/filter).
+- **☁️ OneDrive Integration**: Early support for OneDrive storage integration has been introduced, expanding file import options.
+- **📈 Enhanced Logging with Loguru**: Backend logging has been improved with Loguru, making debugging and issue tracking far more efficient.
+- **⚙️ General Stability Enhancements**: Backend and frontend refactoring improves performance, ensuring a smoother and more reliable user experience.
+- **🌍 Updated Translations**: Refined multilingual support for better localization and accuracy across various languages.
+
+### Fixed
+
+- **🔄 Reliable Model Imports from the Community Platform**: Resolved import failures, allowing seamless integration of community-shared models without errors.
+- **📊 OpenAI Usage Statistics Restored**: Fixed an issue where OpenAI usage metrics were not displaying correctly, ensuring accurate tracking of usage data.
+- **🗂️ Deduplication for Retrieved Documents**: Documents retrieved during searches are now intelligently deduplicated, meaning no more redundant results—helping to keep information concise and relevant.
+
+### Changed
+
+- **📝 "Full Context Mode" Renamed for Clarity**: The "Full Context Mode" toggle in Web Search settings is now labeled "Bypass Embedding & Retrieval" for consistency across the UI.
+
 ## [0.5.16] - 2025-02-20
 
 ### Fixed
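
Note on the "Stream" hook mentioned in the changelog above: it adds a third entry point to filter functions alongside the existing inlet/outlet methods. A minimal sketch, assuming the documented filter class interface and an OpenAI-style streaming chunk payload (both are assumptions here; see the linked docs for the authoritative shape):

class Filter:
    def inlet(self, body: dict) -> dict:
        # Runs on the request before it reaches the model (existing hook).
        return body

    def stream(self, event: dict) -> dict:
        # New hook: runs on every streamed chunk, enabling real-time filtering.
        # The chunk shape below ("choices"/"delta") is an assumption.
        for choice in event.get("choices", []):
            delta = choice.get("delta", {})
            if delta.get("content"):
                delta["content"] = delta["content"].replace("secret", "[redacted]")
        return event

    def outlet(self, body: dict) -> dict:
        # Runs on the fully assembled response (existing hook).
        return body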

+ 63 - 23
backend/open_webui/config.py

@@ -9,7 +9,6 @@ from pathlib import Path
 from typing import Generic, Optional, TypeVar
 from urllib.parse import urlparse
 
-import chromadb
 import requests
 from pydantic import BaseModel
 from sqlalchemy import JSON, Column, DateTime, Integer, func
@@ -44,7 +43,7 @@ logging.getLogger("uvicorn.access").addFilter(EndpointFilter())
 
 # Function to run the alembic migrations
 def run_migrations():
-    print("Running migrations")
+    log.info("Running migrations")
     try:
         from alembic import command
         from alembic.config import Config
@@ -57,7 +56,7 @@ def run_migrations():
 
         command.upgrade(alembic_cfg, "head")
     except Exception as e:
-        print(f"Error: {e}")
+        log.exception(f"Error running migrations: {e}")
 
 
 run_migrations()
@@ -678,6 +677,10 @@ S3_REGION_NAME = os.environ.get("S3_REGION_NAME", None)
 S3_BUCKET_NAME = os.environ.get("S3_BUCKET_NAME", None)
 S3_KEY_PREFIX = os.environ.get("S3_KEY_PREFIX", None)
 S3_ENDPOINT_URL = os.environ.get("S3_ENDPOINT_URL", None)
+S3_USE_ACCELERATE_ENDPOINT = (
+    os.environ.get("S3_USE_ACCELERATE_ENDPOINT", "False").lower() == "true"
+)
+S3_ADDRESSING_STYLE = os.environ.get("S3_ADDRESSING_STYLE", None)
 
 GCS_BUCKET_NAME = os.environ.get("GCS_BUCKET_NAME", None)
 GOOGLE_APPLICATION_CREDENTIALS_JSON = os.environ.get(
@@ -1094,7 +1097,7 @@ try:
     banners = json.loads(os.environ.get("WEBUI_BANNERS", "[]"))
     banners = [BannerModel(**banner) for banner in banners]
 except Exception as e:
-    print(f"Error loading WEBUI_BANNERS: {e}")
+    log.exception(f"Error loading WEBUI_BANNERS: {e}")
     banners = []
 
 WEBUI_BANNERS = PersistentConfig("WEBUI_BANNERS", "ui.banners", banners)
@@ -1497,22 +1500,27 @@ Ensure that the tools are effectively utilized to achieve the highest-quality an
 VECTOR_DB = os.environ.get("VECTOR_DB", "chroma")
 
 # Chroma
-CHROMA_DATA_PATH = f"{DATA_DIR}/vector_db"
-CHROMA_TENANT = os.environ.get("CHROMA_TENANT", chromadb.DEFAULT_TENANT)
-CHROMA_DATABASE = os.environ.get("CHROMA_DATABASE", chromadb.DEFAULT_DATABASE)
-CHROMA_HTTP_HOST = os.environ.get("CHROMA_HTTP_HOST", "")
-CHROMA_HTTP_PORT = int(os.environ.get("CHROMA_HTTP_PORT", "8000"))
-CHROMA_CLIENT_AUTH_PROVIDER = os.environ.get("CHROMA_CLIENT_AUTH_PROVIDER", "")
-CHROMA_CLIENT_AUTH_CREDENTIALS = os.environ.get("CHROMA_CLIENT_AUTH_CREDENTIALS", "")
-# Comma-separated list of header=value pairs
-CHROMA_HTTP_HEADERS = os.environ.get("CHROMA_HTTP_HEADERS", "")
-if CHROMA_HTTP_HEADERS:
-    CHROMA_HTTP_HEADERS = dict(
-        [pair.split("=") for pair in CHROMA_HTTP_HEADERS.split(",")]
+if VECTOR_DB == "chroma":
+    import chromadb
+
+    CHROMA_DATA_PATH = f"{DATA_DIR}/vector_db"
+    CHROMA_TENANT = os.environ.get("CHROMA_TENANT", chromadb.DEFAULT_TENANT)
+    CHROMA_DATABASE = os.environ.get("CHROMA_DATABASE", chromadb.DEFAULT_DATABASE)
+    CHROMA_HTTP_HOST = os.environ.get("CHROMA_HTTP_HOST", "")
+    CHROMA_HTTP_PORT = int(os.environ.get("CHROMA_HTTP_PORT", "8000"))
+    CHROMA_CLIENT_AUTH_PROVIDER = os.environ.get("CHROMA_CLIENT_AUTH_PROVIDER", "")
+    CHROMA_CLIENT_AUTH_CREDENTIALS = os.environ.get(
+        "CHROMA_CLIENT_AUTH_CREDENTIALS", ""
     )
-else:
-    CHROMA_HTTP_HEADERS = None
-CHROMA_HTTP_SSL = os.environ.get("CHROMA_HTTP_SSL", "false").lower() == "true"
+    # Comma-separated list of header=value pairs
+    CHROMA_HTTP_HEADERS = os.environ.get("CHROMA_HTTP_HEADERS", "")
+    if CHROMA_HTTP_HEADERS:
+        CHROMA_HTTP_HEADERS = dict(
+            [pair.split("=") for pair in CHROMA_HTTP_HEADERS.split(",")]
+        )
+    else:
+        CHROMA_HTTP_HEADERS = None
+    CHROMA_HTTP_SSL = os.environ.get("CHROMA_HTTP_SSL", "false").lower() == "true"
 # this uses the model defined in the Dockerfile ENV variable. If you dont use docker or docker based deployments such as k8s, the default embedding model will be used (sentence-transformers/all-MiniLM-L6-v2)
 
 # Milvus
@@ -1566,6 +1574,18 @@ GOOGLE_DRIVE_API_KEY = PersistentConfig(
     os.environ.get("GOOGLE_DRIVE_API_KEY", ""),
 )
 
+ENABLE_ONEDRIVE_INTEGRATION = PersistentConfig(
+    "ENABLE_ONEDRIVE_INTEGRATION",
+    "onedrive.enable",
+    os.getenv("ENABLE_ONEDRIVE_INTEGRATION", "False").lower() == "true",
+)
+
+ONEDRIVE_CLIENT_ID = PersistentConfig(
+    "ONEDRIVE_CLIENT_ID",
+    "onedrive.client_id",
+    os.environ.get("ONEDRIVE_CLIENT_ID", ""),
+)
+
 # RAG Content Extraction
 CONTENT_EXTRACTION_ENGINE = PersistentConfig(
     "CONTENT_EXTRACTION_ENGINE",
@@ -1579,6 +1599,26 @@ TIKA_SERVER_URL = PersistentConfig(
     os.getenv("TIKA_SERVER_URL", "http://tika:9998"),  # Default for sidecar deployment
 )
 
+DOCUMENT_INTELLIGENCE_ENDPOINT = PersistentConfig(
+    "DOCUMENT_INTELLIGENCE_ENDPOINT",
+    "rag.document_intelligence_endpoint",
+    os.getenv("DOCUMENT_INTELLIGENCE_ENDPOINT", ""),
+)
+
+DOCUMENT_INTELLIGENCE_KEY = PersistentConfig(
+    "DOCUMENT_INTELLIGENCE_KEY",
+    "rag.document_intelligence_key",
+    os.getenv("DOCUMENT_INTELLIGENCE_KEY", ""),
+)
+
+
+BYPASS_EMBEDDING_AND_RETRIEVAL = PersistentConfig(
+    "BYPASS_EMBEDDING_AND_RETRIEVAL",
+    "rag.bypass_embedding_and_retrieval",
+    os.environ.get("BYPASS_EMBEDDING_AND_RETRIEVAL", "False").lower() == "true",
+)
+
+
 RAG_TOP_K = PersistentConfig(
     "RAG_TOP_K", "rag.top_k", int(os.environ.get("RAG_TOP_K", "3"))
 )
@@ -1795,10 +1835,10 @@ RAG_WEB_SEARCH_ENGINE = PersistentConfig(
     os.getenv("RAG_WEB_SEARCH_ENGINE", ""),
 )
 
-RAG_WEB_SEARCH_FULL_CONTEXT = PersistentConfig(
-    "RAG_WEB_SEARCH_FULL_CONTEXT",
-    "rag.web.search.full_context",
-    os.getenv("RAG_WEB_SEARCH_FULL_CONTEXT", "False").lower() == "true",
+BYPASS_WEB_SEARCH_EMBEDDING_AND_RETRIEVAL = PersistentConfig(
+    "BYPASS_WEB_SEARCH_EMBEDDING_AND_RETRIEVAL",
+    "rag.web.search.bypass_embedding_and_retrieval",
+    os.getenv("BYPASS_WEB_SEARCH_EMBEDDING_AND_RETRIEVAL", "False").lower() == "true",
 )
 
 # You can provide a list of your own websites to filter after performing a web search.
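
Earlier in this file's diff, the Chroma settings block (now only evaluated when VECTOR_DB == "chroma") parses CHROMA_HTTP_HEADERS from a comma-separated list of header=value pairs. A small standalone sketch of that parsing, with a made-up example value:

# Example value only; not taken from the commit.
headers_env = "Authorization=Bearer abc123,X-Chroma-Token=xyz"

# Mirrors the parsing above: split pairs on commas, then split each pair on "=";
# an empty or unset value becomes None instead.
chroma_http_headers = (
    dict(pair.split("=") for pair in headers_env.split(","))
    if headers_env
    else None
)
print(chroma_http_headers)
# {'Authorization': 'Bearer abc123', 'X-Chroma-Token': 'xyz'}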

+ 22 - 0
backend/open_webui/env.py

@@ -419,3 +419,25 @@ OFFLINE_MODE = os.environ.get("OFFLINE_MODE", "false").lower() == "true"
 
 if OFFLINE_MODE:
     os.environ["HF_HUB_OFFLINE"] = "1"
+
+####################################
+# AUDIT LOGGING
+####################################
+ENABLE_AUDIT_LOGS = os.getenv("ENABLE_AUDIT_LOGS", "false").lower() == "true"
+# Where to store log file
+AUDIT_LOGS_FILE_PATH = f"{DATA_DIR}/audit.log"
+# Maximum size of a file before rotating into a new log file
+AUDIT_LOG_FILE_ROTATION_SIZE = os.getenv("AUDIT_LOG_FILE_ROTATION_SIZE", "10MB")
+# METADATA | REQUEST | REQUEST_RESPONSE
+AUDIT_LOG_LEVEL = os.getenv("AUDIT_LOG_LEVEL", "REQUEST_RESPONSE").upper()
+try:
+    MAX_BODY_LOG_SIZE = int(os.environ.get("MAX_BODY_LOG_SIZE") or 2048)
+except ValueError:
+    MAX_BODY_LOG_SIZE = 2048
+
+# Comma separated list for urls to exclude from audit
+AUDIT_EXCLUDED_PATHS = os.getenv("AUDIT_EXCLUDED_PATHS", "/chats,/chat,/folders").split(
+    ","
+)
+AUDIT_EXCLUDED_PATHS = [path.strip() for path in AUDIT_EXCLUDED_PATHS]
+AUDIT_EXCLUDED_PATHS = [path.lstrip("/") for path in AUDIT_EXCLUDED_PATHS]
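
A quick sketch of how the AUDIT_EXCLUDED_PATHS value above is normalized: comma-separated entries are trimmed of whitespace and stripped of leading slashes (the value below is a hypothetical example, not from the commit):

raw = "/chats, /chat , folders"  # hypothetical env value

excluded = [p.strip() for p in raw.split(",")]
excluded = [p.lstrip("/") for p in excluded]
print(excluded)  # ['chats', 'chat', 'folders']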

+ 6 - 3
backend/open_webui/functions.py

@@ -2,6 +2,7 @@ import logging
 import sys
 import inspect
 import json
+import asyncio
 
 from pydantic import BaseModel
 from typing import AsyncGenerator, Generator, Iterator
@@ -76,11 +77,13 @@ async def get_function_models(request):
         if hasattr(function_module, "pipes"):
             sub_pipes = []
 
-            # Check if pipes is a function or a list
-
+            # Handle pipes being a list, sync function, or async function
             try:
                 if callable(function_module.pipes):
-                    sub_pipes = function_module.pipes()
+                    if asyncio.iscoroutinefunction(function_module.pipes):
+                        sub_pipes = await function_module.pipes()
+                    else:
+                        sub_pipes = function_module.pipes()
                 else:
                     sub_pipes = function_module.pipes
             except Exception as e:
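
The change above lets a pipe module declare `pipes` as a plain list, a synchronous function, or an async function. A self-contained sketch of the same dispatch logic, with hypothetical pipe definitions:

import asyncio

async def resolve_pipes(pipes):
    # Mirrors the logic above: await async callables, call sync callables,
    # and pass lists through unchanged.
    if callable(pipes):
        if asyncio.iscoroutinefunction(pipes):
            return await pipes()
        return pipes()
    return pipes

def sync_pipes():
    return [{"id": "demo-sync"}]

async def async_pipes():
    return [{"id": "demo-async"}]

async def main():
    print(await resolve_pipes([{"id": "demo-static"}]))
    print(await resolve_pipes(sync_pipes))
    print(await resolve_pipes(async_pipes))

asyncio.run(main())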

+ 38 - 4
backend/open_webui/main.py

@@ -45,6 +45,9 @@ from starlette.middleware.sessions import SessionMiddleware
 from starlette.responses import Response, StreamingResponse
 
 
+from open_webui.utils import logger
+from open_webui.utils.audit import AuditLevel, AuditLoggingMiddleware
+from open_webui.utils.logger import start_logger
 from open_webui.socket.main import (
     app as socket_app,
     periodic_usage_pool_cleanup,
@@ -95,6 +98,7 @@ from open_webui.config import (
     OLLAMA_API_CONFIGS,
     # OpenAI
     ENABLE_OPENAI_API,
+    ONEDRIVE_CLIENT_ID,
     OPENAI_API_BASE_URLS,
     OPENAI_API_KEYS,
     OPENAI_API_CONFIGS,
@@ -161,6 +165,7 @@ from open_webui.config import (
     RAG_TEMPLATE,
     DEFAULT_RAG_TEMPLATE,
     RAG_FULL_CONTEXT,
+    BYPASS_EMBEDDING_AND_RETRIEVAL,
     RAG_EMBEDDING_MODEL,
     RAG_EMBEDDING_MODEL_AUTO_UPDATE,
     RAG_EMBEDDING_MODEL_TRUST_REMOTE_CODE,
@@ -180,6 +185,8 @@ from open_webui.config import (
     CHUNK_SIZE,
     CONTENT_EXTRACTION_ENGINE,
     TIKA_SERVER_URL,
+    DOCUMENT_INTELLIGENCE_ENDPOINT,
+    DOCUMENT_INTELLIGENCE_KEY,
     RAG_TOP_K,
     RAG_TEXT_SPLITTER,
     TIKTOKEN_ENCODING_NAME,
@@ -188,7 +195,7 @@ from open_webui.config import (
     YOUTUBE_LOADER_PROXY_URL,
     # Retrieval (Web Search)
     RAG_WEB_SEARCH_ENGINE,
-    RAG_WEB_SEARCH_FULL_CONTEXT,
+    BYPASS_WEB_SEARCH_EMBEDDING_AND_RETRIEVAL,
     RAG_WEB_SEARCH_RESULT_COUNT,
     RAG_WEB_SEARCH_CONCURRENT_REQUESTS,
     RAG_WEB_SEARCH_TRUST_ENV,
@@ -215,11 +222,13 @@ from open_webui.config import (
     GOOGLE_PSE_ENGINE_ID,
     GOOGLE_DRIVE_CLIENT_ID,
     GOOGLE_DRIVE_API_KEY,
+    ONEDRIVE_CLIENT_ID,
     ENABLE_RAG_HYBRID_SEARCH,
     ENABLE_RAG_LOCAL_WEB_FETCH,
     ENABLE_RAG_WEB_LOADER_SSL_VERIFICATION,
     ENABLE_RAG_WEB_SEARCH,
     ENABLE_GOOGLE_DRIVE_INTEGRATION,
+    ENABLE_ONEDRIVE_INTEGRATION,
     UPLOAD_DIR,
     # WebUI
     WEBUI_AUTH,
@@ -298,8 +307,11 @@ from open_webui.config import (
     reset_config,
 )
 from open_webui.env import (
+    AUDIT_EXCLUDED_PATHS,
+    AUDIT_LOG_LEVEL,
     CHANGELOG,
     GLOBAL_LOG_LEVEL,
+    MAX_BODY_LOG_SIZE,
     SAFE_MODE,
     SRC_LOG_LEVELS,
     VERSION,
@@ -384,6 +396,7 @@ https://github.com/open-webui/open-webui
 
 @asynccontextmanager
 async def lifespan(app: FastAPI):
+    start_logger()
     if RESET_CONFIG_ON_START:
         reset_config()
 
@@ -526,6 +539,7 @@ app.state.config.FILE_MAX_COUNT = RAG_FILE_MAX_COUNT
 
 
 app.state.config.RAG_FULL_CONTEXT = RAG_FULL_CONTEXT
+app.state.config.BYPASS_EMBEDDING_AND_RETRIEVAL = BYPASS_EMBEDDING_AND_RETRIEVAL
 app.state.config.ENABLE_RAG_HYBRID_SEARCH = ENABLE_RAG_HYBRID_SEARCH
 app.state.config.ENABLE_RAG_WEB_LOADER_SSL_VERIFICATION = (
     ENABLE_RAG_WEB_LOADER_SSL_VERIFICATION
@@ -533,6 +547,8 @@ app.state.config.ENABLE_RAG_WEB_LOADER_SSL_VERIFICATION = (
 
 app.state.config.CONTENT_EXTRACTION_ENGINE = CONTENT_EXTRACTION_ENGINE
 app.state.config.TIKA_SERVER_URL = TIKA_SERVER_URL
+app.state.config.DOCUMENT_INTELLIGENCE_ENDPOINT = DOCUMENT_INTELLIGENCE_ENDPOINT
+app.state.config.DOCUMENT_INTELLIGENCE_KEY = DOCUMENT_INTELLIGENCE_KEY
 
 app.state.config.TEXT_SPLITTER = RAG_TEXT_SPLITTER
 app.state.config.TIKTOKEN_ENCODING_NAME = TIKTOKEN_ENCODING_NAME
@@ -560,10 +576,13 @@ app.state.config.YOUTUBE_LOADER_PROXY_URL = YOUTUBE_LOADER_PROXY_URL
 
 app.state.config.ENABLE_RAG_WEB_SEARCH = ENABLE_RAG_WEB_SEARCH
 app.state.config.RAG_WEB_SEARCH_ENGINE = RAG_WEB_SEARCH_ENGINE
-app.state.config.RAG_WEB_SEARCH_FULL_CONTEXT = RAG_WEB_SEARCH_FULL_CONTEXT
+app.state.config.BYPASS_WEB_SEARCH_EMBEDDING_AND_RETRIEVAL = (
+    BYPASS_WEB_SEARCH_EMBEDDING_AND_RETRIEVAL
+)
 app.state.config.RAG_WEB_SEARCH_DOMAIN_FILTER_LIST = RAG_WEB_SEARCH_DOMAIN_FILTER_LIST
 
 app.state.config.ENABLE_GOOGLE_DRIVE_INTEGRATION = ENABLE_GOOGLE_DRIVE_INTEGRATION
+app.state.config.ENABLE_ONEDRIVE_INTEGRATION = ENABLE_ONEDRIVE_INTEGRATION
 app.state.config.SEARXNG_QUERY_URL = SEARXNG_QUERY_URL
 app.state.config.GOOGLE_PSE_API_KEY = GOOGLE_PSE_API_KEY
 app.state.config.GOOGLE_PSE_ENGINE_ID = GOOGLE_PSE_ENGINE_ID
@@ -879,6 +898,19 @@ app.include_router(
 app.include_router(utils.router, prefix="/api/v1/utils", tags=["utils"])
 
 
+try:
+    audit_level = AuditLevel(AUDIT_LOG_LEVEL)
+except ValueError as e:
+    logger.error(f"Invalid audit level: {AUDIT_LOG_LEVEL}. Error: {e}")
+    audit_level = AuditLevel.NONE
+
+if audit_level != AuditLevel.NONE:
+    app.add_middleware(
+        AuditLoggingMiddleware,
+        audit_level=audit_level,
+        excluded_paths=AUDIT_EXCLUDED_PATHS,
+        max_body_size=MAX_BODY_LOG_SIZE,
+    )
 ##################################
 #
 # Chat Endpoints
@@ -911,7 +943,7 @@ async def get_models(request: Request, user=Depends(get_verified_user)):
 
         return filtered_models
 
-    models = await get_all_models(request)
+    models = await get_all_models(request, user=user)
 
     # Filter out filter pipelines
     models = [
@@ -951,7 +983,7 @@ async def chat_completion(
     user=Depends(get_verified_user),
 ):
     if not request.app.state.MODELS:
-        await get_all_models(request)
+        await get_all_models(request, user=user)
 
     model_item = form_data.pop("model_item", {})
     tasks = form_data.pop("background_tasks", None)
@@ -1146,6 +1178,7 @@ async def get_app_config(request: Request):
                     "enable_admin_export": ENABLE_ADMIN_EXPORT,
                     "enable_admin_chat_access": ENABLE_ADMIN_CHAT_ACCESS,
                     "enable_google_drive_integration": app.state.config.ENABLE_GOOGLE_DRIVE_INTEGRATION,
+                    "enable_onedrive_integration": app.state.config.ENABLE_ONEDRIVE_INTEGRATION,
                 }
                 if user is not None
                 else {}
@@ -1177,6 +1210,7 @@ async def get_app_config(request: Request):
                     "client_id": GOOGLE_DRIVE_CLIENT_ID.value,
                     "api_key": GOOGLE_DRIVE_API_KEY.value,
                 },
+                "onedrive": {"client_id": ONEDRIVE_CLIENT_ID.value},
             }
             if user is not None
             else {}
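
Earlier in this file's diff, AUDIT_LOG_LEVEL is converted into an AuditLevel value, falling back to NONE (audit middleware disabled) when the value is invalid. A standalone sketch of that pattern; the enum members here are inferred from the METADATA | REQUEST | REQUEST_RESPONSE values noted in env.py plus the NONE fallback, so treat the exact definition as an assumption:

from enum import Enum

class AuditLevel(str, Enum):
    NONE = "NONE"
    METADATA = "METADATA"
    REQUEST = "REQUEST"
    REQUEST_RESPONSE = "REQUEST_RESPONSE"

def parse_audit_level(value: str) -> AuditLevel:
    # Invalid values disable auditing rather than crashing startup.
    try:
        return AuditLevel(value.upper())
    except ValueError:
        return AuditLevel.NONE

print(parse_audit_level("request"))           # AuditLevel.REQUEST
print(parse_audit_level("not-a-real-level"))  # AuditLevel.NONE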

+ 9 - 5
backend/open_webui/models/chats.py

@@ -1,3 +1,4 @@
+import logging
 import json
 import time
 import uuid
@@ -5,7 +6,7 @@ from typing import Optional
 
 from open_webui.internal.db import Base, get_db
 from open_webui.models.tags import TagModel, Tag, Tags
-
+from open_webui.env import SRC_LOG_LEVELS
 
 from pydantic import BaseModel, ConfigDict
 from sqlalchemy import BigInteger, Boolean, Column, String, Text, JSON
@@ -16,6 +17,9 @@ from sqlalchemy.sql import exists
 # Chat DB Schema
 ####################
 
+log = logging.getLogger(__name__)
+log.setLevel(SRC_LOG_LEVELS["MODELS"])
+
 
 class Chat(Base):
     __tablename__ = "chat"
@@ -670,7 +674,7 @@ class ChatTable:
             # Perform pagination at the SQL level
             all_chats = query.offset(skip).limit(limit).all()
 
-            print(len(all_chats))
+            log.info(f"The number of chats: {len(all_chats)}")
 
             # Validate and return chats
             return [ChatModel.model_validate(chat) for chat in all_chats]
@@ -731,7 +735,7 @@ class ChatTable:
             query = db.query(Chat).filter_by(user_id=user_id)
             tag_id = tag_name.replace(" ", "_").lower()
 
-            print(db.bind.dialect.name)
+            log.info(f"DB dialect name: {db.bind.dialect.name}")
             if db.bind.dialect.name == "sqlite":
                 # SQLite JSON1 querying for tags within the meta JSON field
                 query = query.filter(
@@ -752,7 +756,7 @@ class ChatTable:
                 )
 
             all_chats = query.all()
-            print("all_chats", all_chats)
+            log.debug(f"all_chats: {all_chats}")
             return [ChatModel.model_validate(chat) for chat in all_chats]
 
     def add_chat_tag_by_id_and_user_id_and_tag_name(
@@ -810,7 +814,7 @@ class ChatTable:
             count = query.count()
 
             # Debugging output for inspection
-            print(f"Count of chats for tag '{tag_name}':", count)
+            log.info(f"Count of chats for tag '{tag_name}': {count}")
 
             return count
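
This file, like most backend modules in this commit, swaps bare print() calls for a per-module logger and uses log.exception inside except blocks so tracebacks are captured. A minimal sketch of the pattern (SRC_LOG_LEVELS is Open WebUI's per-subsystem level map; plain logging.INFO is used here to keep the example self-contained):

import logging

logging.basicConfig()  # simple root handler so the demo prints something
log = logging.getLogger(__name__)
log.setLevel(logging.INFO)

def demo():
    try:
        raise ValueError("boom")
    except Exception as e:
        # Unlike print(e), log.exception records the message *and* the traceback.
        log.exception(f"Error running demo: {e}")

demo()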
 

+ 1 - 1
backend/open_webui/models/feedbacks.py

@@ -118,7 +118,7 @@ class FeedbackTable:
                 else:
                     return None
             except Exception as e:
-                print(e)
+                log.exception(f"Error creating a new feedback: {e}")
                 return None
 
     def get_feedback_by_id(self, id: str) -> Optional[FeedbackModel]:

+ 1 - 1
backend/open_webui/models/files.py

@@ -119,7 +119,7 @@ class FilesTable:
                 else:
                     return None
             except Exception as e:
-                print(f"Error creating tool: {e}")
+                log.exception(f"Error inserting a new file: {e}")
                 return None
 
     def get_file_by_id(self, id: str) -> Optional[FileModel]:

+ 1 - 1
backend/open_webui/models/folders.py

@@ -82,7 +82,7 @@ class FolderTable:
                 else:
                     return None
             except Exception as e:
-                print(e)
+                log.exception(f"Error inserting a new folder: {e}")
                 return None
 
     def get_folder_by_id_and_user_id(

+ 8 - 4
backend/open_webui/models/functions.py

@@ -105,7 +105,7 @@ class FunctionsTable:
                 else:
                     return None
         except Exception as e:
-            print(f"Error creating tool: {e}")
+            log.exception(f"Error creating a new function: {e}")
             return None
 
     def get_function_by_id(self, id: str) -> Optional[FunctionModel]:
@@ -170,7 +170,7 @@ class FunctionsTable:
                 function = db.get(Function, id)
                 return function.valves if function.valves else {}
             except Exception as e:
-                print(f"An error occurred: {e}")
+                log.exception(f"Error getting function valves by id {id}: {e}")
                 return None
 
     def update_function_valves_by_id(
@@ -202,7 +202,9 @@ class FunctionsTable:
 
             return user_settings["functions"]["valves"].get(id, {})
         except Exception as e:
-            print(f"An error occurred: {e}")
+            log.exception(
+                f"Error getting user values by id {id} and user id {user_id}: {e}"
+            )
             return None
 
     def update_user_valves_by_id_and_user_id(
@@ -225,7 +227,9 @@ class FunctionsTable:
 
             return user_settings["functions"]["valves"][id]
         except Exception as e:
-            print(f"An error occurred: {e}")
+            log.exception(
+                f"Error updating user valves by id {id} and user_id {user_id}: {e}"
+            )
             return None
 
     def update_function_by_id(self, id: str, updated: dict) -> Optional[FunctionModel]:

+ 2 - 3
backend/open_webui/models/models.py

@@ -166,7 +166,7 @@ class ModelsTable:
                 else:
                     return None
         except Exception as e:
-            print(e)
+            log.exception(f"Failed to insert a new model: {e}")
             return None
 
     def get_all_models(self) -> list[ModelModel]:
@@ -246,8 +246,7 @@ class ModelsTable:
                 db.refresh(model)
                 return ModelModel.model_validate(model)
         except Exception as e:
-            print(e)
-
+            log.exception(f"Failed to update the model by id {id}: {e}")
             return None
 
     def delete_model_by_id(self, id: str) -> bool:

+ 1 - 1
backend/open_webui/models/tags.py

@@ -61,7 +61,7 @@ class TagTable:
                 else:
                     return None
             except Exception as e:
-                print(e)
+                log.exception(f"Error inserting a new tag: {e}")
                 return None
 
     def get_tag_by_name_and_user_id(

+ 8 - 4
backend/open_webui/models/tools.py

@@ -131,7 +131,7 @@ class ToolsTable:
                 else:
                     return None
             except Exception as e:
-                print(f"Error creating tool: {e}")
+                log.exception(f"Error creating a new tool: {e}")
                 return None
 
     def get_tool_by_id(self, id: str) -> Optional[ToolModel]:
@@ -175,7 +175,7 @@ class ToolsTable:
                 tool = db.get(Tool, id)
                 return tool.valves if tool.valves else {}
         except Exception as e:
-            print(f"An error occurred: {e}")
+            log.exception(f"Error getting tool valves by id {id}: {e}")
             return None
 
     def update_tool_valves_by_id(self, id: str, valves: dict) -> Optional[ToolValves]:
@@ -204,7 +204,9 @@ class ToolsTable:
 
             return user_settings["tools"]["valves"].get(id, {})
         except Exception as e:
-            print(f"An error occurred: {e}")
+            log.exception(
+                f"Error getting user values by id {id} and user_id {user_id}: {e}"
+            )
             return None
 
     def update_user_valves_by_id_and_user_id(
@@ -227,7 +229,9 @@ class ToolsTable:
 
             return user_settings["tools"]["valves"][id]
         except Exception as e:
-            print(f"An error occurred: {e}")
+            log.exception(
+                f"Error updating user valves by id {id} and user_id {user_id}: {e}"
+            )
             return None
 
     def update_tool_by_id(self, id: str, updated: dict) -> Optional[ToolModel]:

+ 23 - 0
backend/open_webui/retrieval/loaders/main.py

@@ -4,6 +4,7 @@ import ftfy
 import sys
 
 from langchain_community.document_loaders import (
+    AzureAIDocumentIntelligenceLoader,
     BSHTMLLoader,
     CSVLoader,
     Docx2txtLoader,
@@ -76,6 +77,7 @@ known_source_ext = [
     "jsx",
     "hs",
     "lhs",
+    "json",
 ]
 
 
@@ -147,6 +149,27 @@ class Loader:
                     file_path=file_path,
                     mime_type=file_content_type,
                 )
+        elif (
+            self.engine == "document_intelligence"
+            and self.kwargs.get("DOCUMENT_INTELLIGENCE_ENDPOINT") != ""
+            and self.kwargs.get("DOCUMENT_INTELLIGENCE_KEY") != ""
+            and (
+                file_ext in ["pdf", "xls", "xlsx", "docx", "ppt", "pptx"]
+                or file_content_type
+                in [
+                    "application/vnd.ms-excel",
+                    "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
+                    "application/vnd.openxmlformats-officedocument.wordprocessingml.document",
+                    "application/vnd.ms-powerpoint",
+                    "application/vnd.openxmlformats-officedocument.presentationml.presentation",
+                ]
+            )
+        ):
+            loader = AzureAIDocumentIntelligenceLoader(
+                file_path=file_path,
+                api_endpoint=self.kwargs.get("DOCUMENT_INTELLIGENCE_ENDPOINT"),
+                api_key=self.kwargs.get("DOCUMENT_INTELLIGENCE_KEY"),
+            )
         else:
             if file_ext == "pdf":
                 loader = PyPDFLoader(
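
The new branch above routes PDF and Office files to Azure AI Document Intelligence when an endpoint and key are configured. A hedged usage sketch of the loader on its own, using only the constructor arguments that appear in the diff; the file path, endpoint, and key values are placeholders:

from langchain_community.document_loaders import AzureAIDocumentIntelligenceLoader

loader = AzureAIDocumentIntelligenceLoader(
    file_path="example.pdf",  # placeholder file
    api_endpoint="https://<your-resource>.cognitiveservices.azure.com/",
    api_key="<DOCUMENT_INTELLIGENCE_KEY>",
)
docs = loader.load()  # standard LangChain loader interface
print(docs[0].page_content[:200])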

+ 7 - 1
backend/open_webui/retrieval/models/colbert.py

@@ -1,13 +1,19 @@
 import os
+import logging
 import torch
 import numpy as np
 from colbert.infra import ColBERTConfig
 from colbert.modeling.checkpoint import Checkpoint
 
+from open_webui.env import SRC_LOG_LEVELS
+
+log = logging.getLogger(__name__)
+log.setLevel(SRC_LOG_LEVELS["RAG"])
+
 
 class ColBERT:
     def __init__(self, name, **kwargs) -> None:
-        print("ColBERT: Loading model", name)
+        log.info("ColBERT: Loading model", name)
         self.device = "cuda" if torch.cuda.is_available() else "cpu"
 
         DOCKER = kwargs.get("env") == "docker"

+ 79 - 35
backend/open_webui/retrieval/utils.py

@@ -5,6 +5,7 @@ from typing import Optional, Union
 
 import asyncio
 import requests
+import hashlib
 
 from huggingface_hub import snapshot_download
 from langchain.retrievers import ContextualCompressionRetriever, EnsembleRetriever
@@ -17,6 +18,7 @@ from open_webui.retrieval.vector.connector import VECTOR_DB_CLIENT
 from open_webui.utils.misc import get_last_user_message, calculate_sha256_string
 
 from open_webui.models.users import UserModel
+from open_webui.models.files import Files
 
 from open_webui.env import (
     SRC_LOG_LEVELS,
@@ -81,7 +83,7 @@ def query_doc(
 
         return result
     except Exception as e:
-        print(e)
+        log.exception(f"Error querying doc {collection_name} with limit {k}: {e}")
         raise e
 
 
@@ -94,7 +96,7 @@ def get_doc(collection_name: str, user: UserModel = None):
 
         return result
     except Exception as e:
-        print(e)
+        log.exception(f"Error getting doc {collection_name}: {e}")
         raise e
 
 
@@ -174,46 +176,41 @@ def merge_get_results(get_results: list[dict]) -> dict:
 
 def merge_and_sort_query_results(
     query_results: list[dict], k: int, reverse: bool = False
-) -> list[dict]:
+) -> dict:
     # Initialize lists to store combined data
-    combined_distances = []
-    combined_documents = []
-    combined_metadatas = []
+    combined = []
+    seen_hashes = set()  # To store unique document hashes
 
     for data in query_results:
-        combined_distances.extend(data["distances"][0])
-        combined_documents.extend(data["documents"][0])
-        combined_metadatas.extend(data["metadatas"][0])
+        distances = data["distances"][0]
+        documents = data["documents"][0]
+        metadatas = data["metadatas"][0]
+
+        for distance, document, metadata in zip(distances, documents, metadatas):
+            if isinstance(document, str):
+                doc_hash = hashlib.md5(
+                    document.encode()
+                ).hexdigest()  # Compute a hash for uniqueness
 
-    # Create a list of tuples (distance, document, metadata)
-    combined = list(zip(combined_distances, combined_documents, combined_metadatas))
+                if doc_hash not in seen_hashes:
+                    seen_hashes.add(doc_hash)
+                    combined.append((distance, document, metadata))
 
     # Sort the list based on distances
     combined.sort(key=lambda x: x[0], reverse=reverse)
 
-    # We don't have anything :-(
-    if not combined:
-        sorted_distances = []
-        sorted_documents = []
-        sorted_metadatas = []
-    else:
-        # Unzip the sorted list
-        sorted_distances, sorted_documents, sorted_metadatas = zip(*combined)
-
-        # Slicing the lists to include only k elements
-        sorted_distances = list(sorted_distances)[:k]
-        sorted_documents = list(sorted_documents)[:k]
-        sorted_metadatas = list(sorted_metadatas)[:k]
+    # Slice to keep only the top k elements
+    sorted_distances, sorted_documents, sorted_metadatas = (
+        zip(*combined[:k]) if combined else ([], [], [])
+    )
 
-    # Create the output dictionary
-    result = {
-        "distances": [sorted_distances],
-        "documents": [sorted_documents],
-        "metadatas": [sorted_metadatas],
+    # Create and return the output dictionary
+    return {
+        "distances": [list(sorted_distances)],
+        "documents": [list(sorted_documents)],
+        "metadatas": [list(sorted_metadatas)],
     }
 
-    return result
-
 
 def get_all_items_from_collections(collection_names: list[str]) -> dict:
     results = []
@@ -342,6 +339,7 @@ def get_embedding_function(
 
 
 def get_sources_from_files(
+    request,
     files,
     queries,
     embedding_function,
@@ -359,19 +357,64 @@ def get_sources_from_files(
     relevant_contexts = []
 
     for file in files:
+
+        context = None
         if file.get("docs"):
+            # BYPASS_WEB_SEARCH_EMBEDDING_AND_RETRIEVAL
             context = {
                 "documents": [[doc.get("content") for doc in file.get("docs")]],
                 "metadatas": [[doc.get("metadata") for doc in file.get("docs")]],
             }
         elif file.get("context") == "full":
+            # Manual Full Mode Toggle
             context = {
                 "documents": [[file.get("file").get("data", {}).get("content")]],
                 "metadatas": [[{"file_id": file.get("id"), "name": file.get("name")}]],
             }
-        else:
-            context = None
+        elif (
+            file.get("type") != "web_search"
+            and request.app.state.config.BYPASS_EMBEDDING_AND_RETRIEVAL
+        ):
+            # BYPASS_EMBEDDING_AND_RETRIEVAL
+            if file.get("type") == "collection":
+                file_ids = file.get("data", {}).get("file_ids", [])
+
+                documents = []
+                metadatas = []
+                for file_id in file_ids:
+                    file_object = Files.get_file_by_id(file_id)
+
+                    if file_object:
+                        documents.append(file_object.data.get("content", ""))
+                        metadatas.append(
+                            {
+                                "file_id": file_id,
+                                "name": file_object.filename,
+                                "source": file_object.filename,
+                            }
+                        )
+
+                context = {
+                    "documents": [documents],
+                    "metadatas": [metadatas],
+                }
 
+            elif file.get("id"):
+                file_object = Files.get_file_by_id(file.get("id"))
+                if file_object:
+                    context = {
+                        "documents": [[file_object.data.get("content", "")]],
+                        "metadatas": [
+                            [
+                                {
+                                    "file_id": file.get("id"),
+                                    "name": file_object.filename,
+                                    "source": file_object.filename,
+                                }
+                            ]
+                        ],
+                    }
+        else:
             collection_names = []
             if file.get("type") == "collection":
                 if file.get("legacy"):
@@ -434,6 +477,7 @@ def get_sources_from_files(
         if context:
             if "data" in file:
                 del file["data"]
+
             relevant_contexts.append({**context, "file": file})
 
     sources = []
@@ -530,7 +574,7 @@ def generate_openai_batch_embeddings(
         else:
             raise "Something went wrong :/"
     except Exception as e:
-        print(e)
+        log.exception(f"Error generating openai batch embeddings: {e}")
         return None
 
 
@@ -564,7 +608,7 @@ def generate_ollama_batch_embeddings(
         else:
             raise "Something went wrong :/"
     except Exception as e:
-        print(e)
+        log.exception(f"Error generating ollama batch embeddings: {e}")
         return None
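
Earlier in this file's diff, merge_and_sort_query_results was reworked to deduplicate documents across query results by hashing their text, then sort by distance and keep the top k. Note that the first occurrence of a document wins even if a later duplicate has a smaller distance. A standalone illustration with toy data:

import hashlib

query_results = [  # toy results from two collections
    {"distances": [[0.2, 0.5]], "documents": [["apple", "banana"]], "metadatas": [[{"id": 1}, {"id": 2}]]},
    {"distances": [[0.1, 0.4]], "documents": [["apple", "cherry"]], "metadatas": [[{"id": 3}, {"id": 4}]]},
]

combined, seen_hashes = [], set()
for data in query_results:
    for distance, document, metadata in zip(
        data["distances"][0], data["documents"][0], data["metadatas"][0]
    ):
        doc_hash = hashlib.md5(document.encode()).hexdigest()
        if doc_hash not in seen_hashes:
            seen_hashes.add(doc_hash)
            combined.append((distance, document, metadata))

combined.sort(key=lambda x: x[0])
print([doc for _, doc, _ in combined[:2]])  # ['apple', 'cherry'] (the duplicate 'apple' was dropped)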
 
 

+ 6 - 2
backend/open_webui/retrieval/vector/dbs/chroma.py

@@ -1,4 +1,5 @@
 import chromadb
+import logging
 from chromadb import Settings
 from chromadb.utils.batch_utils import create_batches
 
@@ -16,6 +17,10 @@ from open_webui.config import (
     CHROMA_CLIENT_AUTH_PROVIDER,
     CHROMA_CLIENT_AUTH_CREDENTIALS,
 )
+from open_webui.env import SRC_LOG_LEVELS
+
+log = logging.getLogger(__name__)
+log.setLevel(SRC_LOG_LEVELS["RAG"])
 
 
 class ChromaClient:
@@ -102,8 +107,7 @@ class ChromaClient:
                     }
                 )
             return None
-        except Exception as e:
-            print(e)
+        except:
             return None
 
     def get(self, collection_name: str) -> Optional[GetResult]:

+ 10 - 4
backend/open_webui/retrieval/vector/dbs/milvus.py

@@ -1,7 +1,7 @@
 from pymilvus import MilvusClient as Client
 from pymilvus import FieldSchema, DataType
 import json
-
+import logging
 from typing import Optional
 
 from open_webui.retrieval.vector.main import VectorItem, SearchResult, GetResult
@@ -10,6 +10,10 @@ from open_webui.config import (
     MILVUS_DB,
     MILVUS_TOKEN,
 )
+from open_webui.env import SRC_LOG_LEVELS
+
+log = logging.getLogger(__name__)
+log.setLevel(SRC_LOG_LEVELS["RAG"])
 
 
 class MilvusClient:
@@ -168,7 +172,7 @@ class MilvusClient:
         try:
             # Loop until there are no more items to fetch or the desired limit is reached
             while remaining > 0:
-                print("remaining", remaining)
+                log.info(f"remaining: {remaining}")
                 current_fetch = min(
                     max_limit, remaining
                 )  # Determine how many items to fetch in this iteration
@@ -195,10 +199,12 @@ class MilvusClient:
                 if results_count < current_fetch:
                     break
 
-            print(all_results)
+            log.debug(all_results)
             return self._result_to_get_result([all_results])
         except Exception as e:
-            print(e)
+            log.exception(
+                f"Error querying collection {collection_name} with limit {limit}: {e}"
+            )
             return None
 
     def get(self, collection_name: str) -> Optional[GetResult]:

+ 23 - 15
backend/open_webui/retrieval/vector/dbs/pgvector.py

@@ -1,4 +1,5 @@
 from typing import Optional, List, Dict, Any
+import logging
 from sqlalchemy import (
     cast,
     column,
@@ -24,9 +25,14 @@ from sqlalchemy.exc import NoSuchTableError
 from open_webui.retrieval.vector.main import VectorItem, SearchResult, GetResult
 from open_webui.config import PGVECTOR_DB_URL, PGVECTOR_INITIALIZE_MAX_VECTOR_LENGTH
 
+from open_webui.env import SRC_LOG_LEVELS
+
 VECTOR_LENGTH = PGVECTOR_INITIALIZE_MAX_VECTOR_LENGTH
 Base = declarative_base()
 
+log = logging.getLogger(__name__)
+log.setLevel(SRC_LOG_LEVELS["RAG"])
+
 
 class DocumentChunk(Base):
     __tablename__ = "document_chunk"
@@ -82,10 +88,10 @@ class PgvectorClient:
                 )
             )
             self.session.commit()
-            print("Initialization complete.")
+            log.info("Initialization complete.")
         except Exception as e:
             self.session.rollback()
-            print(f"Error during initialization: {e}")
+            log.exception(f"Error during initialization: {e}")
             raise
 
     def check_vector_length(self) -> None:
@@ -150,12 +156,12 @@ class PgvectorClient:
                 new_items.append(new_chunk)
             self.session.bulk_save_objects(new_items)
             self.session.commit()
-            print(
+            log.info(
                 f"Inserted {len(new_items)} items into collection '{collection_name}'."
             )
         except Exception as e:
             self.session.rollback()
-            print(f"Error during insert: {e}")
+            log.exception(f"Error during insert: {e}")
             raise
 
     def upsert(self, collection_name: str, items: List[VectorItem]) -> None:
@@ -184,10 +190,12 @@ class PgvectorClient:
                     )
                     self.session.add(new_chunk)
             self.session.commit()
-            print(f"Upserted {len(items)} items into collection '{collection_name}'.")
+            log.info(
+                f"Upserted {len(items)} items into collection '{collection_name}'."
+            )
         except Exception as e:
             self.session.rollback()
-            print(f"Error during upsert: {e}")
+            log.exception(f"Error during upsert: {e}")
             raise
 
     def search(
@@ -278,7 +286,7 @@ class PgvectorClient:
                 ids=ids, distances=distances, documents=documents, metadatas=metadatas
             )
         except Exception as e:
-            print(f"Error during search: {e}")
+            log.exception(f"Error during search: {e}")
             return None
 
     def query(
@@ -310,7 +318,7 @@ class PgvectorClient:
                 metadatas=metadatas,
             )
         except Exception as e:
-            print(f"Error during query: {e}")
+            log.exception(f"Error during query: {e}")
             return None
 
     def get(
@@ -334,7 +342,7 @@ class PgvectorClient:
 
             return GetResult(ids=ids, documents=documents, metadatas=metadatas)
         except Exception as e:
-            print(f"Error during get: {e}")
+            log.exception(f"Error during get: {e}")
             return None
 
     def delete(
@@ -356,22 +364,22 @@ class PgvectorClient:
                     )
             deleted = query.delete(synchronize_session=False)
             self.session.commit()
-            print(f"Deleted {deleted} items from collection '{collection_name}'.")
+            log.info(f"Deleted {deleted} items from collection '{collection_name}'.")
         except Exception as e:
             self.session.rollback()
-            print(f"Error during delete: {e}")
+            log.exception(f"Error during delete: {e}")
             raise
 
     def reset(self) -> None:
         try:
             deleted = self.session.query(DocumentChunk).delete()
             self.session.commit()
-            print(
+            log.info(
                 f"Reset complete. Deleted {deleted} items from 'document_chunk' table."
             )
         except Exception as e:
             self.session.rollback()
-            print(f"Error during reset: {e}")
+            log.exception(f"Error during reset: {e}")
             raise
 
     def close(self) -> None:
@@ -387,9 +395,9 @@ class PgvectorClient:
             )
             return exists
         except Exception as e:
-            print(f"Error checking collection existence: {e}")
+            log.exception(f"Error checking collection existence: {e}")
             return False
 
     def delete_collection(self, collection_name: str) -> None:
         self.delete(collection_name)
-        print(f"Collection '{collection_name}' deleted.")
+        log.info(f"Collection '{collection_name}' deleted.")

+ 7 - 2
backend/open_webui/retrieval/vector/dbs/qdrant.py

@@ -1,4 +1,5 @@
 from typing import Optional
+import logging
 
 from qdrant_client import QdrantClient as Qclient
 from qdrant_client.http.models import PointStruct
@@ -6,9 +7,13 @@ from qdrant_client.models import models
 
 from open_webui.retrieval.vector.main import VectorItem, SearchResult, GetResult
 from open_webui.config import QDRANT_URI, QDRANT_API_KEY
+from open_webui.env import SRC_LOG_LEVELS
 
 NO_LIMIT = 999999999
 
+log = logging.getLogger(__name__)
+log.setLevel(SRC_LOG_LEVELS["RAG"])
+
 
 class QdrantClient:
     def __init__(self):
@@ -49,7 +54,7 @@ class QdrantClient:
             ),
         )
 
-        print(f"collection {collection_name_with_prefix} successfully created!")
+        log.info(f"collection {collection_name_with_prefix} successfully created!")
 
     def _create_collection_if_not_exists(self, collection_name, dimension):
         if not self.has_collection(collection_name=collection_name):
@@ -120,7 +125,7 @@ class QdrantClient:
             )
             return self._result_to_get_result(points.points)
         except Exception as e:
-            print(e)
+            log.exception(f"Error querying a collection '{collection_name}': {e}")
             return None
 
     def get(self, collection_name: str) -> Optional[GetResult]:

+ 50 - 13
backend/open_webui/routers/audio.py

@@ -71,7 +71,7 @@ from pydub.utils import mediainfo
 def is_mp4_audio(file_path):
     """Check if the given file is an MP4 audio file."""
     if not os.path.isfile(file_path):
-        print(f"File not found: {file_path}")
+        log.error(f"File not found: {file_path}")
         return False
 
     info = mediainfo(file_path)
@@ -88,7 +88,7 @@ def convert_mp4_to_wav(file_path, output_path):
     """Convert MP4 audio file to WAV format."""
     audio = AudioSegment.from_file(file_path, format="mp4")
     audio.export(output_path, format="wav")
-    print(f"Converted {file_path} to {output_path}")
+    log.info(f"Converted {file_path} to {output_path}")
 
 
 def set_faster_whisper_model(model: str, auto_update: bool = False):
@@ -266,7 +266,6 @@ async def speech(request: Request, user=Depends(get_verified_user)):
         payload["model"] = request.app.state.config.TTS_MODEL
 
         try:
-            # print(payload)
             timeout = aiohttp.ClientTimeout(total=AIOHTTP_CLIENT_TIMEOUT)
             async with aiohttp.ClientSession(
                 timeout=timeout, trust_env=True
@@ -468,7 +467,7 @@ async def speech(request: Request, user=Depends(get_verified_user)):
 
 
 def transcribe(request: Request, file_path):
-    print("transcribe", file_path)
+    log.info(f"transcribe: {file_path}")
     filename = os.path.basename(file_path)
     file_dir = os.path.dirname(file_path)
     id = filename.split(".")[0]
@@ -680,7 +679,22 @@ def transcription(
 def get_available_models(request: Request) -> list[dict]:
     available_models = []
     if request.app.state.config.TTS_ENGINE == "openai":
-        available_models = [{"id": "tts-1"}, {"id": "tts-1-hd"}]
+        # Use custom endpoint if not using the official OpenAI API URL
+        if not request.app.state.config.TTS_OPENAI_API_BASE_URL.startswith(
+            "https://api.openai.com"
+        ):
+            try:
+                response = requests.get(
+                    f"{request.app.state.config.TTS_OPENAI_API_BASE_URL}/audio/models"
+                )
+                response.raise_for_status()
+                data = response.json()
+                available_models = data.get("models", [])
+            except Exception as e:
+                log.error(f"Error fetching models from custom endpoint: {str(e)}")
+                available_models = [{"id": "tts-1"}, {"id": "tts-1-hd"}]
+        else:
+            available_models = [{"id": "tts-1"}, {"id": "tts-1-hd"}]
     elif request.app.state.config.TTS_ENGINE == "elevenlabs":
         try:
             response = requests.get(
@@ -711,14 +725,37 @@ def get_available_voices(request) -> dict:
     """Returns {voice_id: voice_name} dict"""
     available_voices = {}
     if request.app.state.config.TTS_ENGINE == "openai":
-        available_voices = {
-            "alloy": "alloy",
-            "echo": "echo",
-            "fable": "fable",
-            "onyx": "onyx",
-            "nova": "nova",
-            "shimmer": "shimmer",
-        }
+        # Use custom endpoint if not using the official OpenAI API URL
+        if not request.app.state.config.TTS_OPENAI_API_BASE_URL.startswith(
+            "https://api.openai.com"
+        ):
+            try:
+                response = requests.get(
+                    f"{request.app.state.config.TTS_OPENAI_API_BASE_URL}/audio/voices"
+                )
+                response.raise_for_status()
+                data = response.json()
+                voices_list = data.get("voices", [])
+                available_voices = {voice["id"]: voice["name"] for voice in voices_list}
+            except Exception as e:
+                log.error(f"Error fetching voices from custom endpoint: {str(e)}")
+                available_voices = {
+                    "alloy": "alloy",
+                    "echo": "echo",
+                    "fable": "fable",
+                    "onyx": "onyx",
+                    "nova": "nova",
+                    "shimmer": "shimmer",
+                }
+        else:
+            available_voices = {
+                "alloy": "alloy",
+                "echo": "echo",
+                "fable": "fable",
+                "onyx": "onyx",
+                "nova": "nova",
+                "shimmer": "shimmer",
+            }
     elif request.app.state.config.TTS_ENGINE == "elevenlabs":
         try:
             available_voices = get_elevenlabs_voices(
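
The OpenAI TTS branches above now query a custom OpenAI-compatible endpoint for its models and voices instead of hard-coding the OpenAI defaults. A hedged sketch of the two calls, assuming the response shapes used in the diff ({"models": [...]} and {"voices": [{"id": ..., "name": ...}]}); the base URL is a placeholder:

import requests

base_url = "http://localhost:8081/v1"  # placeholder custom TTS endpoint

try:
    models = requests.get(f"{base_url}/audio/models").json().get("models", [])
    voices_list = requests.get(f"{base_url}/audio/voices").json().get("voices", [])
    voices = {voice["id"]: voice["name"] for voice in voices_list}
except Exception:
    # Same fallback the router uses when the custom endpoint is unreachable.
    models = [{"id": "tts-1"}, {"id": "tts-1-hd"}]
    voices = {v: v for v in ["alloy", "echo", "fable", "onyx", "nova", "shimmer"]}

print(models, voices)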

+ 6 - 20
backend/open_webui/routers/auths.py

@@ -31,10 +31,7 @@ from open_webui.env import (
 )
 from fastapi import APIRouter, Depends, HTTPException, Request, status
 from fastapi.responses import RedirectResponse, Response
-from open_webui.config import (
-    OPENID_PROVIDER_URL,
-    ENABLE_OAUTH_SIGNUP,
-)
+from open_webui.config import OPENID_PROVIDER_URL, ENABLE_OAUTH_SIGNUP, ENABLE_LDAP
 from pydantic import BaseModel
 from open_webui.utils.misc import parse_duration, validate_email_format
 from open_webui.utils.auth import (
@@ -51,8 +48,10 @@ from open_webui.utils.access_control import get_permissions
 from typing import Optional, List
 
 from ssl import CERT_REQUIRED, PROTOCOL_TLS
-from ldap3 import Server, Connection, NONE, Tls
-from ldap3.utils.conv import escape_filter_chars
+
+if ENABLE_LDAP.value:
+    from ldap3 import Server, Connection, NONE, Tls
+    from ldap3.utils.conv import escape_filter_chars
 
 router = APIRouter()
 
@@ -252,14 +251,6 @@ async def ldap_auth(request: Request, response: Response, form_data: LdapForm):
             if not user:
                 try:
                     user_count = Users.get_num_users()
-                    if (
-                        request.app.state.USER_COUNT
-                        and user_count >= request.app.state.USER_COUNT
-                    ):
-                        raise HTTPException(
-                            status.HTTP_403_FORBIDDEN,
-                            detail=ERROR_MESSAGES.ACCESS_PROHIBITED,
-                        )
 
                     role = (
                         "admin"
@@ -439,11 +430,6 @@ async def signup(request: Request, response: Response, form_data: SignupForm):
             )
 
     user_count = Users.get_num_users()
-    if request.app.state.USER_COUNT and user_count >= request.app.state.USER_COUNT:
-        raise HTTPException(
-            status.HTTP_403_FORBIDDEN, detail=ERROR_MESSAGES.ACCESS_PROHIBITED
-        )
-
     if not validate_email_format(form_data.email.lower()):
         raise HTTPException(
             status.HTTP_400_BAD_REQUEST, detail=ERROR_MESSAGES.INVALID_EMAIL_FORMAT
@@ -613,7 +599,7 @@ async def get_admin_details(request: Request, user=Depends(get_current_user)):
         admin_email = request.app.state.config.ADMIN_EMAIL
         admin_name = None
 
-        print(admin_email, admin_name)
+        log.info(f"Admin details - Email: {admin_email}, Name: {admin_name}")
 
         if admin_email:
             admin = Users.get_user_by_email(admin_email)

+ 18 - 2
backend/open_webui/routers/files.py

@@ -16,6 +16,7 @@ from open_webui.models.files import (
     Files,
 )
 from open_webui.routers.retrieval import ProcessFileForm, process_file
+from open_webui.routers.audio import transcribe
 from open_webui.storage.provider import Storage
 from open_webui.utils.auth import get_admin_user, get_verified_user
 from pydantic import BaseModel
@@ -67,7 +68,22 @@ def upload_file(
         )
 
         try:
-            process_file(request, ProcessFileForm(file_id=id), user=user)
+            if file.content_type in [
+                "audio/mpeg",
+                "audio/wav",
+                "audio/ogg",
+                "audio/x-m4a",
+            ]:
+                file_path = Storage.get_file(file_path)
+                result = transcribe(request, file_path)
+                process_file(
+                    request,
+                    ProcessFileForm(file_id=id, content=result.get("text", "")),
+                    user=user,
+                )
+            else:
+                process_file(request, ProcessFileForm(file_id=id), user=user)
+
             file_item = Files.get_file_by_id(id=id)
         except Exception as e:
             log.exception(e)
@@ -273,7 +289,7 @@ async def get_html_file_content_by_id(id: str, user=Depends(get_verified_user)):
 
             # Check if the file already exists in the cache
             if file_path.is_file():
-                print(f"file_path: {file_path}")
+                log.info(f"file_path: {file_path}")
                 return FileResponse(file_path)
             else:
                 raise HTTPException(

+ 10 - 4
backend/open_webui/routers/functions.py

@@ -1,4 +1,5 @@
 import os
+import logging
 from pathlib import Path
 from typing import Optional
 
@@ -13,6 +14,11 @@ from open_webui.config import CACHE_DIR
 from open_webui.constants import ERROR_MESSAGES
 from fastapi import APIRouter, Depends, HTTPException, Request, status
 from open_webui.utils.auth import get_admin_user, get_verified_user
+from open_webui.env import SRC_LOG_LEVELS
+
+log = logging.getLogger(__name__)
+log.setLevel(SRC_LOG_LEVELS["MAIN"])
+
 
 router = APIRouter()
 
@@ -79,7 +85,7 @@ async def create_new_function(
                     detail=ERROR_MESSAGES.DEFAULT("Error creating function"),
                 )
         except Exception as e:
-            print(e)
+            log.exception(f"Failed to create a new function: {e}")
             raise HTTPException(
                 status_code=status.HTTP_400_BAD_REQUEST,
                 detail=ERROR_MESSAGES.DEFAULT(e),
@@ -183,7 +189,7 @@ async def update_function_by_id(
         FUNCTIONS[id] = function_module
 
         updated = {**form_data.model_dump(exclude={"id"}), "type": function_type}
-        print(updated)
+        log.debug(updated)
 
         function = Functions.update_function_by_id(id, updated)
 
@@ -299,7 +305,7 @@ async def update_function_valves_by_id(
                 Functions.update_function_valves_by_id(id, valves.model_dump())
                 return valves.model_dump()
             except Exception as e:
-                print(e)
+                log.exception(f"Error updating function valves by id {id}: {e}")
                 raise HTTPException(
                     status_code=status.HTTP_400_BAD_REQUEST,
                     detail=ERROR_MESSAGES.DEFAULT(e),
@@ -388,7 +394,7 @@ async def update_function_user_valves_by_id(
                 )
                 return user_valves.model_dump()
             except Exception as e:
-                print(e)
+                log.exception(f"Error updating function user valves by id {id}: {e}")
                 raise HTTPException(
                     status_code=status.HTTP_400_BAD_REQUEST,
                     detail=ERROR_MESSAGES.DEFAULT(e),

+ 11 - 5
backend/open_webui/routers/groups.py

@@ -1,7 +1,7 @@
 import os
 from pathlib import Path
 from typing import Optional
-
+import logging
 
 from open_webui.models.users import Users
 from open_webui.models.groups import (
@@ -14,7 +14,13 @@ from open_webui.models.groups import (
 from open_webui.config import CACHE_DIR
 from open_webui.constants import ERROR_MESSAGES
 from fastapi import APIRouter, Depends, HTTPException, Request, status
+
 from open_webui.utils.auth import get_admin_user, get_verified_user
+from open_webui.env import SRC_LOG_LEVELS
+
+
+log = logging.getLogger(__name__)
+log.setLevel(SRC_LOG_LEVELS["MAIN"])
 
 router = APIRouter()
 
@@ -37,7 +43,7 @@ async def get_groups(user=Depends(get_verified_user)):
 
 
 @router.post("/create", response_model=Optional[GroupResponse])
-async def create_new_function(form_data: GroupForm, user=Depends(get_admin_user)):
+async def create_new_group(form_data: GroupForm, user=Depends(get_admin_user)):
     try:
         group = Groups.insert_new_group(user.id, form_data)
         if group:
@@ -48,7 +54,7 @@ async def create_new_function(form_data: GroupForm, user=Depends(get_admin_user)
                 detail=ERROR_MESSAGES.DEFAULT("Error creating group"),
             )
     except Exception as e:
-        print(e)
+        log.exception(f"Error creating a new group: {e}")
         raise HTTPException(
             status_code=status.HTTP_400_BAD_REQUEST,
             detail=ERROR_MESSAGES.DEFAULT(e),
@@ -94,7 +100,7 @@ async def update_group_by_id(
                 detail=ERROR_MESSAGES.DEFAULT("Error updating group"),
             )
     except Exception as e:
-        print(e)
+        log.exception(f"Error updating group {id}: {e}")
         raise HTTPException(
             status_code=status.HTTP_400_BAD_REQUEST,
             detail=ERROR_MESSAGES.DEFAULT(e),
@@ -118,7 +124,7 @@ async def delete_group_by_id(id: str, user=Depends(get_admin_user)):
                 detail=ERROR_MESSAGES.DEFAULT("Error deleting group"),
             )
     except Exception as e:
-        print(e)
+        log.exception(f"Error deleting group {id}: {e}")
         raise HTTPException(
             status_code=status.HTTP_400_BAD_REQUEST,
             detail=ERROR_MESSAGES.DEFAULT(e),

+ 12 - 2
backend/open_webui/routers/images.py

@@ -144,6 +144,8 @@ async def update_config(
     request.app.state.config.COMFYUI_BASE_URL = (
         form_data.comfyui.COMFYUI_BASE_URL.strip("/")
     )
+    request.app.state.config.COMFYUI_API_KEY = form_data.comfyui.COMFYUI_API_KEY
+
     request.app.state.config.COMFYUI_WORKFLOW = form_data.comfyui.COMFYUI_WORKFLOW
     request.app.state.config.COMFYUI_WORKFLOW_NODES = (
         form_data.comfyui.COMFYUI_WORKFLOW_NODES
@@ -203,9 +205,17 @@ async def verify_url(request: Request, user=Depends(get_admin_user)):
             request.app.state.config.ENABLE_IMAGE_GENERATION = False
             raise HTTPException(status_code=400, detail=ERROR_MESSAGES.INVALID_URL)
     elif request.app.state.config.IMAGE_GENERATION_ENGINE == "comfyui":
+
+        headers = None
+        if request.app.state.config.COMFYUI_API_KEY:
+            headers = {
+                "Authorization": f"Bearer {request.app.state.config.COMFYUI_API_KEY}"
+            }
+
         try:
             r = requests.get(
-                url=f"{request.app.state.config.COMFYUI_BASE_URL}/object_info"
+                url=f"{request.app.state.config.COMFYUI_BASE_URL}/object_info",
+                headers=headers,
             )
             r.raise_for_status()
             return True
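
verify_url now attaches an Authorization header only when a ComfyUI API key is configured, so unauthenticated ComfyUI instances keep working unchanged. The same conditional header construction as a standalone sketch (the function name and timeout value are assumptions, not from the diff):

import requests
from typing import Optional

def comfyui_object_info(base_url: str, api_key: Optional[str] = None) -> dict:
    # Send Authorization only when a key is configured.
    headers = {"Authorization": f"Bearer {api_key}"} if api_key else None
    r = requests.get(f"{base_url.rstrip('/')}/object_info", headers=headers, timeout=10)
    r.raise_for_status()
    return r.json()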
@@ -351,7 +361,7 @@ def get_models(request: Request, user=Depends(get_verified_user)):
             if model_node_id:
                 model_list_key = None
 
-                print(workflow[model_node_id]["class_type"])
+                log.info(workflow[model_node_id]["class_type"])
                 for key in info[workflow[model_node_id]["class_type"]]["input"][
                     "required"
                 ]:

+ 1 - 1
backend/open_webui/routers/knowledge.py

@@ -614,7 +614,7 @@ def add_files_to_knowledge_batch(
         )
 
     # Get files content
-    print(f"files/batch/add - {len(form_data)} files")
+    log.info(f"files/batch/add - {len(form_data)} files")
     files: List[FileModel] = []
     for form in form_data:
         file = Files.get_file_by_id(form.file_id)

+ 130 - 16
backend/open_webui/routers/ollama.py

@@ -14,6 +14,11 @@ from urllib.parse import urlparse
 import aiohttp
 from aiocache import cached
 import requests
+from open_webui.models.users import UserModel
+
+from open_webui.env import (
+    ENABLE_FORWARD_USER_INFO_HEADERS,
+)
 
 from fastapi import (
     Depends,
@@ -66,12 +71,26 @@ log.setLevel(SRC_LOG_LEVELS["OLLAMA"])
 ##########################################
 
 
-async def send_get_request(url, key=None):
+async def send_get_request(url, key=None, user: UserModel = None):
     timeout = aiohttp.ClientTimeout(total=AIOHTTP_CLIENT_TIMEOUT_OPENAI_MODEL_LIST)
     try:
         async with aiohttp.ClientSession(timeout=timeout, trust_env=True) as session:
             async with session.get(
-                url, headers={**({"Authorization": f"Bearer {key}"} if key else {})}
+                url,
+                headers={
+                    "Content-Type": "application/json",
+                    **({"Authorization": f"Bearer {key}"} if key else {}),
+                    **(
+                        {
+                            "X-OpenWebUI-User-Name": user.name,
+                            "X-OpenWebUI-User-Id": user.id,
+                            "X-OpenWebUI-User-Email": user.email,
+                            "X-OpenWebUI-User-Role": user.role,
+                        }
+                        if ENABLE_FORWARD_USER_INFO_HEADERS and user
+                        else {}
+                    ),
+                },
             ) as response:
                 return await response.json()
     except Exception as e:
@@ -96,6 +115,7 @@ async def send_post_request(
     stream: bool = True,
     key: Optional[str] = None,
     content_type: Optional[str] = None,
+    user: UserModel = None,
 ):
 
     r = None
@@ -110,6 +130,16 @@ async def send_post_request(
             headers={
                 "Content-Type": "application/json",
                 **({"Authorization": f"Bearer {key}"} if key else {}),
+                **(
+                    {
+                        "X-OpenWebUI-User-Name": user.name,
+                        "X-OpenWebUI-User-Id": user.id,
+                        "X-OpenWebUI-User-Email": user.email,
+                        "X-OpenWebUI-User-Role": user.role,
+                    }
+                    if ENABLE_FORWARD_USER_INFO_HEADERS and user
+                    else {}
+                ),
             },
         )
         r.raise_for_status()
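
The Authorization plus X-OpenWebUI-User-* header block is repeated at each Ollama call site in this file. A hypothetical helper (not part of the diff) that builds the optional headers once would keep those call sites small:

from typing import Optional

from open_webui.env import ENABLE_FORWARD_USER_INFO_HEADERS
from open_webui.models.users import UserModel

def build_ollama_headers(key: Optional[str] = None, user: Optional[UserModel] = None) -> dict:
    # Base header plus optional bearer auth and, when enabled, user-identity
    # headers that downstream proxies can use for per-user routing or limits.
    headers = {"Content-Type": "application/json"}
    if key:
        headers["Authorization"] = f"Bearer {key}"
    if ENABLE_FORWARD_USER_INFO_HEADERS and user:
        headers.update(
            {
                "X-OpenWebUI-User-Name": user.name,
                "X-OpenWebUI-User-Id": user.id,
                "X-OpenWebUI-User-Email": user.email,
                "X-OpenWebUI-User-Role": user.role,
            }
        )
    return headers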
@@ -191,7 +221,19 @@ async def verify_connection(
         try:
             async with session.get(
                 f"{url}/api/version",
-                headers={**({"Authorization": f"Bearer {key}"} if key else {})},
+                headers={
+                    **({"Authorization": f"Bearer {key}"} if key else {}),
+                    **(
+                        {
+                            "X-OpenWebUI-User-Name": user.name,
+                            "X-OpenWebUI-User-Id": user.id,
+                            "X-OpenWebUI-User-Email": user.email,
+                            "X-OpenWebUI-User-Role": user.role,
+                        }
+                        if ENABLE_FORWARD_USER_INFO_HEADERS and user
+                        else {}
+                    ),
+                },
             ) as r:
                 if r.status != 200:
                     detail = f"HTTP Error: {r.status}"
@@ -254,7 +296,7 @@ async def update_config(
 
 
 @cached(ttl=3)
-async def get_all_models(request: Request):
+async def get_all_models(request: Request, user: UserModel = None):
     log.info("get_all_models()")
     if request.app.state.config.ENABLE_OLLAMA_API:
         request_tasks = []
@@ -262,7 +304,7 @@ async def get_all_models(request: Request):
             if (str(idx) not in request.app.state.config.OLLAMA_API_CONFIGS) and (
                 url not in request.app.state.config.OLLAMA_API_CONFIGS  # Legacy support
             ):
-                request_tasks.append(send_get_request(f"{url}/api/tags"))
+                request_tasks.append(send_get_request(f"{url}/api/tags", user=user))
             else:
                 api_config = request.app.state.config.OLLAMA_API_CONFIGS.get(
                     str(idx),
@@ -275,7 +317,9 @@ async def get_all_models(request: Request):
                 key = api_config.get("key", None)
 
                 if enable:
-                    request_tasks.append(send_get_request(f"{url}/api/tags", key))
+                    request_tasks.append(
+                        send_get_request(f"{url}/api/tags", key, user=user)
+                    )
                 else:
                     request_tasks.append(asyncio.ensure_future(asyncio.sleep(0, None)))
 
@@ -360,7 +404,7 @@ async def get_ollama_tags(
     models = []
 
     if url_idx is None:
-        models = await get_all_models(request)
+        models = await get_all_models(request, user=user)
     else:
         url = request.app.state.config.OLLAMA_BASE_URLS[url_idx]
         key = get_api_key(url_idx, url, request.app.state.config.OLLAMA_API_CONFIGS)
@@ -370,7 +414,19 @@ async def get_ollama_tags(
             r = requests.request(
                 method="GET",
                 url=f"{url}/api/tags",
-                headers={**({"Authorization": f"Bearer {key}"} if key else {})},
+                headers={
+                    **({"Authorization": f"Bearer {key}"} if key else {}),
+                    **(
+                        {
+                            "X-OpenWebUI-User-Name": user.name,
+                            "X-OpenWebUI-User-Id": user.id,
+                            "X-OpenWebUI-User-Email": user.email,
+                            "X-OpenWebUI-User-Role": user.role,
+                        }
+                        if ENABLE_FORWARD_USER_INFO_HEADERS and user
+                        else {}
+                    ),
+                },
             )
             r.raise_for_status()
 
@@ -477,6 +533,7 @@ async def get_ollama_loaded_models(request: Request, user=Depends(get_verified_u
                         url, {}
                     ),  # Legacy support
                 ).get("key", None),
+                user=user,
             )
             for idx, url in enumerate(request.app.state.config.OLLAMA_BASE_URLS)
         ]
@@ -509,6 +566,7 @@ async def pull_model(
         url=f"{url}/api/pull",
         payload=json.dumps(payload),
         key=get_api_key(url_idx, url, request.app.state.config.OLLAMA_API_CONFIGS),
+        user=user,
     )
 
 
@@ -527,7 +585,7 @@ async def push_model(
     user=Depends(get_admin_user),
 ):
     if url_idx is None:
-        await get_all_models(request)
+        await get_all_models(request, user=user)
         models = request.app.state.OLLAMA_MODELS
 
         if form_data.name in models:
@@ -545,6 +603,7 @@ async def push_model(
         url=f"{url}/api/push",
         payload=form_data.model_dump_json(exclude_none=True).encode(),
         key=get_api_key(url_idx, url, request.app.state.config.OLLAMA_API_CONFIGS),
+        user=user,
     )
 
 
@@ -571,6 +630,7 @@ async def create_model(
         url=f"{url}/api/create",
         payload=form_data.model_dump_json(exclude_none=True).encode(),
         key=get_api_key(url_idx, url, request.app.state.config.OLLAMA_API_CONFIGS),
+        user=user,
     )
 
 
@@ -588,7 +648,7 @@ async def copy_model(
     user=Depends(get_admin_user),
 ):
     if url_idx is None:
-        await get_all_models(request)
+        await get_all_models(request, user=user)
         models = request.app.state.OLLAMA_MODELS
 
         if form_data.source in models:
@@ -609,6 +669,16 @@ async def copy_model(
             headers={
                 "Content-Type": "application/json",
                 **({"Authorization": f"Bearer {key}"} if key else {}),
+                **(
+                    {
+                        "X-OpenWebUI-User-Name": user.name,
+                        "X-OpenWebUI-User-Id": user.id,
+                        "X-OpenWebUI-User-Email": user.email,
+                        "X-OpenWebUI-User-Role": user.role,
+                    }
+                    if ENABLE_FORWARD_USER_INFO_HEADERS and user
+                    else {}
+                ),
             },
             data=form_data.model_dump_json(exclude_none=True).encode(),
         )
@@ -643,7 +713,7 @@ async def delete_model(
     user=Depends(get_admin_user),
 ):
     if url_idx is None:
-        await get_all_models(request)
+        await get_all_models(request, user=user)
         models = request.app.state.OLLAMA_MODELS
 
         if form_data.name in models:
@@ -665,6 +735,16 @@ async def delete_model(
             headers={
                 "Content-Type": "application/json",
                 **({"Authorization": f"Bearer {key}"} if key else {}),
+                **(
+                    {
+                        "X-OpenWebUI-User-Name": user.name,
+                        "X-OpenWebUI-User-Id": user.id,
+                        "X-OpenWebUI-User-Email": user.email,
+                        "X-OpenWebUI-User-Role": user.role,
+                    }
+                    if ENABLE_FORWARD_USER_INFO_HEADERS and user
+                    else {}
+                ),
             },
         )
         r.raise_for_status()
@@ -693,7 +773,7 @@ async def delete_model(
 async def show_model_info(
     request: Request, form_data: ModelNameForm, user=Depends(get_verified_user)
 ):
-    await get_all_models(request)
+    await get_all_models(request, user=user)
     models = request.app.state.OLLAMA_MODELS
 
     if form_data.name not in models:
@@ -714,6 +794,16 @@ async def show_model_info(
             headers={
                 "Content-Type": "application/json",
                 **({"Authorization": f"Bearer {key}"} if key else {}),
+                **(
+                    {
+                        "X-OpenWebUI-User-Name": user.name,
+                        "X-OpenWebUI-User-Id": user.id,
+                        "X-OpenWebUI-User-Email": user.email,
+                        "X-OpenWebUI-User-Role": user.role,
+                    }
+                    if ENABLE_FORWARD_USER_INFO_HEADERS and user
+                    else {}
+                ),
             },
             data=form_data.model_dump_json(exclude_none=True).encode(),
         )
@@ -757,7 +847,7 @@ async def embed(
     log.info(f"generate_ollama_batch_embeddings {form_data}")
 
     if url_idx is None:
-        await get_all_models(request)
+        await get_all_models(request, user=user)
         models = request.app.state.OLLAMA_MODELS
 
         model = form_data.model
@@ -783,6 +873,16 @@ async def embed(
             headers={
                 "Content-Type": "application/json",
                 **({"Authorization": f"Bearer {key}"} if key else {}),
+                **(
+                    {
+                        "X-OpenWebUI-User-Name": user.name,
+                        "X-OpenWebUI-User-Id": user.id,
+                        "X-OpenWebUI-User-Email": user.email,
+                        "X-OpenWebUI-User-Role": user.role,
+                    }
+                    if ENABLE_FORWARD_USER_INFO_HEADERS and user
+                    else {}
+                ),
             },
             data=form_data.model_dump_json(exclude_none=True).encode(),
         )
@@ -826,7 +926,7 @@ async def embeddings(
     log.info(f"generate_ollama_embeddings {form_data}")
 
     if url_idx is None:
-        await get_all_models(request)
+        await get_all_models(request, user=user)
         models = request.app.state.OLLAMA_MODELS
 
         model = form_data.model
@@ -852,6 +952,16 @@ async def embeddings(
             headers={
                 "Content-Type": "application/json",
                 **({"Authorization": f"Bearer {key}"} if key else {}),
+                **(
+                    {
+                        "X-OpenWebUI-User-Name": user.name,
+                        "X-OpenWebUI-User-Id": user.id,
+                        "X-OpenWebUI-User-Email": user.email,
+                        "X-OpenWebUI-User-Role": user.role,
+                    }
+                    if ENABLE_FORWARD_USER_INFO_HEADERS and user
+                    else {}
+                ),
             },
             data=form_data.model_dump_json(exclude_none=True).encode(),
         )
@@ -901,7 +1011,7 @@ async def generate_completion(
     user=Depends(get_verified_user),
 ):
     if url_idx is None:
-        await get_all_models(request)
+        await get_all_models(request, user=user)
         models = request.app.state.OLLAMA_MODELS
 
         model = form_data.model
@@ -931,6 +1041,7 @@ async def generate_completion(
         url=f"{url}/api/generate",
         payload=form_data.model_dump_json(exclude_none=True).encode(),
         key=get_api_key(url_idx, url, request.app.state.config.OLLAMA_API_CONFIGS),
+        user=user,
     )
 
 
@@ -1060,6 +1171,7 @@ async def generate_chat_completion(
         stream=form_data.stream,
         key=get_api_key(url_idx, url, request.app.state.config.OLLAMA_API_CONFIGS),
         content_type="application/x-ndjson",
+        user=user,
     )
 
 
@@ -1162,6 +1274,7 @@ async def generate_openai_completion(
         payload=json.dumps(payload),
         stream=payload.get("stream", False),
         key=get_api_key(url_idx, url, request.app.state.config.OLLAMA_API_CONFIGS),
+        user=user,
     )
 
 
@@ -1240,6 +1353,7 @@ async def generate_openai_chat_completion(
         payload=json.dumps(payload),
         stream=payload.get("stream", False),
         key=get_api_key(url_idx, url, request.app.state.config.OLLAMA_API_CONFIGS),
+        user=user,
     )
 
 
@@ -1253,7 +1367,7 @@ async def get_openai_models(
 
     models = []
     if url_idx is None:
-        model_list = await get_all_models(request)
+        model_list = await get_all_models(request, user=user)
         models = [
             {
                 "id": model["model"],

+ 44 - 11
backend/open_webui/routers/openai.py

@@ -26,6 +26,7 @@ from open_webui.env import (
     ENABLE_FORWARD_USER_INFO_HEADERS,
     BYPASS_MODEL_ACCESS_CONTROL,
 )
+from open_webui.models.users import UserModel
 
 from open_webui.constants import ERROR_MESSAGES
 from open_webui.env import ENV, SRC_LOG_LEVELS
@@ -51,12 +52,25 @@ log.setLevel(SRC_LOG_LEVELS["OPENAI"])
 ##########################################
 
 
-async def send_get_request(url, key=None):
+async def send_get_request(url, key=None, user: UserModel = None):
     timeout = aiohttp.ClientTimeout(total=AIOHTTP_CLIENT_TIMEOUT_OPENAI_MODEL_LIST)
     try:
         async with aiohttp.ClientSession(timeout=timeout, trust_env=True) as session:
             async with session.get(
-                url, headers={**({"Authorization": f"Bearer {key}"} if key else {})}
+                url,
+                headers={
+                    **({"Authorization": f"Bearer {key}"} if key else {}),
+                    **(
+                        {
+                            "X-OpenWebUI-User-Name": user.name,
+                            "X-OpenWebUI-User-Id": user.id,
+                            "X-OpenWebUI-User-Email": user.email,
+                            "X-OpenWebUI-User-Role": user.role,
+                        }
+                        if ENABLE_FORWARD_USER_INFO_HEADERS
+                        else {}
+                    ),
+                },
             ) as response:
                 return await response.json()
     except Exception as e:
@@ -84,9 +98,15 @@ def openai_o1_o3_handler(payload):
         payload["max_completion_tokens"] = payload["max_tokens"]
         del payload["max_tokens"]
 
-    # Fix: O1 does not support the "system" parameter, Modify "system" to "user"
+    # Fix: o1 and o3 do not support the "system" role directly.
+    # For older models like "o1-mini" or "o1-preview", use role "user".
+    # For newer o1/o3 models, replace "system" with "developer".
     if payload["messages"][0]["role"] == "system":
-        payload["messages"][0]["role"] = "user"
+        model_lower = payload["model"].lower()
+        if model_lower.startswith("o1-mini") or model_lower.startswith("o1-preview"):
+            payload["messages"][0]["role"] = "user"
+        else:
+            payload["messages"][0]["role"] = "developer"
 
     return payload
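
The handler above now distinguishes the older o1-mini/o1-preview models, which only accept a leading "user" message, from newer o1/o3 models, which accept the "developer" role. A worked example of the transformation using the function exactly as defined in this hunk:

payload = {
    "model": "o3-mini",
    "max_tokens": 256,
    "messages": [
        {"role": "system", "content": "You are a terse assistant."},
        {"role": "user", "content": "Hello"},
    ],
}

payload = openai_o1_o3_handler(payload)
# max_tokens is renamed to max_completion_tokens, and the leading "system"
# message becomes role "developer" (it would become "user" for a model id
# starting with "o1-mini" or "o1-preview").
assert "max_completion_tokens" in payload and "max_tokens" not in payload
assert payload["messages"][0]["role"] == "developer"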
 
@@ -247,7 +267,7 @@ async def speech(request: Request, user=Depends(get_verified_user)):
         raise HTTPException(status_code=401, detail=ERROR_MESSAGES.OPENAI_NOT_FOUND)
 
 
-async def get_all_models_responses(request: Request) -> list:
+async def get_all_models_responses(request: Request, user: UserModel) -> list:
     if not request.app.state.config.ENABLE_OPENAI_API:
         return []
 
@@ -271,7 +291,9 @@ async def get_all_models_responses(request: Request) -> list:
         ):
             request_tasks.append(
                 send_get_request(
-                    f"{url}/models", request.app.state.config.OPENAI_API_KEYS[idx]
+                    f"{url}/models",
+                    request.app.state.config.OPENAI_API_KEYS[idx],
+                    user=user,
                 )
             )
         else:
@@ -291,6 +313,7 @@ async def get_all_models_responses(request: Request) -> list:
                         send_get_request(
                             f"{url}/models",
                             request.app.state.config.OPENAI_API_KEYS[idx],
+                            user=user,
                         )
                     )
                 else:
@@ -352,13 +375,13 @@ async def get_filtered_models(models, user):
 
 
 @cached(ttl=3)
-async def get_all_models(request: Request) -> dict[str, list]:
+async def get_all_models(request: Request, user: UserModel) -> dict[str, list]:
     log.info("get_all_models()")
 
     if not request.app.state.config.ENABLE_OPENAI_API:
         return {"data": []}
 
-    responses = await get_all_models_responses(request)
+    responses = await get_all_models_responses(request, user=user)
 
     def extract_data(response):
         if response and "data" in response:
@@ -418,7 +441,7 @@ async def get_models(
     }
 
     if url_idx is None:
-        models = await get_all_models(request)
+        models = await get_all_models(request, user=user)
     else:
         url = request.app.state.config.OPENAI_API_BASE_URLS[url_idx]
         key = request.app.state.config.OPENAI_API_KEYS[url_idx]
@@ -515,6 +538,16 @@ async def verify_connection(
                 headers={
                     "Authorization": f"Bearer {key}",
                     "Content-Type": "application/json",
+                    **(
+                        {
+                            "X-OpenWebUI-User-Name": user.name,
+                            "X-OpenWebUI-User-Id": user.id,
+                            "X-OpenWebUI-User-Email": user.email,
+                            "X-OpenWebUI-User-Role": user.role,
+                        }
+                        if ENABLE_FORWARD_USER_INFO_HEADERS
+                        else {}
+                    ),
                 },
             ) as r:
                 if r.status != 200:
@@ -587,7 +620,7 @@ async def generate_chat_completion(
                 detail="Model not found",
             )
 
-    await get_all_models(request)
+    await get_all_models(request, user=user)
     model = request.app.state.OPENAI_MODELS.get(model_id)
     if model:
         idx = model["urlIdx"]
@@ -777,7 +810,7 @@ async def proxy(path: str, request: Request, user=Depends(get_verified_user)):
         if r is not None:
             try:
                 res = await r.json()
-                print(res)
+                log.error(res)
                 if "error" in res:
                     detail = f"External: {res['error']['message'] if 'message' in res['error'] else res['error']}"
             except Exception:

+ 11 - 11
backend/open_webui/routers/pipelines.py

@@ -101,7 +101,7 @@ async def process_pipeline_inlet_filter(request, payload, user, models):
                 if "detail" in res:
                     raise Exception(response.status, res["detail"])
             except Exception as e:
-                print(f"Connection error: {e}")
+                log.exception(f"Connection error: {e}")
 
     return payload
 
@@ -153,7 +153,7 @@ async def process_pipeline_outlet_filter(request, payload, user, models):
                 except Exception:
                     pass
             except Exception as e:
-                print(f"Connection error: {e}")
+                log.exception(f"Connection error: {e}")
 
     return payload
 
@@ -169,7 +169,7 @@ router = APIRouter()
 
 @router.get("/list")
 async def get_pipelines_list(request: Request, user=Depends(get_admin_user)):
-    responses = await get_all_models_responses(request)
+    responses = await get_all_models_responses(request, user)
     log.debug(f"get_pipelines_list: get_openai_models_responses returned {responses}")
 
     urlIdxs = [
@@ -196,7 +196,7 @@ async def upload_pipeline(
     file: UploadFile = File(...),
     user=Depends(get_admin_user),
 ):
-    print("upload_pipeline", urlIdx, file.filename)
+    log.info(f"upload_pipeline: urlIdx={urlIdx}, filename={file.filename}")
     # Check if the uploaded file is a python file
     if not (file.filename and file.filename.endswith(".py")):
         raise HTTPException(
@@ -231,7 +231,7 @@ async def upload_pipeline(
         return {**data}
     except Exception as e:
         # Handle connection error here
-        print(f"Connection error: {e}")
+        log.exception(f"Connection error: {e}")
 
         detail = None
         status_code = status.HTTP_404_NOT_FOUND
@@ -282,7 +282,7 @@ async def add_pipeline(
         return {**data}
     except Exception as e:
         # Handle connection error here
-        print(f"Connection error: {e}")
+        log.exception(f"Connection error: {e}")
 
         detail = None
         if r is not None:
@@ -327,7 +327,7 @@ async def delete_pipeline(
         return {**data}
     except Exception as e:
         # Handle connection error here
-        print(f"Connection error: {e}")
+        log.exception(f"Connection error: {e}")
 
         detail = None
         if r is not None:
@@ -361,7 +361,7 @@ async def get_pipelines(
         return {**data}
     except Exception as e:
         # Handle connection error here
-        print(f"Connection error: {e}")
+        log.exception(f"Connection error: {e}")
 
         detail = None
         if r is not None:
@@ -400,7 +400,7 @@ async def get_pipeline_valves(
         return {**data}
     except Exception as e:
         # Handle connection error here
-        print(f"Connection error: {e}")
+        log.exception(f"Connection error: {e}")
 
         detail = None
         if r is not None:
@@ -440,7 +440,7 @@ async def get_pipeline_valves_spec(
         return {**data}
     except Exception as e:
         # Handle connection error here
-        print(f"Connection error: {e}")
+        log.exception(f"Connection error: {e}")
 
         detail = None
         if r is not None:
@@ -482,7 +482,7 @@ async def update_pipeline_valves(
         return {**data}
     except Exception as e:
         # Handle connection error here
-        print(f"Connection error: {e}")
+        log.exception(f"Connection error: {e}")
 
         detail = None
 

+ 98 - 39
backend/open_webui/routers/retrieval.py

@@ -352,10 +352,16 @@ async def get_rag_config(request: Request, user=Depends(get_admin_user)):
         "status": True,
         "pdf_extract_images": request.app.state.config.PDF_EXTRACT_IMAGES,
         "RAG_FULL_CONTEXT": request.app.state.config.RAG_FULL_CONTEXT,
+        "BYPASS_EMBEDDING_AND_RETRIEVAL": request.app.state.config.BYPASS_EMBEDDING_AND_RETRIEVAL,
         "enable_google_drive_integration": request.app.state.config.ENABLE_GOOGLE_DRIVE_INTEGRATION,
+        "enable_onedrive_integration": request.app.state.config.ENABLE_ONEDRIVE_INTEGRATION,
         "content_extraction": {
             "engine": request.app.state.config.CONTENT_EXTRACTION_ENGINE,
             "tika_server_url": request.app.state.config.TIKA_SERVER_URL,
+            "document_intelligence_config": {
+                "endpoint": request.app.state.config.DOCUMENT_INTELLIGENCE_ENDPOINT,
+                "key": request.app.state.config.DOCUMENT_INTELLIGENCE_KEY,
+            },
         },
         "chunk": {
             "text_splitter": request.app.state.config.TEXT_SPLITTER,
@@ -373,10 +379,11 @@ async def get_rag_config(request: Request, user=Depends(get_admin_user)):
         },
         "web": {
             "ENABLE_RAG_WEB_LOADER_SSL_VERIFICATION": request.app.state.config.ENABLE_RAG_WEB_LOADER_SSL_VERIFICATION,
-            "RAG_WEB_SEARCH_FULL_CONTEXT": request.app.state.config.RAG_WEB_SEARCH_FULL_CONTEXT,
+            "BYPASS_WEB_SEARCH_EMBEDDING_AND_RETRIEVAL": request.app.state.config.BYPASS_WEB_SEARCH_EMBEDDING_AND_RETRIEVAL,
             "search": {
                 "enabled": request.app.state.config.ENABLE_RAG_WEB_SEARCH,
                 "drive": request.app.state.config.ENABLE_GOOGLE_DRIVE_INTEGRATION,
+                "onedrive": request.app.state.config.ENABLE_ONEDRIVE_INTEGRATION,
                 "engine": request.app.state.config.RAG_WEB_SEARCH_ENGINE,
                 "searxng_query_url": request.app.state.config.SEARXNG_QUERY_URL,
                 "google_pse_api_key": request.app.state.config.GOOGLE_PSE_API_KEY,
@@ -399,6 +406,7 @@ async def get_rag_config(request: Request, user=Depends(get_admin_user)):
                 "bing_search_v7_subscription_key": request.app.state.config.BING_SEARCH_V7_SUBSCRIPTION_KEY,
                 "exa_api_key": request.app.state.config.EXA_API_KEY,
                 "result_count": request.app.state.config.RAG_WEB_SEARCH_RESULT_COUNT,
+                "trust_env": request.app.state.config.RAG_WEB_SEARCH_TRUST_ENV,
                 "concurrent_requests": request.app.state.config.RAG_WEB_SEARCH_CONCURRENT_REQUESTS,
                 "domain_filter_list": request.app.state.config.RAG_WEB_SEARCH_DOMAIN_FILTER_LIST,
             },
@@ -411,9 +419,15 @@ class FileConfig(BaseModel):
     max_count: Optional[int] = None
 
 
+class DocumentIntelligenceConfigForm(BaseModel):
+    endpoint: str
+    key: str
+
+
 class ContentExtractionConfig(BaseModel):
     engine: str = ""
     tika_server_url: Optional[str] = None
+    document_intelligence_config: Optional[DocumentIntelligenceConfigForm] = None
 
 
 class ChunkParamUpdateForm(BaseModel):
@@ -460,13 +474,15 @@ class WebSearchConfig(BaseModel):
 class WebConfig(BaseModel):
     search: WebSearchConfig
     ENABLE_RAG_WEB_LOADER_SSL_VERIFICATION: Optional[bool] = None
-    RAG_WEB_SEARCH_FULL_CONTEXT: Optional[bool] = None
+    BYPASS_WEB_SEARCH_EMBEDDING_AND_RETRIEVAL: Optional[bool] = None
 
 
 class ConfigUpdateForm(BaseModel):
     RAG_FULL_CONTEXT: Optional[bool] = None
+    BYPASS_EMBEDDING_AND_RETRIEVAL: Optional[bool] = None
     pdf_extract_images: Optional[bool] = None
     enable_google_drive_integration: Optional[bool] = None
+    enable_onedrive_integration: Optional[bool] = None
     file: Optional[FileConfig] = None
     content_extraction: Optional[ContentExtractionConfig] = None
     chunk: Optional[ChunkParamUpdateForm] = None
@@ -490,24 +506,45 @@ async def update_rag_config(
         else request.app.state.config.RAG_FULL_CONTEXT
     )
 
+    request.app.state.config.BYPASS_EMBEDDING_AND_RETRIEVAL = (
+        form_data.BYPASS_EMBEDDING_AND_RETRIEVAL
+        if form_data.BYPASS_EMBEDDING_AND_RETRIEVAL is not None
+        else request.app.state.config.BYPASS_EMBEDDING_AND_RETRIEVAL
+    )
+
     request.app.state.config.ENABLE_GOOGLE_DRIVE_INTEGRATION = (
         form_data.enable_google_drive_integration
         if form_data.enable_google_drive_integration is not None
         else request.app.state.config.ENABLE_GOOGLE_DRIVE_INTEGRATION
     )
 
+    request.app.state.config.ENABLE_ONEDRIVE_INTEGRATION = (
+        form_data.enable_onedrive_integration
+        if form_data.enable_onedrive_integration is not None
+        else request.app.state.config.ENABLE_ONEDRIVE_INTEGRATION
+    )
+
     if form_data.file is not None:
         request.app.state.config.FILE_MAX_SIZE = form_data.file.max_size
         request.app.state.config.FILE_MAX_COUNT = form_data.file.max_count
 
     if form_data.content_extraction is not None:
-        log.info(f"Updating text settings: {form_data.content_extraction}")
+        log.info(
+            f"Updating content extraction: {request.app.state.config.CONTENT_EXTRACTION_ENGINE} to {form_data.content_extraction.engine}"
+        )
         request.app.state.config.CONTENT_EXTRACTION_ENGINE = (
             form_data.content_extraction.engine
         )
         request.app.state.config.TIKA_SERVER_URL = (
             form_data.content_extraction.tika_server_url
         )
+        if form_data.content_extraction.document_intelligence_config is not None:
+            request.app.state.config.DOCUMENT_INTELLIGENCE_ENDPOINT = (
+                form_data.content_extraction.document_intelligence_config.endpoint
+            )
+            request.app.state.config.DOCUMENT_INTELLIGENCE_KEY = (
+                form_data.content_extraction.document_intelligence_config.key
+            )
 
     if form_data.chunk is not None:
         request.app.state.config.TEXT_SPLITTER = form_data.chunk.text_splitter
@@ -528,8 +565,8 @@ async def update_rag_config(
         request.app.state.config.ENABLE_RAG_WEB_SEARCH = form_data.web.search.enabled
         request.app.state.config.RAG_WEB_SEARCH_ENGINE = form_data.web.search.engine
 
-        request.app.state.config.RAG_WEB_SEARCH_FULL_CONTEXT = (
-            form_data.web.RAG_WEB_SEARCH_FULL_CONTEXT
+        request.app.state.config.BYPASS_WEB_SEARCH_EMBEDDING_AND_RETRIEVAL = (
+            form_data.web.BYPASS_WEB_SEARCH_EMBEDDING_AND_RETRIEVAL
         )
 
         request.app.state.config.SEARXNG_QUERY_URL = (
@@ -597,6 +634,7 @@ async def update_rag_config(
         "status": True,
         "pdf_extract_images": request.app.state.config.PDF_EXTRACT_IMAGES,
         "RAG_FULL_CONTEXT": request.app.state.config.RAG_FULL_CONTEXT,
+        "BYPASS_EMBEDDING_AND_RETRIEVAL": request.app.state.config.BYPASS_EMBEDDING_AND_RETRIEVAL,
         "file": {
             "max_size": request.app.state.config.FILE_MAX_SIZE,
             "max_count": request.app.state.config.FILE_MAX_COUNT,
@@ -604,6 +642,10 @@ async def update_rag_config(
         "content_extraction": {
             "engine": request.app.state.config.CONTENT_EXTRACTION_ENGINE,
             "tika_server_url": request.app.state.config.TIKA_SERVER_URL,
+            "document_intelligence_config": {
+                "endpoint": request.app.state.config.DOCUMENT_INTELLIGENCE_ENDPOINT,
+                "key": request.app.state.config.DOCUMENT_INTELLIGENCE_KEY,
+            },
         },
         "chunk": {
             "text_splitter": request.app.state.config.TEXT_SPLITTER,
@@ -617,7 +659,7 @@ async def update_rag_config(
         },
         "web": {
             "ENABLE_RAG_WEB_LOADER_SSL_VERIFICATION": request.app.state.config.ENABLE_RAG_WEB_LOADER_SSL_VERIFICATION,
-            "RAG_WEB_SEARCH_FULL_CONTEXT": request.app.state.config.RAG_WEB_SEARCH_FULL_CONTEXT,
+            "BYPASS_WEB_SEARCH_EMBEDDING_AND_RETRIEVAL": request.app.state.config.BYPASS_WEB_SEARCH_EMBEDDING_AND_RETRIEVAL,
             "search": {
                 "enabled": request.app.state.config.ENABLE_RAG_WEB_SEARCH,
                 "engine": request.app.state.config.RAG_WEB_SEARCH_ENGINE,
@@ -880,7 +922,12 @@ def process_file(
             # Update the content in the file
             # Usage: /files/{file_id}/data/content/update
 
-            VECTOR_DB_CLIENT.delete_collection(collection_name=f"file-{file.id}")
+            try:
+                # /files/{file_id}/data/content/update
+                VECTOR_DB_CLIENT.delete_collection(collection_name=f"file-{file.id}")
+            except Exception:
+                # Collection may not exist yet (e.g. audio file upload pipeline); ignore.
+                pass
 
             docs = [
                 Document(
@@ -937,6 +984,8 @@ def process_file(
                     engine=request.app.state.config.CONTENT_EXTRACTION_ENGINE,
                     TIKA_SERVER_URL=request.app.state.config.TIKA_SERVER_URL,
                     PDF_EXTRACT_IMAGES=request.app.state.config.PDF_EXTRACT_IMAGES,
+                    DOCUMENT_INTELLIGENCE_ENDPOINT=request.app.state.config.DOCUMENT_INTELLIGENCE_ENDPOINT,
+                    DOCUMENT_INTELLIGENCE_KEY=request.app.state.config.DOCUMENT_INTELLIGENCE_KEY,
                 )
                 docs = loader.load(
                     file.filename, file.meta.get("content_type"), file_path
@@ -979,36 +1028,45 @@ def process_file(
         hash = calculate_sha256_string(text_content)
         Files.update_file_hash_by_id(file.id, hash)
 
-        try:
-            result = save_docs_to_vector_db(
-                request,
-                docs=docs,
-                collection_name=collection_name,
-                metadata={
-                    "file_id": file.id,
-                    "name": file.filename,
-                    "hash": hash,
-                },
-                add=(True if form_data.collection_name else False),
-                user=user,
-            )
-
-            if result:
-                Files.update_file_metadata_by_id(
-                    file.id,
-                    {
-                        "collection_name": collection_name,
+        if not request.app.state.config.BYPASS_EMBEDDING_AND_RETRIEVAL:
+            try:
+                result = save_docs_to_vector_db(
+                    request,
+                    docs=docs,
+                    collection_name=collection_name,
+                    metadata={
+                        "file_id": file.id,
+                        "name": file.filename,
+                        "hash": hash,
                     },
+                    add=(True if form_data.collection_name else False),
+                    user=user,
                 )
 
-                return {
-                    "status": True,
-                    "collection_name": collection_name,
-                    "filename": file.filename,
-                    "content": text_content,
-                }
-        except Exception as e:
-            raise e
+                if result:
+                    Files.update_file_metadata_by_id(
+                        file.id,
+                        {
+                            "collection_name": collection_name,
+                        },
+                    )
+
+                    return {
+                        "status": True,
+                        "collection_name": collection_name,
+                        "filename": file.filename,
+                        "content": text_content,
+                    }
+            except Exception as e:
+                raise e
+        else:
+            return {
+                "status": True,
+                "collection_name": None,
+                "filename": file.filename,
+                "content": text_content,
+            }
+
     except Exception as e:
         log.exception(e)
         if "No pandoc was found" in str(e):
@@ -1368,9 +1426,11 @@ async def process_web_search(
         )
         docs = await loader.aload()
 
-        if request.app.state.config.RAG_WEB_SEARCH_FULL_CONTEXT:
+        if request.app.state.config.BYPASS_WEB_SEARCH_EMBEDDING_AND_RETRIEVAL:
             return {
                 "status": True,
+                "collection_name": None,
+                "filenames": urls,
                 "docs": [
                     {
                         "content": doc.page_content,
@@ -1378,7 +1438,6 @@ async def process_web_search(
                     }
                     for doc in docs
                 ],
-                "filenames": urls,
                 "loaded_count": len(docs),
             }
         else:
@@ -1553,11 +1612,11 @@ def reset_upload_dir(user=Depends(get_admin_user)) -> bool:
                     elif os.path.isdir(file_path):
                         shutil.rmtree(file_path)  # Remove the directory
                 except Exception as e:
-                    print(f"Failed to delete {file_path}. Reason: {e}")
+                    log.exception(f"Failed to delete {file_path}. Reason: {e}")
         else:
-            print(f"The directory {folder} does not exist")
+            log.warning(f"The directory {folder} does not exist")
     except Exception as e:
-        print(f"Failed to process the directory {folder}. Reason: {e}")
+        log.exception(f"Failed to process the directory {folder}. Reason: {e}")
     return True
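
The new BYPASS_EMBEDDING_AND_RETRIEVAL flag and the Document Intelligence block ride on the existing ConfigUpdateForm, so an admin client only needs to send the fields it wants to change. A hedged example request body; the route prefix and the "document_intelligence" engine name are assumptions based on this file, not confirmed by the diff:

import requests

# Assumed endpoint; adjust host and prefix to match the deployment.
url = "http://localhost:8080/api/v1/retrieval/config/update"
body = {
    "BYPASS_EMBEDDING_AND_RETRIEVAL": False,
    "content_extraction": {
        "engine": "document_intelligence",
        "tika_server_url": None,
        "document_intelligence_config": {
            "endpoint": "https://<resource>.cognitiveservices.azure.com/",
            "key": "<document-intelligence-key>",
        },
    },
}
r = requests.post(url, json=body, headers={"Authorization": "Bearer <admin-token>"})
r.raise_for_status()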
 
 

+ 46 - 0
backend/open_webui/routers/tasks.py

@@ -20,6 +20,10 @@ from open_webui.utils.auth import get_admin_user, get_verified_user
 from open_webui.constants import TASKS
 
 from open_webui.routers.pipelines import process_pipeline_inlet_filter
+from open_webui.utils.filter import (
+    get_sorted_filter_ids,
+    process_filter_functions,
+)
 from open_webui.utils.task import get_task_model_id
 
 from open_webui.config import (
@@ -221,6 +225,12 @@ async def generate_title(
         },
     }
 
+    # Process the payload through the pipeline
+    try:
+        payload = await process_pipeline_inlet_filter(request, payload, user, models)
+    except Exception as e:
+        raise e
+
     try:
         return await generate_chat_completion(request, form_data=payload, user=user)
     except Exception as e:
@@ -290,6 +300,12 @@ async def generate_chat_tags(
         },
     }
 
+    # Process the payload through the pipeline
+    try:
+        payload = await process_pipeline_inlet_filter(request, payload, user, models)
+    except Exception as e:
+        raise e
+
     try:
         return await generate_chat_completion(request, form_data=payload, user=user)
     except Exception as e:
@@ -356,6 +372,12 @@ async def generate_image_prompt(
         },
     }
 
+    # Process the payload through the pipeline
+    try:
+        payload = await process_pipeline_inlet_filter(request, payload, user, models)
+    except Exception as e:
+        raise e
+
     try:
         return await generate_chat_completion(request, form_data=payload, user=user)
     except Exception as e:
@@ -433,6 +455,12 @@ async def generate_queries(
         },
     }
 
+    # Process the payload through the pipeline
+    try:
+        payload = await process_pipeline_inlet_filter(request, payload, user, models)
+    except Exception as e:
+        raise e
+
     try:
         return await generate_chat_completion(request, form_data=payload, user=user)
     except Exception as e:
@@ -514,6 +542,12 @@ async def generate_autocompletion(
         },
     }
 
+    # Process the payload through the pipeline
+    try:
+        payload = await process_pipeline_inlet_filter(request, payload, user, models)
+    except Exception as e:
+        raise e
+
     try:
         return await generate_chat_completion(request, form_data=payload, user=user)
     except Exception as e:
@@ -584,6 +618,12 @@ async def generate_emoji(
         },
     }
 
+    # Process the payload through the pipeline
+    try:
+        payload = await process_pipeline_inlet_filter(request, payload, user, models)
+    except Exception as e:
+        raise e
+
     try:
         return await generate_chat_completion(request, form_data=payload, user=user)
     except Exception as e:
@@ -644,6 +684,12 @@ async def generate_moa_response(
         },
     }
 
+    # Process the payload through the pipeline
+    try:
+        payload = await process_pipeline_inlet_filter(request, payload, user, models)
+    except Exception as e:
+        raise e
+
     try:
         return await generate_chat_completion(request, form_data=payload, user=user)
     except Exception as e:
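
Each task endpoint above now pushes its payload through process_pipeline_inlet_filter before generating the completion, using the same try/except-reraise block every time. A hypothetical helper (not in the diff) that collapses the repetition:

async def run_task_completion(request, payload, user, models):
    # Let pipeline inlet filters rewrite or veto task payloads (title, tags,
    # queries, autocomplete, emoji, MoA) exactly as they do for chat payloads.
    payload = await process_pipeline_inlet_filter(request, payload, user, models)
    return await generate_chat_completion(request, form_data=payload, user=user)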

+ 9 - 4
backend/open_webui/routers/tools.py

@@ -1,3 +1,4 @@
+import logging
 from pathlib import Path
 from typing import Optional
 
@@ -15,6 +16,10 @@ from fastapi import APIRouter, Depends, HTTPException, Request, status
 from open_webui.utils.tools import get_tools_specs
 from open_webui.utils.auth import get_admin_user, get_verified_user
 from open_webui.utils.access_control import has_access, has_permission
+from open_webui.env import SRC_LOG_LEVELS
+
+log = logging.getLogger(__name__)
+log.setLevel(SRC_LOG_LEVELS["MAIN"])
 
 
 router = APIRouter()
@@ -111,7 +116,7 @@ async def create_new_tools(
                     detail=ERROR_MESSAGES.DEFAULT("Error creating tools"),
                 )
         except Exception as e:
-            print(e)
+            log.exception(f"Failed to load the tool by id {form_data.id}: {e}")
             raise HTTPException(
                 status_code=status.HTTP_400_BAD_REQUEST,
                 detail=ERROR_MESSAGES.DEFAULT(str(e)),
@@ -193,7 +198,7 @@ async def update_tools_by_id(
             "specs": specs,
         }
 
-        print(updated)
+        log.debug(updated)
         tools = Tools.update_tool_by_id(id, updated)
 
         if tools:
@@ -343,7 +348,7 @@ async def update_tools_valves_by_id(
         Tools.update_tool_valves_by_id(id, valves.model_dump())
         return valves.model_dump()
     except Exception as e:
-        print(e)
+        log.exception(f"Failed to update tool valves by id {id}: {e}")
         raise HTTPException(
             status_code=status.HTTP_400_BAD_REQUEST,
             detail=ERROR_MESSAGES.DEFAULT(str(e)),
@@ -421,7 +426,7 @@ async def update_tools_user_valves_by_id(
                 )
                 return user_valves.model_dump()
             except Exception as e:
-                print(e)
+                log.exception(f"Failed to update user valves by id {id}: {e}")
                 raise HTTPException(
                     status_code=status.HTTP_400_BAD_REQUEST,
                     detail=ERROR_MESSAGES.DEFAULT(str(e)),

+ 6 - 1
backend/open_webui/routers/utils.py

@@ -1,4 +1,5 @@
 import black
+import logging
 import markdown
 
 from open_webui.models.chats import ChatTitleMessagesForm
@@ -13,8 +14,12 @@ from open_webui.utils.misc import get_gravatar_url
 from open_webui.utils.pdf_generator import PDFGenerator
 from open_webui.utils.auth import get_admin_user, get_verified_user
 from open_webui.utils.code_interpreter import execute_code_jupyter
+from open_webui.env import SRC_LOG_LEVELS
 
 
+log = logging.getLogger(__name__)
+log.setLevel(SRC_LOG_LEVELS["MAIN"])
+
 router = APIRouter()
 
 
@@ -96,7 +101,7 @@ async def download_chat_as_pdf(
             headers={"Content-Disposition": "attachment;filename=chat.pdf"},
         )
     except Exception as e:
-        print(e)
+        log.exception(f"Error generating PDF: {e}")
         raise HTTPException(status_code=400, detail=str(e))
 
 

+ 18 - 3
backend/open_webui/storage/provider.py

@@ -1,10 +1,12 @@
 import os
 import shutil
 import json
+import logging
 from abc import ABC, abstractmethod
 from typing import BinaryIO, Tuple
 
 import boto3
+from botocore.config import Config
 from botocore.exceptions import ClientError
 from open_webui.config import (
     S3_ACCESS_KEY_ID,
@@ -13,6 +15,8 @@ from open_webui.config import (
     S3_KEY_PREFIX,
     S3_REGION_NAME,
     S3_SECRET_ACCESS_KEY,
+    S3_USE_ACCELERATE_ENDPOINT,
+    S3_ADDRESSING_STYLE,
     GCS_BUCKET_NAME,
     GOOGLE_APPLICATION_CREDENTIALS_JSON,
     AZURE_STORAGE_ENDPOINT,
@@ -27,6 +31,11 @@ from open_webui.constants import ERROR_MESSAGES
 from azure.identity import DefaultAzureCredential
 from azure.storage.blob import BlobServiceClient
 from azure.core.exceptions import ResourceNotFoundError
+from open_webui.env import SRC_LOG_LEVELS
+
+
+log = logging.getLogger(__name__)
+log.setLevel(SRC_LOG_LEVELS["MAIN"])
 
 
 class StorageProvider(ABC):
@@ -71,7 +80,7 @@ class LocalStorageProvider(StorageProvider):
         if os.path.isfile(file_path):
             os.remove(file_path)
         else:
-            print(f"File {file_path} not found in local storage.")
+            log.warning(f"File {file_path} not found in local storage.")
 
     @staticmethod
     def delete_all_files() -> None:
@@ -85,9 +94,9 @@ class LocalStorageProvider(StorageProvider):
                     elif os.path.isdir(file_path):
                         shutil.rmtree(file_path)  # Remove the directory
                 except Exception as e:
-                    print(f"Failed to delete {file_path}. Reason: {e}")
+                    log.exception(f"Failed to delete {file_path}. Reason: {e}")
         else:
-            print(f"Directory {UPLOAD_DIR} not found in local storage.")
+            log.warning(f"Directory {UPLOAD_DIR} not found in local storage.")
 
 
 class S3StorageProvider(StorageProvider):
@@ -98,6 +107,12 @@ class S3StorageProvider(StorageProvider):
             endpoint_url=S3_ENDPOINT_URL,
             aws_access_key_id=S3_ACCESS_KEY_ID,
             aws_secret_access_key=S3_SECRET_ACCESS_KEY,
+            config=Config(
+                s3={
+                    "use_accelerate_endpoint": S3_USE_ACCELERATE_ENDPOINT,
+                    "addressing_style": S3_ADDRESSING_STYLE,
+                },
+            ),
         )
         self.bucket_name = S3_BUCKET_NAME
         self.key_prefix = S3_KEY_PREFIX if S3_KEY_PREFIX else ""
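
The S3 client now honours S3_USE_ACCELERATE_ENDPOINT and S3_ADDRESSING_STYLE through botocore's Config. The same construction as a standalone sketch, with placeholder literals in place of the config module's variables:

import boto3
from botocore.config import Config

s3_client = boto3.client(
    "s3",
    region_name="us-east-1",                # S3_REGION_NAME
    endpoint_url=None,                      # S3_ENDPOINT_URL (None = AWS default)
    aws_access_key_id="<access-key-id>",    # S3_ACCESS_KEY_ID
    aws_secret_access_key="<secret-key>",   # S3_SECRET_ACCESS_KEY
    config=Config(
        s3={
            "use_accelerate_endpoint": False,   # S3_USE_ACCELERATE_ENDPOINT
            "addressing_style": "virtual",      # S3_ADDRESSING_STYLE ("virtual" or "path")
        },
    ),
)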

+ 249 - 0
backend/open_webui/utils/audit.py

@@ -0,0 +1,249 @@
+from contextlib import asynccontextmanager
+from dataclasses import asdict, dataclass
+from enum import Enum
+import re
+from typing import (
+    TYPE_CHECKING,
+    Any,
+    AsyncGenerator,
+    Dict,
+    MutableMapping,
+    Optional,
+    cast,
+)
+import uuid
+
+from asgiref.typing import (
+    ASGI3Application,
+    ASGIReceiveCallable,
+    ASGIReceiveEvent,
+    ASGISendCallable,
+    ASGISendEvent,
+    Scope as ASGIScope,
+)
+from loguru import logger
+from starlette.requests import Request
+
+from open_webui.env import AUDIT_LOG_LEVEL, MAX_BODY_LOG_SIZE
+from open_webui.utils.auth import get_current_user, get_http_authorization_cred
+from open_webui.models.users import UserModel
+
+
+if TYPE_CHECKING:
+    from loguru import Logger
+
+
+@dataclass(frozen=True)
+class AuditLogEntry:
+    # `Metadata` audit level properties
+    id: str
+    user: dict[str, Any]
+    audit_level: str
+    verb: str
+    request_uri: str
+    user_agent: Optional[str] = None
+    source_ip: Optional[str] = None
+    # `Request` audit level properties
+    request_object: Any = None
+    # `Request Response` level
+    response_object: Any = None
+    response_status_code: Optional[int] = None
+
+
+class AuditLevel(str, Enum):
+    NONE = "NONE"
+    METADATA = "METADATA"
+    REQUEST = "REQUEST"
+    REQUEST_RESPONSE = "REQUEST_RESPONSE"
+
+
+class AuditLogger:
+    """
+    A helper class that encapsulates audit logging functionality. It uses Loguru’s logger with an auditable binding to ensure that audit log entries are filtered correctly.
+
+    Parameters:
+    logger (Logger): An instance of Loguru’s logger.
+    """
+
+    def __init__(self, logger: "Logger"):
+        self.logger = logger.bind(auditable=True)
+
+    def write(
+        self,
+        audit_entry: AuditLogEntry,
+        *,
+        log_level: str = "INFO",
+        extra: Optional[dict] = None,
+    ):
+
+        entry = asdict(audit_entry)
+
+        if extra:
+            entry["extra"] = extra
+
+        self.logger.log(
+            log_level,
+            "",
+            **entry,
+        )
+
+
+class AuditContext:
+    """
+    Captures and aggregates the HTTP request and response bodies during the processing of a request. It ensures that only a configurable maximum amount of data is stored to prevent excessive memory usage.
+
+    Attributes:
+    request_body (bytearray): Accumulated request payload.
+    response_body (bytearray): Accumulated response payload.
+    max_body_size (int): Maximum number of bytes to capture.
+    metadata (Dict[str, Any]): A dictionary to store additional audit metadata (user, http verb, user agent, etc.).
+    """
+
+    def __init__(self, max_body_size: int = MAX_BODY_LOG_SIZE):
+        self.request_body = bytearray()
+        self.response_body = bytearray()
+        self.max_body_size = max_body_size
+        self.metadata: Dict[str, Any] = {}
+
+    def add_request_chunk(self, chunk: bytes):
+        if len(self.request_body) < self.max_body_size:
+            self.request_body.extend(
+                chunk[: self.max_body_size - len(self.request_body)]
+            )
+
+    def add_response_chunk(self, chunk: bytes):
+        if len(self.response_body) < self.max_body_size:
+            self.response_body.extend(
+                chunk[: self.max_body_size - len(self.response_body)]
+            )
+
+
+class AuditLoggingMiddleware:
+    """
+    ASGI middleware that intercepts HTTP requests and responses to perform audit logging. It captures request/response bodies (depending on audit level), headers, HTTP methods, and user information, then logs a structured audit entry at the end of the request cycle.
+    """
+
+    AUDITED_METHODS = {"PUT", "PATCH", "DELETE", "POST"}
+
+    def __init__(
+        self,
+        app: ASGI3Application,
+        *,
+        excluded_paths: Optional[list[str]] = None,
+        max_body_size: int = MAX_BODY_LOG_SIZE,
+        audit_level: AuditLevel = AuditLevel.NONE,
+    ) -> None:
+        self.app = app
+        self.audit_logger = AuditLogger(logger)
+        self.excluded_paths = excluded_paths or []
+        self.max_body_size = max_body_size
+        self.audit_level = audit_level
+
+    async def __call__(
+        self,
+        scope: ASGIScope,
+        receive: ASGIReceiveCallable,
+        send: ASGISendCallable,
+    ) -> None:
+        if scope["type"] != "http":
+            return await self.app(scope, receive, send)
+
+        request = Request(scope=cast(MutableMapping, scope))
+
+        if self._should_skip_auditing(request):
+            return await self.app(scope, receive, send)
+
+        async with self._audit_context(request) as context:
+
+            async def send_wrapper(message: ASGISendEvent) -> None:
+                if self.audit_level == AuditLevel.REQUEST_RESPONSE:
+                    await self._capture_response(message, context)
+
+                await send(message)
+
+            original_receive = receive
+
+            async def receive_wrapper() -> ASGIReceiveEvent:
+                nonlocal original_receive
+                message = await original_receive()
+
+                if self.audit_level in (
+                    AuditLevel.REQUEST,
+                    AuditLevel.REQUEST_RESPONSE,
+                ):
+                    await self._capture_request(message, context)
+
+                return message
+
+            await self.app(scope, receive_wrapper, send_wrapper)
+
+    @asynccontextmanager
+    async def _audit_context(
+        self, request: Request
+    ) -> AsyncGenerator[AuditContext, None]:
+        """
+        Async context manager that ensures an audit log entry is recorded after the request has been processed.
+        """
+        context = AuditContext()
+        try:
+            yield context
+        finally:
+            await self._log_audit_entry(request, context)
+
+    async def _get_authenticated_user(self, request: Request) -> UserModel:
+
+        auth_header = request.headers.get("Authorization")
+        assert auth_header
+        user = get_current_user(request, None, get_http_authorization_cred(auth_header))
+
+        return user
+
+    def _should_skip_auditing(self, request: Request) -> bool:
+        if (
+            request.method not in {"POST", "PUT", "PATCH", "DELETE"}
+            or AUDIT_LOG_LEVEL == "NONE"
+            or not request.headers.get("authorization")
+        ):
+            return True
+        # Match either /api/<resource>/... (e.g. the /api/chat endpoint) or /api/v1/<resource>/...
+        pattern = re.compile(
+            r"^/api(?:/v1)?/(" + "|".join(self.excluded_paths) + r")\b"
+        )
+        if pattern.match(request.url.path):
+            return True
+
+        return False
+
+    async def _capture_request(self, message: ASGIReceiveEvent, context: AuditContext):
+        if message["type"] == "http.request":
+            body = message.get("body", b"")
+            context.add_request_chunk(body)
+
+    async def _capture_response(self, message: ASGISendEvent, context: AuditContext):
+        if message["type"] == "http.response.start":
+            context.metadata["response_status_code"] = message["status"]
+
+        elif message["type"] == "http.response.body":
+            body = message.get("body", b"")
+            context.add_response_chunk(body)
+
+    async def _log_audit_entry(self, request: Request, context: AuditContext):
+        try:
+            user = await self._get_authenticated_user(request)
+
+            entry = AuditLogEntry(
+                id=str(uuid.uuid4()),
+                user=user.model_dump(include={"id", "name", "email", "role"}),
+                audit_level=self.audit_level.value,
+                verb=request.method,
+                request_uri=str(request.url),
+                response_status_code=context.metadata.get("response_status_code", None),
+                source_ip=request.client.host if request.client else None,
+                user_agent=request.headers.get("user-agent"),
+                request_object=context.request_body.decode("utf-8", errors="replace"),
+                response_object=context.response_body.decode("utf-8", errors="replace"),
+            )
+
+            self.audit_logger.write(entry)
+        except Exception as e:
+            logger.error(f"Failed to log audit entry: {str(e)}")
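
For reference, a minimal sketch of how this middleware might be attached to the FastAPI app (the actual registration presumably lives in backend/open_webui/main.py, which this PR also touches); the excluded paths, body-size limit and audit level below are illustrative assumptions, not values taken from this diff:

    from fastapi import FastAPI
    from open_webui.utils.audit import AuditLevel, AuditLoggingMiddleware

    app = FastAPI()

    # Illustrative configuration: audit write requests, skip chat endpoints,
    # and keep at most 4 KB of each captured body.
    app.add_middleware(
        AuditLoggingMiddleware,
        excluded_paths=["chats", "chat", "folders"],
        max_body_size=4096,
        audit_level=AuditLevel.REQUEST,
    )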

+ 19 - 7
backend/open_webui/utils/auth.py

@@ -14,14 +14,22 @@ from typing import Optional, Union, List, Dict
 from open_webui.models.users import Users
 
 from open_webui.constants import ERROR_MESSAGES
-from open_webui.env import WEBUI_SECRET_KEY, TRUSTED_SIGNATURE_KEY, STATIC_DIR
-
-from fastapi import Depends, HTTPException, Request, Response, status
+from open_webui.env import (
+    WEBUI_SECRET_KEY,
+    TRUSTED_SIGNATURE_KEY,
+    STATIC_DIR,
+    SRC_LOG_LEVELS,
+)
+
+from fastapi import BackgroundTasks, Depends, HTTPException, Request, Response, status
 from fastapi.security import HTTPAuthorizationCredentials, HTTPBearer
 from passlib.context import CryptContext
 
+
 logging.getLogger("passlib").setLevel(logging.ERROR)
 
+log = logging.getLogger(__name__)
+log.setLevel(SRC_LOG_LEVELS["OAUTH"])
 
 SESSION_SECRET = WEBUI_SECRET_KEY
 ALGORITHM = "HS256"
@@ -50,7 +58,7 @@ def verify_signature(payload: str, signature: str) -> bool:
 def override_static(path: str, content: str):
     # Ensure path is safe
     if "/" in path or ".." in path:
-        print(f"Invalid path: {path}")
+        log.error(f"Invalid path: {path}")
         return
 
     file_path = os.path.join(STATIC_DIR, path)
@@ -82,11 +90,11 @@ def get_license_data(app, key):
 
                 return True
             else:
-                print(
+                log.error(
                     f"License: retrieval issue: {getattr(res, 'text', 'unknown error')}"
                 )
         except Exception as ex:
-            print(f"License: Uncaught Exception: {ex}")
+            log.exception(f"License: Uncaught Exception: {ex}")
     return False
 
 
@@ -142,6 +150,7 @@ def get_http_authorization_cred(auth_header: str):
 
 def get_current_user(
     request: Request,
+    background_tasks: BackgroundTasks,
     auth_token: HTTPAuthorizationCredentials = Depends(bearer_security),
 ):
     token = None
@@ -194,7 +203,10 @@ def get_current_user(
                 detail=ERROR_MESSAGES.INVALID_TOKEN,
             )
         else:
-            Users.update_user_last_active_by_id(user.id)
+            # Refresh the user's last active timestamp asynchronously
+            # to prevent blocking the request
+            if background_tasks:
+                background_tasks.add_task(Users.update_user_last_active_by_id, user.id)
         return user
     else:
         raise HTTPException(
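
The BackgroundTasks parameter lets FastAPI defer the last-active write until after the response has been sent; it is also why the audit middleware above calls get_current_user(request, None, ...) and why the update is guarded with `if background_tasks:`. A standalone sketch of the same pattern (the endpoint and helper below are illustrative stand-ins, not the real Users call):

    from fastapi import BackgroundTasks, Depends, FastAPI

    app = FastAPI()

    def touch_last_active(user_id: str) -> None:
        # Stand-in for Users.update_user_last_active_by_id(user_id)
        print(f"updating last_active for {user_id}")

    def get_current_user(background_tasks: BackgroundTasks) -> dict:
        user = {"id": "u-123"}  # stand-in for the real token lookup
        # Schedule the write so it does not block the request/response cycle
        if background_tasks:
            background_tasks.add_task(touch_last_active, user["id"])
        return user

    @app.get("/api/v1/users/me")
    def me(user: dict = Depends(get_current_user)):
        return user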

+ 5 - 5
backend/open_webui/utils/chat.py

@@ -66,7 +66,7 @@ async def generate_direct_chat_completion(
     user: Any,
     models: dict,
 ):
-    print("generate_direct_chat_completion")
+    log.info("generate_direct_chat_completion")
 
     metadata = form_data.pop("metadata", {})
 
@@ -103,7 +103,7 @@ async def generate_direct_chat_completion(
             }
         )
 
-        print("res", res)
+        log.info(f"res: {res}")
 
         if res.get("status", False):
             # Define a generator to stream responses
@@ -285,7 +285,7 @@ chat_completion = generate_chat_completion
 
 async def chat_completed(request: Request, form_data: dict, user: Any):
     if not request.app.state.MODELS:
-        await get_all_models(request)
+        await get_all_models(request, user=user)
 
     if getattr(request.state, "direct", False) and hasattr(request.state, "model"):
         models = {
@@ -351,7 +351,7 @@ async def chat_action(request: Request, action_id: str, form_data: dict, user: A
         raise Exception(f"Action not found: {action_id}")
 
     if not request.app.state.MODELS:
-        await get_all_models(request)
+        await get_all_models(request, user=user)
 
     if getattr(request.state, "direct", False) and hasattr(request.state, "model"):
         models = {
@@ -432,7 +432,7 @@ async def chat_action(request: Request, action_id: str, form_data: dict, user: A
                             )
                         )
                 except Exception as e:
-                    print(e)
+                    log.exception(f"Failed to get user values: {e}")
 
                 params = {**params, "__user__": __user__}
 

+ 14 - 3
backend/open_webui/utils/filter.py

@@ -1,6 +1,12 @@
 import inspect
+import logging
+
 from open_webui.utils.plugin import load_function_module_by_id
 from open_webui.models.functions import Functions
+from open_webui.env import SRC_LOG_LEVELS
+
+log = logging.getLogger(__name__)
+log.setLevel(SRC_LOG_LEVELS["MAIN"])
 
 
 def get_sorted_filter_ids(model):
@@ -61,7 +67,12 @@ async def process_filter_functions(
         try:
             # Prepare parameters
             sig = inspect.signature(handler)
-            params = {"body": form_data} | {
+
+            params = {"body": form_data}
+            if filter_type == "stream":
+                params = {"event": form_data}
+
+            params = params | {
                 k: v
                 for k, v in {
                     **extra_params,
@@ -80,7 +91,7 @@ async def process_filter_functions(
                             )
                         )
                     except Exception as e:
-                        print(e)
+                        log.exception(f"Failed to get user values: {e}")
 
             # Execute handler
             if inspect.iscoroutinefunction(handler):
@@ -89,7 +100,7 @@ async def process_filter_functions(
                 form_data = handler(**params)
 
         except Exception as e:
-            print(f"Error in {filter_type} handler {filter_id}: {e}")
+            log.exception(f"Error in {filter_type} handler {filter_id}: {e}")
             raise e
 
     # Handle file cleanup for inlet
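
With the `{"event": form_data}` mapping above, filter functions gain a third hook alongside inlet/outlet: a stream handler that receives each streamed chunk under the `event` keyword. A sketch of what such a filter might look like, assuming the usual convention that the handler method name matches the filter type; the redaction logic is purely illustrative:

    class Filter:
        def inlet(self, body: dict) -> dict:
            # Runs before the request reaches the model
            return body

        def stream(self, event: dict) -> dict:
            # Runs once per streamed chunk; `event` is the parsed SSE payload
            for choice in event.get("choices", []):
                delta = choice.get("delta", {})
                if isinstance(delta.get("content"), str):
                    delta["content"] = delta["content"].replace("secret", "*****")
            return event

        def outlet(self, body: dict) -> dict:
            # Runs after the full response has been assembled
            return body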

+ 140 - 0
backend/open_webui/utils/logger.py

@@ -0,0 +1,140 @@
+import json
+import logging
+import sys
+from typing import TYPE_CHECKING
+
+from loguru import logger
+
+from open_webui.env import (
+    AUDIT_LOG_FILE_ROTATION_SIZE,
+    AUDIT_LOG_LEVEL,
+    AUDIT_LOGS_FILE_PATH,
+    GLOBAL_LOG_LEVEL,
+)
+
+
+if TYPE_CHECKING:
+    from loguru import Record
+
+
+def stdout_format(record: "Record") -> str:
+    """
+    Generates a formatted string for log records that are output to the console. This format includes a timestamp, log level, source location (module, function, and line), the log message, and any extra data (serialized as JSON).
+
+    Parameters:
+    record (Record): A Loguru record that contains logging details including time, level, name, function, line, message, and any extra context.
+    Returns:
+    str: A formatted log string intended for stdout.
+    """
+    record["extra"]["extra_json"] = json.dumps(record["extra"])
+    return (
+        "<green>{time:YYYY-MM-DD HH:mm:ss.SSS}</green> | "
+        "<level>{level: <8}</level> | "
+        "<cyan>{name}</cyan>:<cyan>{function}</cyan>:<cyan>{line}</cyan> - "
+        "<level>{message}</level> - {extra[extra_json]}"
+        "\n{exception}"
+    )
+
+
+class InterceptHandler(logging.Handler):
+    """
+    Intercepts log records from Python's standard logging module
+    and redirects them to Loguru's logger.
+    """
+
+    def emit(self, record):
+        """
+        Called by the standard logging module for each log event.
+        It transforms the standard `LogRecord` into a format compatible with Loguru
+        and passes it to Loguru's logger.
+        """
+        try:
+            level = logger.level(record.levelname).name
+        except ValueError:
+            level = record.levelno
+
+        frame, depth = sys._getframe(6), 6
+        while frame and frame.f_code.co_filename == logging.__file__:
+            frame = frame.f_back
+            depth += 1
+
+        logger.opt(depth=depth, exception=record.exc_info).log(
+            level, record.getMessage()
+        )
+
+
+def file_format(record: "Record"):
+    """
+    Formats audit log records into a structured JSON string for file output.
+
+    Parameters:
+    record (Record): A Loguru record containing extra audit data.
+    Returns:
+    str: A JSON-formatted string representing the audit data.
+    """
+
+    audit_data = {
+        "id": record["extra"].get("id", ""),
+        "timestamp": int(record["time"].timestamp()),
+        "user": record["extra"].get("user", dict()),
+        "audit_level": record["extra"].get("audit_level", ""),
+        "verb": record["extra"].get("verb", ""),
+        "request_uri": record["extra"].get("request_uri", ""),
+        "response_status_code": record["extra"].get("response_status_code", 0),
+        "source_ip": record["extra"].get("source_ip", ""),
+        "user_agent": record["extra"].get("user_agent", ""),
+        "request_object": record["extra"].get("request_object", b""),
+        "response_object": record["extra"].get("response_object", b""),
+        "extra": record["extra"].get("extra", {}),
+    }
+
+    record["extra"]["file_extra"] = json.dumps(audit_data, default=str)
+    return "{extra[file_extra]}\n"
+
+
+def start_logger():
+    """
+    Initializes and configures Loguru's logger with distinct handlers:
+
+    A console (stdout) handler for general log messages (excluding those marked as auditable).
+    An optional file handler for audit logs if audit logging is enabled.
+    Additionally, this function reconfigures Python’s standard logging to route through Loguru and adjusts logging levels for Uvicorn.
+
+    Audit entries are only written to the file handler when AUDIT_LOG_LEVEL is set to a value other than "NONE".
+    """
+    logger.remove()
+
+    logger.add(
+        sys.stdout,
+        level=GLOBAL_LOG_LEVEL,
+        format=stdout_format,
+        filter=lambda record: "auditable" not in record["extra"],
+    )
+
+    if AUDIT_LOG_LEVEL != "NONE":
+        try:
+            logger.add(
+                AUDIT_LOGS_FILE_PATH,
+                level="INFO",
+                rotation=AUDIT_LOG_FILE_ROTATION_SIZE,
+                compression="zip",
+                format=file_format,
+                filter=lambda record: record["extra"].get("auditable") is True,
+            )
+        except Exception as e:
+            logger.error(f"Failed to initialize audit log file handler: {str(e)}")
+
+    logging.basicConfig(
+        handlers=[InterceptHandler()], level=GLOBAL_LOG_LEVEL, force=True
+    )
+    for uvicorn_logger_name in ["uvicorn", "uvicorn.error"]:
+        uvicorn_logger = logging.getLogger(uvicorn_logger_name)
+        uvicorn_logger.setLevel(GLOBAL_LOG_LEVEL)
+        uvicorn_logger.handlers = []
+    for uvicorn_logger_name in ["uvicorn.access"]:
+        uvicorn_logger = logging.getLogger(uvicorn_logger_name)
+        uvicorn_logger.setLevel(GLOBAL_LOG_LEVEL)
+        uvicorn_logger.handlers = [InterceptHandler()]
+
+    logger.info(f"GLOBAL_LOG_LEVEL: {GLOBAL_LOG_LEVEL}")
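
A rough sketch of how the two sinks behave once start_logger() has run: ordinary records (including anything routed through the standard logging module via InterceptHandler) go to stdout, while records bound with auditable=True are filtered out of the console and, when AUDIT_LOG_LEVEL is not "NONE", written as JSON lines to AUDIT_LOGS_FILE_PATH. The bound fields below mirror the AuditLogEntry fields; the values are invented:

    import logging

    from loguru import logger

    from open_webui.utils.logger import start_logger

    start_logger()

    # Routed through InterceptHandler into Loguru and printed to stdout
    logging.getLogger("open_webui.demo").info("regular application log")

    # Marked auditable: excluded from the stdout sink, picked up by the
    # audit file sink (roughly what AuditLogger.write does via bind/log)
    logger.bind(
        auditable=True,
        id="00000000-0000-0000-0000-000000000000",
        user={"id": "u-123", "role": "admin"},
        verb="POST",
        request_uri="http://localhost:8080/api/v1/prompts/create",
        response_status_code=200,
    ).info("")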

+ 210 - 131
backend/open_webui/utils/middleware.py

@@ -351,24 +351,25 @@ async def chat_web_search_handler(
                 all_results.append(results)
                 files = form_data.get("files", [])
 
-                if request.app.state.config.RAG_WEB_SEARCH_FULL_CONTEXT:
+                if results.get("collection_name"):
                     files.append(
                         {
-                            "docs": results.get("docs", []),
+                            "collection_name": results["collection_name"],
                             "name": searchQuery,
-                            "type": "web_search_docs",
+                            "type": "web_search",
                             "urls": results["filenames"],
                         }
                     )
-                else:
+                elif results.get("docs"):
                     files.append(
                         {
-                            "collection_name": results["collection_name"],
+                            "docs": results.get("docs", []),
                             "name": searchQuery,
-                            "type": "web_search_results",
+                            "type": "web_search",
                             "urls": results["filenames"],
                         }
                     )
+
                 form_data["files"] = files
         except Exception as e:
             log.exception(e)
@@ -518,6 +519,7 @@ async def chat_completion_files_handler(
     sources = []
 
     if files := body.get("metadata", {}).get("files", None):
+        queries = []
         try:
             queries_response = await generate_queries(
                 request,
@@ -543,8 +545,8 @@ async def chat_completion_files_handler(
                 queries_response = {"queries": [queries_response]}
 
             queries = queries_response.get("queries", [])
-        except Exception as e:
-            queries = []
+        except:
+            pass
 
         if len(queries) == 0:
             queries = [get_last_user_message(body["messages"])]
@@ -556,6 +558,7 @@ async def chat_completion_files_handler(
                 sources = await loop.run_in_executor(
                     executor,
                     lambda: get_sources_from_files(
+                        request=request,
                         files=files,
                         queries=queries,
                         embedding_function=lambda query: request.app.state.EMBEDDING_FUNCTION(
@@ -738,6 +741,7 @@ async def process_chat_payload(request, form_data, metadata, user, model):
 
     tool_ids = form_data.pop("tool_ids", None)
     files = form_data.pop("files", None)
+
     # Remove files duplicates
     if files:
         files = list({json.dumps(f, sort_keys=True): f for f in files}.values())
@@ -795,8 +799,6 @@ async def process_chat_payload(request, form_data, metadata, user, model):
     if len(sources) > 0:
         context_string = ""
         for source_idx, source in enumerate(sources):
-            source_id = source.get("source", {}).get("name", "")
-
             if "document" in source:
                 for doc_idx, doc_context in enumerate(source["document"]):
                     context_string += f"<source><source_id>{source_idx}</source_id><source_context>{doc_context}</source_context></source>\n"
@@ -1048,6 +1050,21 @@ async def process_chat_response(
     ):
         return response
 
+    extra_params = {
+        "__event_emitter__": event_emitter,
+        "__event_call__": event_caller,
+        "__user__": {
+            "id": user.id,
+            "email": user.email,
+            "name": user.name,
+            "role": user.role,
+        },
+        "__metadata__": metadata,
+        "__request__": request,
+        "__model__": metadata.get("model"),
+    }
+    filter_ids = get_sorted_filter_ids(form_data.get("model"))
+
     # Streaming response
     if event_emitter and event_caller:
         task_id = str(uuid4())  # Create a unique task ID.
@@ -1127,12 +1144,12 @@ async def process_chat_response(
 
                         if reasoning_duration is not None:
                             if raw:
-                                content = f'{content}\n<{block["tag"]}>{block["content"]}</{block["tag"]}>\n'
+                                content = f'{content}\n<{block["start_tag"]}>{block["content"]}<{block["end_tag"]}>\n'
                             else:
                                 content = f'{content}\n<details type="reasoning" done="true" duration="{reasoning_duration}">\n<summary>Thought for {reasoning_duration} seconds</summary>\n{reasoning_display_content}\n</details>\n'
                         else:
                             if raw:
-                                content = f'{content}\n<{block["tag"]}>{block["content"]}</{block["tag"]}>\n'
+                                content = f'{content}\n<{block["start_tag"]}>{block["content"]}<{block["end_tag"]}>\n'
                             else:
                                 content = f'{content}\n<details type="reasoning" done="false">\n<summary>Thinking…</summary>\n{reasoning_display_content}\n</details>\n'
 
@@ -1228,9 +1245,9 @@ async def process_chat_response(
                     return attributes
 
                 if content_blocks[-1]["type"] == "text":
-                    for tag in tags:
+                    for start_tag, end_tag in tags:
                         # Match start tag e.g., <tag> or <tag attr="value">
-                        start_tag_pattern = rf"<{tag}(\s.*?)?>"
+                        start_tag_pattern = rf"<{re.escape(start_tag)}(\s.*?)?>"
                         match = re.search(start_tag_pattern, content)
                         if match:
                             attr_content = (
@@ -1263,7 +1280,8 @@ async def process_chat_response(
                             content_blocks.append(
                                 {
                                     "type": content_type,
-                                    "tag": tag,
+                                    "start_tag": start_tag,
+                                    "end_tag": end_tag,
                                     "attributes": attributes,
                                     "content": "",
                                     "started_at": time.time(),
@@ -1275,9 +1293,10 @@ async def process_chat_response(
 
                             break
                 elif content_blocks[-1]["type"] == content_type:
-                    tag = content_blocks[-1]["tag"]
+                    start_tag = content_blocks[-1]["start_tag"]
+                    end_tag = content_blocks[-1]["end_tag"]
                     # Match end tag e.g., </tag>
-                    end_tag_pattern = rf"</{tag}>"
+                    end_tag_pattern = rf"<{re.escape(end_tag)}>"
 
                     # Check if the content has the end tag
                     if re.search(end_tag_pattern, content):
@@ -1285,7 +1304,7 @@ async def process_chat_response(
 
                         block_content = content_blocks[-1]["content"]
                         # Strip start and end tags from the content
-                        start_tag_pattern = rf"<{tag}(.*?)>"
+                        start_tag_pattern = rf"<{re.escape(start_tag)}(.*?)>"
                         block_content = re.sub(
                             start_tag_pattern, "", block_content
                         ).strip()
@@ -1350,7 +1369,7 @@ async def process_chat_response(
 
                         # Clean processed content
                         content = re.sub(
-                            rf"<{tag}(.*?)>(.|\n)*?</{tag}>",
+                            rf"<{re.escape(start_tag)}(.*?)>(.|\n)*?<{re.escape(end_tag)}>",
                             "",
                             content,
                             flags=re.DOTALL,
@@ -1388,19 +1407,24 @@ async def process_chat_response(
 
             # We might want to disable this by default
             DETECT_REASONING = True
+            DETECT_SOLUTION = True
             DETECT_CODE_INTERPRETER = metadata.get("features", {}).get(
                 "code_interpreter", False
             )
 
             reasoning_tags = [
-                "think",
-                "thinking",
-                "reason",
-                "reasoning",
-                "thought",
-                "Thought",
+                ("think", "/think"),
+                ("thinking", "/thinking"),
+                ("reason", "/reason"),
+                ("reasoning", "/reasoning"),
+                ("thought", "/thought"),
+                ("Thought", "/Thought"),
+                ("|begin_of_thought|", "|end_of_thought|"),
             ]
-            code_interpreter_tags = ["code_interpreter"]
+
+            code_interpreter_tags = [("code_interpreter", "/code_interpreter")]
+
+            solution_tags = [("|begin_of_solution|", "|end_of_solution|")]
 
             try:
                 for event in events:
@@ -1444,119 +1468,154 @@ async def process_chat_response(
                         try:
                             data = json.loads(data)
 
-                            if "selected_model_id" in data:
-                                model_id = data["selected_model_id"]
-                                Chats.upsert_message_to_chat_by_id_and_message_id(
-                                    metadata["chat_id"],
-                                    metadata["message_id"],
-                                    {
-                                        "selectedModelId": model_id,
-                                    },
-                                )
-                            else:
-                                choices = data.get("choices", [])
-                                if not choices:
-                                    continue
-
-                                delta = choices[0].get("delta", {})
-                                delta_tool_calls = delta.get("tool_calls", None)
-
-                                if delta_tool_calls:
-                                    for delta_tool_call in delta_tool_calls:
-                                        tool_call_index = delta_tool_call.get("index")
-
-                                        if tool_call_index is not None:
-                                            if (
-                                                len(response_tool_calls)
-                                                <= tool_call_index
-                                            ):
-                                                response_tool_calls.append(
-                                                    delta_tool_call
-                                                )
-                                            else:
-                                                delta_name = delta_tool_call.get(
-                                                    "function", {}
-                                                ).get("name")
-                                                delta_arguments = delta_tool_call.get(
-                                                    "function", {}
-                                                ).get("arguments")
-
-                                                if delta_name:
-                                                    response_tool_calls[
-                                                        tool_call_index
-                                                    ]["function"]["name"] += delta_name
-
-                                                if delta_arguments:
-                                                    response_tool_calls[
-                                                        tool_call_index
-                                                    ]["function"][
-                                                        "arguments"
-                                                    ] += delta_arguments
-
-                                value = delta.get("content")
-
-                                if value:
-                                    content = f"{content}{value}"
-
-                                    if not content_blocks:
-                                        content_blocks.append(
-                                            {
-                                                "type": "text",
-                                                "content": "",
-                                            }
-                                        )
+                            data, _ = await process_filter_functions(
+                                request=request,
+                                filter_ids=filter_ids,
+                                filter_type="stream",
+                                form_data=data,
+                                extra_params=extra_params,
+                            )
 
-                                    content_blocks[-1]["content"] = (
-                                        content_blocks[-1]["content"] + value
+                            if data:
+                                if "selected_model_id" in data:
+                                    model_id = data["selected_model_id"]
+                                    Chats.upsert_message_to_chat_by_id_and_message_id(
+                                        metadata["chat_id"],
+                                        metadata["message_id"],
+                                        {
+                                            "selectedModelId": model_id,
+                                        },
                                     )
+                                else:
+                                    choices = data.get("choices", [])
+                                    if not choices:
+                                        usage = data.get("usage", {})
+                                        if usage:
+                                            await event_emitter(
+                                                {
+                                                    "type": "chat:completion",
+                                                    "data": {
+                                                        "usage": usage,
+                                                    },
+                                                }
+                                            )
+                                        continue
+
+                                    delta = choices[0].get("delta", {})
+                                    delta_tool_calls = delta.get("tool_calls", None)
 
-                                    if DETECT_REASONING:
-                                        content, content_blocks, _ = (
-                                            tag_content_handler(
-                                                "reasoning",
-                                                reasoning_tags,
-                                                content,
-                                                content_blocks,
+                                    if delta_tool_calls:
+                                        for delta_tool_call in delta_tool_calls:
+                                            tool_call_index = delta_tool_call.get(
+                                                "index"
                                             )
-                                        )
 
-                                    if DETECT_CODE_INTERPRETER:
-                                        content, content_blocks, end = (
-                                            tag_content_handler(
-                                                "code_interpreter",
-                                                code_interpreter_tags,
-                                                content,
-                                                content_blocks,
+                                            if tool_call_index is not None:
+                                                if (
+                                                    len(response_tool_calls)
+                                                    <= tool_call_index
+                                                ):
+                                                    response_tool_calls.append(
+                                                        delta_tool_call
+                                                    )
+                                                else:
+                                                    delta_name = delta_tool_call.get(
+                                                        "function", {}
+                                                    ).get("name")
+                                                    delta_arguments = (
+                                                        delta_tool_call.get(
+                                                            "function", {}
+                                                        ).get("arguments")
+                                                    )
+
+                                                    if delta_name:
+                                                        response_tool_calls[
+                                                            tool_call_index
+                                                        ]["function"][
+                                                            "name"
+                                                        ] += delta_name
+
+                                                    if delta_arguments:
+                                                        response_tool_calls[
+                                                            tool_call_index
+                                                        ]["function"][
+                                                            "arguments"
+                                                        ] += delta_arguments
+
+                                    value = delta.get("content")
+
+                                    if value:
+                                        content = f"{content}{value}"
+
+                                        if not content_blocks:
+                                            content_blocks.append(
+                                                {
+                                                    "type": "text",
+                                                    "content": "",
+                                                }
                                             )
+
+                                        content_blocks[-1]["content"] = (
+                                            content_blocks[-1]["content"] + value
                                         )
 
-                                        if end:
-                                            break
+                                        if DETECT_REASONING:
+                                            content, content_blocks, _ = (
+                                                tag_content_handler(
+                                                    "reasoning",
+                                                    reasoning_tags,
+                                                    content,
+                                                    content_blocks,
+                                                )
+                                            )
 
-                                    if ENABLE_REALTIME_CHAT_SAVE:
-                                        # Save message in the database
-                                        Chats.upsert_message_to_chat_by_id_and_message_id(
-                                            metadata["chat_id"],
-                                            metadata["message_id"],
-                                            {
+                                        if DETECT_CODE_INTERPRETER:
+                                            content, content_blocks, end = (
+                                                tag_content_handler(
+                                                    "code_interpreter",
+                                                    code_interpreter_tags,
+                                                    content,
+                                                    content_blocks,
+                                                )
+                                            )
+
+                                            if end:
+                                                break
+
+                                        if DETECT_SOLUTION:
+                                            content, content_blocks, _ = (
+                                                tag_content_handler(
+                                                    "solution",
+                                                    solution_tags,
+                                                    content,
+                                                    content_blocks,
+                                                )
+                                            )
+
+                                        if ENABLE_REALTIME_CHAT_SAVE:
+                                            # Save message in the database
+                                            Chats.upsert_message_to_chat_by_id_and_message_id(
+                                                metadata["chat_id"],
+                                                metadata["message_id"],
+                                                {
+                                                    "content": serialize_content_blocks(
+                                                        content_blocks
+                                                    ),
+                                                },
+                                            )
+                                        else:
+                                            data = {
                                                 "content": serialize_content_blocks(
                                                     content_blocks
                                                 ),
-                                            },
-                                        )
-                                    else:
-                                        data = {
-                                            "content": serialize_content_blocks(
-                                                content_blocks
-                                            ),
-                                        }
+                                            }
 
-                            await event_emitter(
-                                {
-                                    "type": "chat:completion",
-                                    "data": data,
-                                }
-                            )
+                                await event_emitter(
+                                    {
+                                        "type": "chat:completion",
+                                        "data": data,
+                                    }
+                                )
                         except Exception as e:
                             done = "data: [DONE]" in line
                             if done:
@@ -1855,7 +1914,10 @@ async def process_chat_response(
                             }
                         )
 
-                        print(content_blocks, serialize_content_blocks(content_blocks))
+                        log.info(f"content_blocks={content_blocks}")
+                        log.info(
+                            f"serialize_content_blocks={serialize_content_blocks(content_blocks)}"
+                        )
 
                         try:
                             res = await generate_chat_completion(
@@ -1926,7 +1988,7 @@ async def process_chat_response(
 
                 await background_tasks_handler()
             except asyncio.CancelledError:
-                print("Task was cancelled!")
+                log.warning("Task was cancelled!")
                 await event_emitter({"type": "task-cancelled"})
 
                 if not ENABLE_REALTIME_CHAT_SAVE:
@@ -1947,17 +2009,34 @@ async def process_chat_response(
         return {"status": True, "task_id": task_id}
 
     else:
-
         # Fallback to the original response
         async def stream_wrapper(original_generator, events):
             def wrap_item(item):
                 return f"data: {item}\n\n"
 
             for event in events:
-                yield wrap_item(json.dumps(event))
+                event, _ = await process_filter_functions(
+                    request=request,
+                    filter_ids=filter_ids,
+                    filter_type="stream",
+                    form_data=event,
+                    extra_params=extra_params,
+                )
+
+                if event:
+                    yield wrap_item(json.dumps(event))
 
             async for data in original_generator:
-                yield data
+                data, _ = await process_filter_functions(
+                    request=request,
+                    filter_ids=filter_ids,
+                    filter_type="stream",
+                    form_data=data,
+                    extra_params=extra_params,
+                )
+
+                if data:
+                    yield data
 
         return StreamingResponse(
             stream_wrapper(response.body_iterator, events),
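
The (start_tag, end_tag) pairs and re.escape calls above exist so that delimiters containing regex metacharacters, such as |begin_of_thought| / |end_of_thought|, are matched literally. A standalone illustration of that escaping concern (the content string is made up; this is not the tag_content_handler itself):

    import re

    reasoning_tags = [
        ("think", "/think"),
        ("|begin_of_thought|", "|end_of_thought|"),
    ]

    content = "<|begin_of_thought|>check the units first<|end_of_thought|>42"

    for start_tag, end_tag in reasoning_tags:
        # Without re.escape, "|" would act as regex alternation and the
        # pattern could match stray "<" characters instead of the literal tag.
        start_tag_pattern = rf"<{re.escape(start_tag)}(\s.*?)?>"
        end_tag_pattern = rf"<{re.escape(end_tag)}>"
        if re.search(start_tag_pattern, content) and re.search(end_tag_pattern, content):
            remaining = re.sub(
                rf"<{re.escape(start_tag)}(.*?)>(.|\n)*?<{re.escape(end_tag)}>",
                "",
                content,
                flags=re.DOTALL,
            )
            print(f"matched {start_tag!r}; remaining content: {remaining!r}")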

+ 6 - 1
backend/open_webui/utils/misc.py

@@ -2,12 +2,17 @@ import hashlib
 import re
 import time
 import uuid
+import logging
 from datetime import timedelta
 from pathlib import Path
 from typing import Callable, Optional
 
 
 import collections.abc
+from open_webui.env import SRC_LOG_LEVELS
+
+log = logging.getLogger(__name__)
+log.setLevel(SRC_LOG_LEVELS["MAIN"])
 
 
 def deep_update(d, u):
@@ -412,7 +417,7 @@ def parse_ollama_modelfile(model_text):
                 elif param_type is bool:
                     value = value.lower() == "true"
             except Exception as e:
-                print(e)
+                log.exception(f"Failed to parse parameter {param}: {e}")
                 continue
 
             data["params"][param] = value

+ 6 - 5
backend/open_webui/utils/models.py

@@ -22,6 +22,7 @@ from open_webui.config import (
 )
 
 from open_webui.env import SRC_LOG_LEVELS, GLOBAL_LOG_LEVEL
+from open_webui.models.users import UserModel
 
 
 logging.basicConfig(stream=sys.stdout, level=GLOBAL_LOG_LEVEL)
@@ -29,17 +30,17 @@ log = logging.getLogger(__name__)
 log.setLevel(SRC_LOG_LEVELS["MAIN"])
 
 
-async def get_all_base_models(request: Request):
+async def get_all_base_models(request: Request, user: UserModel = None):
     function_models = []
     openai_models = []
     ollama_models = []
 
     if request.app.state.config.ENABLE_OPENAI_API:
-        openai_models = await openai.get_all_models(request)
+        openai_models = await openai.get_all_models(request, user=user)
         openai_models = openai_models["data"]
 
     if request.app.state.config.ENABLE_OLLAMA_API:
-        ollama_models = await ollama.get_all_models(request)
+        ollama_models = await ollama.get_all_models(request, user=user)
         ollama_models = [
             {
                 "id": model["model"],
@@ -58,8 +59,8 @@ async def get_all_base_models(request: Request):
     return models
 
 
-async def get_all_models(request):
-    models = await get_all_base_models(request)
+async def get_all_models(request, user: UserModel = None):
+    models = await get_all_base_models(request, user=user)
 
     # If there are no models, return an empty list
     if len(models) == 0:

+ 1 - 10
backend/open_webui/utils/oauth.py

@@ -146,7 +146,7 @@ class OAuthManager:
             nested_claims = oauth_claim.split(".")
             for nested_claim in nested_claims:
                 claim_data = claim_data.get(nested_claim, {})
-            user_oauth_groups = claim_data if isinstance(claim_data, list) else None
+            user_oauth_groups = claim_data if isinstance(claim_data, list) else []
 
         user_current_groups: list[GroupModel] = Groups.get_groups_by_member_id(user.id)
         all_available_groups: list[GroupModel] = Groups.get_groups()
@@ -315,15 +315,6 @@ class OAuthManager:
         if not user:
             user_count = Users.get_num_users()
 
-            if (
-                request.app.state.USER_COUNT
-                and user_count >= request.app.state.USER_COUNT
-            ):
-                raise HTTPException(
-                    403,
-                    detail=ERROR_MESSAGES.ACCESS_PROHIBITED,
-                )
-
             # If the user does not exist, check if signups are enabled
             if auth_manager_config.ENABLE_OAUTH_SIGNUP:
                 # Check if an existing user with the same email already exists

+ 7 - 1
backend/open_webui/utils/payload.py

@@ -124,7 +124,7 @@ def convert_messages_openai_to_ollama(messages: list[dict]) -> list[dict]:
         tool_call_id = message.get("tool_call_id", None)
 
         # Check if the content is a string (just a simple message)
-        if isinstance(content, str):
+        if isinstance(content, str) and not tool_calls:
             # If the content is a string, it's pure text
             new_message["content"] = content
 
@@ -230,6 +230,12 @@ def convert_payload_openai_to_ollama(openai_payload: dict) -> dict:
                 "system"
             ]  # To prevent Ollama warning of invalid option provided
 
+    # If the "stop" parameter is present in the openai_payload, remap it to ollama_payload.options
+    if "stop" in openai_payload:
+        ollama_options = ollama_payload.get("options", {})
+        ollama_options["stop"] = openai_payload.get("stop")
+        ollama_payload["options"] = ollama_options
+
     if "metadata" in openai_payload:
         ollama_payload["metadata"] = openai_payload["metadata"]
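
The new hunk above moves the OpenAI top-level "stop" field into Ollama's options dict. A standalone sketch of just that remapping, with invented payload values:

    def remap_stop(openai_payload: dict) -> dict:
        # Mirrors the hunk above: "stop" lives at the top level in the OpenAI
        # schema but under "options" in the Ollama schema.
        ollama_payload = {"model": openai_payload.get("model"), "options": {}}
        if "stop" in openai_payload:
            ollama_options = ollama_payload.get("options", {})
            ollama_options["stop"] = openai_payload.get("stop")
            ollama_payload["options"] = ollama_options
        return ollama_payload

    print(remap_stop({"model": "llama3", "stop": ["###", "User:"]}))
    # -> {'model': 'llama3', 'options': {'stop': ['###', 'User:']}}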
 

+ 1 - 1
backend/open_webui/utils/plugin.py

@@ -45,7 +45,7 @@ def extract_frontmatter(content):
                     frontmatter[key.strip()] = value.strip()
 
     except Exception as e:
-        print(f"An error occurred: {e}")
+        log.exception(f"Failed to extract frontmatter: {e}")
         return {}
 
     return frontmatter

+ 2 - 2
backend/open_webui/utils/response.py

@@ -104,7 +104,7 @@ async def convert_streaming_response_ollama_to_openai(ollama_streaming_response)
         data = json.loads(data)
 
         model = data.get("model", "ollama")
-        message_content = data.get("message", {}).get("content", "")
+        message_content = data.get("message", {}).get("content", None)
         tool_calls = data.get("message", {}).get("tool_calls", None)
         openai_tool_calls = None
 
@@ -118,7 +118,7 @@ async def convert_streaming_response_ollama_to_openai(ollama_streaming_response)
             usage = convert_ollama_usage_to_openai(data)
 
         data = openai_chat_chunk_message_template(
-            model, message_content if not done else None, openai_tool_calls, usage
+            model, message_content, openai_tool_calls, usage
         )
 
         line = f"data: {json.dumps(data)}\n\n"

+ 4 - 0
backend/requirements.txt

@@ -31,6 +31,9 @@ APScheduler==3.10.4
 
 RestrictedPython==8.0
 
+loguru==0.7.2
+asgiref==3.8.1
+
 # AI libraries
 openai
 anthropic
@@ -71,6 +74,7 @@ validators==0.34.0
 psutil
 sentencepiece
 soundfile==0.13.1
+azure-ai-documentintelligence==1.0.0
 
 opencv-python-headless==4.11.0.86
 rapidocr-onnxruntime==1.3.24

+ 2 - 2
package-lock.json

@@ -1,12 +1,12 @@
 {
 	"name": "open-webui",
-	"version": "0.5.16",
+	"version": "0.5.17",
 	"lockfileVersion": 3,
 	"requires": true,
 	"packages": {
 		"": {
 			"name": "open-webui",
-			"version": "0.5.16",
+			"version": "0.5.17",
 			"dependencies": {
 				"@codemirror/lang-javascript": "^6.2.2",
 				"@codemirror/lang-python": "^6.1.6",

+ 1 - 1
package.json

@@ -1,6 +1,6 @@
 {
 	"name": "open-webui",
-	"version": "0.5.16",
+	"version": "0.5.17",
 	"private": true,
 	"scripts": {
 		"dev": "npm run pyodide:fetch && vite dev --host",

+ 4 - 0
pyproject.toml

@@ -40,6 +40,9 @@ dependencies = [
 
     "RestrictedPython==8.0",
 
+    "loguru==0.7.2",
+    "asgiref==3.8.1",
+
     "openai",
     "anthropic",
     "google-generativeai==0.7.2",
@@ -78,6 +81,7 @@ dependencies = [
     "psutil",
     "sentencepiece",
     "soundfile==0.13.1",
+    "azure-ai-documentintelligence==1.0.0",
 
     "opencv-python-headless==4.11.0.86",
     "rapidocr-onnxruntime==1.3.24",

+ 7 - 0
src/lib/apis/retrieval/index.ts

@@ -32,9 +32,15 @@ type ChunkConfigForm = {
 	chunk_overlap: number;
 };
 
+type DocumentIntelligenceConfigForm = {
+	key: string;
+	endpoint: string;
+};
+
 type ContentExtractConfigForm = {
 	engine: string;
 	tika_server_url: string | null;
+	document_intelligence_config: DocumentIntelligenceConfigForm | null;
 };
 
 type YoutubeConfigForm = {
@@ -46,6 +52,7 @@ type YoutubeConfigForm = {
 type RAGConfigForm = {
 	pdf_extract_images?: boolean;
 	enable_google_drive_integration?: boolean;
+	enable_onedrive_integration?: boolean;
 	chunk?: ChunkConfigForm;
 	content_extraction?: ContentExtractConfigForm;
 	web_loader_ssl_verification?: boolean;

+ 7 - 6
src/lib/components/admin/Functions/FunctionEditor.svelte

@@ -1,8 +1,7 @@
 <script>
-	import { getContext, createEventDispatcher, onMount, tick } from 'svelte';
+	import { getContext, onMount, tick } from 'svelte';
 	import { goto } from '$app/navigation';
 
-	const dispatch = createEventDispatcher();
 	const i18n = getContext('i18n');
 
 	import CodeEditor from '$lib/components/common/CodeEditor.svelte';
@@ -15,6 +14,8 @@
 	let loading = false;
 	let showConfirm = false;
 
+	export let onSave = () => {};
+
 	export let edit = false;
 	export let clone = false;
 
@@ -256,7 +257,7 @@ class Pipe:
 
 	const saveHandler = async () => {
 		loading = true;
-		dispatch('save', {
+		onSave({
 			id,
 			name,
 			meta,
@@ -371,10 +372,10 @@ class Pipe:
 						value={content}
 						lang="python"
 						{boilerplate}
-						on:change={(e) => {
-							_content = e.detail.value;
+						onChange={(e) => {
+							_content = e;
 						}}
-						on:save={async () => {
+						onSave={async () => {
 							if (formElement) {
 								formElement.requestSubmit();
 							}

File diff content too large to display
+ 461 - 430
src/lib/components/admin/Settings/Documents.svelte


+ 47 - 41
src/lib/components/admin/Settings/Evaluations.svelte

@@ -103,10 +103,12 @@
 	<div class="overflow-y-scroll scrollbar-hidden h-full">
 		{#if evaluationConfig !== null}
 			<div class="">
-				<div class="text-sm font-medium mb-2">{$i18n.t('General Settings')}</div>
+				<div class="mb-3">
+					<div class=" mb-2.5 text-base font-medium">{$i18n.t('General')}</div>
 
-				<div class=" mb-2">
-					<div class="flex justify-between items-center text-xs">
+					<hr class=" border-gray-100 dark:border-gray-850 my-2" />
+
+					<div class="mb-2.5 flex w-full justify-between">
 						<div class=" text-xs font-medium">{$i18n.t('Arena Models')}</div>
 
 						<Tooltip content={$i18n.t(`Message rating should be enabled to use this feature`)}>
@@ -116,46 +118,50 @@
 				</div>
 
 				{#if evaluationConfig.ENABLE_EVALUATION_ARENA_MODELS}
-					<hr class=" border-gray-50 dark:border-gray-700/10 my-2" />
-
-					<div class="flex justify-between items-center mb-2">
-						<div class="text-sm font-medium">{$i18n.t('Manage Arena Models')}</div>
-
-						<div>
-							<Tooltip content={$i18n.t('Add Arena Model')}>
-								<button
-									class="p-1"
-									type="button"
-									on:click={() => {
-										showAddModel = true;
-									}}
-								>
-									<Plus />
-								</button>
-							</Tooltip>
-						</div>
-					</div>
+					<div class="mb-3">
+						<div class=" mb-2.5 text-base font-medium flex justify-between items-center">
+							<div>
+								{$i18n.t('Manage')}
+							</div>
 
-					<div class="flex flex-col gap-2">
-						{#if (evaluationConfig?.EVALUATION_ARENA_MODELS ?? []).length > 0}
-							{#each evaluationConfig.EVALUATION_ARENA_MODELS as model, index}
-								<Model
-									{model}
-									on:edit={(e) => {
-										editModelHandler(e.detail, index);
-									}}
-									on:delete={(e) => {
-										deleteModelHandler(index);
-									}}
-								/>
-							{/each}
-						{:else}
-							<div class=" text-center text-xs text-gray-500">
-								{$i18n.t(
-									`Using the default arena model with all models. Click the plus button to add custom models.`
-								)}
+							<div>
+								<Tooltip content={$i18n.t('Add Arena Model')}>
+									<button
+										class="p-1"
+										type="button"
+										on:click={() => {
+											showAddModel = true;
+										}}
+									>
+										<Plus />
+									</button>
+								</Tooltip>
 							</div>
-						{/if}
+						</div>
+
+						<hr class=" border-gray-100 dark:border-gray-850 my-2" />
+
+						<div class="flex flex-col gap-2">
+							{#if (evaluationConfig?.EVALUATION_ARENA_MODELS ?? []).length > 0}
+								{#each evaluationConfig.EVALUATION_ARENA_MODELS as model, index}
+									<Model
+										{model}
+										on:edit={(e) => {
+											editModelHandler(e.detail, index);
+										}}
+										on:delete={(e) => {
+											deleteModelHandler(index);
+										}}
+									/>
+								{/each}
+							{:else}
+								<div class=" text-center text-xs text-gray-500">
+									{$i18n.t(
+										`Using the default arena model with all models. Click the plus button to add custom models.`
+									)}
+								</div>
+							{/if}
+						</div>
 					</div>
 				{/if}
 			</div>

+ 46 - 2
src/lib/components/admin/Settings/Models/ConfigureModelsModal.svelte

@@ -16,6 +16,8 @@
 	import Spinner from '$lib/components/common/Spinner.svelte';
 	import Minus from '$lib/components/icons/Minus.svelte';
 	import Plus from '$lib/components/icons/Plus.svelte';
+	import ChevronUp from '$lib/components/icons/ChevronUp.svelte';
+	import ChevronDown from '$lib/components/icons/ChevronDown.svelte';
 
 	export let show = false;
 	export let initHandler = () => {};
@@ -26,6 +28,9 @@
 	let defaultModelIds = [];
 	let modelIds = [];
 
+	let sortKey = '';
+	let sortOrder = '';
+
 	let loading = false;
 	let showResetModal = false;
 
@@ -71,6 +76,9 @@
 			// Add remaining IDs not in MODEL_ORDER_LIST, sorted alphabetically
 			...allModelIds.filter((id) => !orderedSet.has(id)).sort((a, b) => a.localeCompare(b))
 		];
+
+		sortKey = '';
+		sortOrder = '';
 	};
 	const submitHandler = async () => {
 		loading = true;
@@ -145,9 +153,45 @@
 					>
 						<div>
 							<div class="flex flex-col w-full">
-								<div class="mb-1 flex justify-between">
+								<button
+									class="mb-1 flex gap-2"
+									type="button"
+									on:click={() => {
+										sortKey = 'model';
+
+										if (sortOrder === 'asc') {
+											sortOrder = 'desc';
+										} else {
+											sortOrder = 'asc';
+										}
+
+										modelIds = modelIds
+											.filter((id) => id !== '')
+											.sort((a, b) => {
+												const nameA = $models.find((model) => model.id === a)?.name || a;
+												const nameB = $models.find((model) => model.id === b)?.name || b;
+												return sortOrder === 'desc'
+													? nameA.localeCompare(nameB)
+													: nameB.localeCompare(nameA);
+											});
+									}}
+								>
 									<div class="text-xs text-gray-500">{$i18n.t('Reorder Models')}</div>
-								</div>
+
+									{#if sortKey === 'model'}
+										<span class="font-normal self-center">
+											{#if sortOrder === 'asc'}
+												<ChevronUp className="size-3" />
+											{:else}
+												<ChevronDown className="size-3" />
+											{/if}
+										</span>
+									{:else}
+										<span class="invisible">
+											<ChevronUp className="size-3" />
+										</span>
+									{/if}
+								</button>
 
 								<ModelList bind:modelIds />
 							</div>

+ 18 - 8
src/lib/components/admin/Settings/Models/ModelList.svelte

@@ -21,14 +21,24 @@
 		modelIds = modelList;
 	};
 
-	onMount(() => {
-		sortable = Sortable.create(modelListElement, {
-			animation: 150,
-			onUpdate: async (event) => {
-				positionChangeHandler();
-			}
-		});
-	});
+	$: if (modelIds) {
+		init();
+	}
+
+	const init = () => {
+		if (sortable) {
+			sortable.destroy();
+		}
+
+		if (modelListElement) {
+			sortable = Sortable.create(modelListElement, {
+				animation: 150,
+				onUpdate: async (event) => {
+					positionChangeHandler();
+				}
+			});
+		}
+	};
 </script>
 
 {#if modelIds.length > 0}

+ 321 - 289
src/lib/components/admin/Settings/WebSearch.svelte

@@ -85,384 +85,416 @@
 >
 	<div class=" space-y-3 overflow-y-scroll scrollbar-hidden h-full">
 		{#if webConfig}
-			<div>
-				<div class=" mb-1 text-sm font-medium">
-					{$i18n.t('Web Search')}
-				</div>
+			<div class="">
+				<div class="mb-3">
+					<div class=" mb-2.5 text-base font-medium">{$i18n.t('General')}</div>
+
+					<hr class=" border-gray-100 dark:border-gray-850 my-2" />
 
-				<div>
-					<div class=" py-0.5 flex w-full justify-between">
+					<div class="  mb-2.5 flex w-full justify-between">
 						<div class=" self-center text-xs font-medium">
-							{$i18n.t('Enable Web Search')}
+							{$i18n.t('Web Search')}
+						</div>
+						<div class="flex items-center relative">
+							<Switch bind:state={webConfig.search.enabled} />
 						</div>
-
-						<Switch bind:state={webConfig.search.enabled} />
-					</div>
-				</div>
-
-				<div class=" py-0.5 flex w-full justify-between">
-					<div class=" self-center text-xs font-medium">{$i18n.t('Web Search Engine')}</div>
-					<div class="flex items-center relative">
-						<select
-							class="dark:bg-gray-900 w-fit pr-8 rounded-sm px-2 p-1 text-xs bg-transparent outline-hidden text-right"
-							bind:value={webConfig.search.engine}
-							placeholder={$i18n.t('Select a engine')}
-							required
-						>
-							<option disabled selected value="">{$i18n.t('Select a engine')}</option>
-							{#each webSearchEngines as engine}
-								<option value={engine}>{engine}</option>
-							{/each}
-						</select>
 					</div>
-				</div>
 
-				<div class=" py-0.5 flex w-full justify-between">
-					<div class=" self-center text-xs font-medium">{$i18n.t('Full Context Mode')}</div>
-					<div class="flex items-center relative">
-						<Tooltip
-							content={webConfig.RAG_WEB_SEARCH_FULL_CONTEXT
-								? 'Inject the entire web results as context for comprehensive processing, this is recommended for complex queries.'
-								: 'Default to segmented retrieval for focused and relevant content extraction, this is recommended for most cases.'}
-						>
-							<Switch bind:state={webConfig.RAG_WEB_SEARCH_FULL_CONTEXT} />
-						</Tooltip>
+					<div class="  mb-2.5 flex w-full justify-between">
+						<div class=" self-center text-xs font-medium">
+							{$i18n.t('Web Search Engine')}
+						</div>
+						<div class="flex items-center relative">
+							<select
+								class="dark:bg-gray-900 w-fit pr-8 rounded-sm px-2 p-1 text-xs bg-transparent outline-hidden text-right"
+								bind:value={webConfig.search.engine}
+								placeholder={$i18n.t('Select a engine')}
+								required
+							>
+								<option disabled selected value="">{$i18n.t('Select a engine')}</option>
+								{#each webSearchEngines as engine}
+									<option value={engine}>{engine}</option>
+								{/each}
+							</select>
+						</div>
 					</div>
-				</div>
 
-				{#if webConfig.search.engine !== ''}
-					<div class="mt-1.5">
+					{#if webConfig.search.engine !== ''}
 						{#if webConfig.search.engine === 'searxng'}
-							<div>
-								<div class=" self-center text-xs font-medium mb-1">
-									{$i18n.t('Searxng Query URL')}
-								</div>
+							<div class="mb-2.5 flex w-full flex-col">
+								<div>
+									<div class=" self-center text-xs font-medium mb-1">
+										{$i18n.t('Searxng Query URL')}
+									</div>
 
-								<div class="flex w-full">
-									<div class="flex-1">
-										<input
-											class="w-full rounded-lg py-2 px-4 text-sm bg-gray-50 dark:text-gray-300 dark:bg-gray-850 outline-hidden"
-											type="text"
-											placeholder={$i18n.t('Enter Searxng Query URL')}
-											bind:value={webConfig.search.searxng_query_url}
-											autocomplete="off"
-										/>
+									<div class="flex w-full">
+										<div class="flex-1">
+											<input
+												class="w-full rounded-lg py-2 px-4 text-sm bg-gray-50 dark:text-gray-300 dark:bg-gray-850 outline-hidden"
+												type="text"
+												placeholder={$i18n.t('Enter Searxng Query URL')}
+												bind:value={webConfig.search.searxng_query_url}
+												autocomplete="off"
+											/>
+										</div>
 									</div>
 								</div>
 							</div>
 						{:else if webConfig.search.engine === 'google_pse'}
-							<div>
-								<div class=" self-center text-xs font-medium mb-1">
-									{$i18n.t('Google PSE API Key')}
-								</div>
+							<div class="mb-2.5 flex w-full flex-col">
+								<div>
+									<div class=" self-center text-xs font-medium mb-1">
+										{$i18n.t('Google PSE API Key')}
+									</div>
 
-								<SensitiveInput
-									placeholder={$i18n.t('Enter Google PSE API Key')}
-									bind:value={webConfig.search.google_pse_api_key}
-								/>
-							</div>
-							<div class="mt-1.5">
-								<div class=" self-center text-xs font-medium mb-1">
-									{$i18n.t('Google PSE Engine Id')}
+									<SensitiveInput
+										placeholder={$i18n.t('Enter Google PSE API Key')}
+										bind:value={webConfig.search.google_pse_api_key}
+									/>
 								</div>
+								<div class="mt-1.5">
+									<div class=" self-center text-xs font-medium mb-1">
+										{$i18n.t('Google PSE Engine Id')}
+									</div>
 
-								<div class="flex w-full">
-									<div class="flex-1">
-										<input
-											class="w-full rounded-lg py-2 px-4 text-sm bg-gray-50 dark:text-gray-300 dark:bg-gray-850 outline-hidden"
-											type="text"
-											placeholder={$i18n.t('Enter Google PSE Engine Id')}
-											bind:value={webConfig.search.google_pse_engine_id}
-											autocomplete="off"
-										/>
+									<div class="flex w-full">
+										<div class="flex-1">
+											<input
+												class="w-full rounded-lg py-2 px-4 text-sm bg-gray-50 dark:text-gray-300 dark:bg-gray-850 outline-hidden"
+												type="text"
+												placeholder={$i18n.t('Enter Google PSE Engine Id')}
+												bind:value={webConfig.search.google_pse_engine_id}
+												autocomplete="off"
+											/>
+										</div>
 									</div>
 								</div>
 							</div>
 						{:else if webConfig.search.engine === 'brave'}
-							<div>
-								<div class=" self-center text-xs font-medium mb-1">
-									{$i18n.t('Brave Search API Key')}
-								</div>
+							<div class="mb-2.5 flex w-full flex-col">
+								<div>
+									<div class=" self-center text-xs font-medium mb-1">
+										{$i18n.t('Brave Search API Key')}
+									</div>
 
-								<SensitiveInput
-									placeholder={$i18n.t('Enter Brave Search API Key')}
-									bind:value={webConfig.search.brave_search_api_key}
-								/>
+									<SensitiveInput
+										placeholder={$i18n.t('Enter Brave Search API Key')}
+										bind:value={webConfig.search.brave_search_api_key}
+									/>
+								</div>
 							</div>
 						{:else if webConfig.search.engine === 'kagi'}
-							<div>
-								<div class=" self-center text-xs font-medium mb-1">
-									{$i18n.t('Kagi Search API Key')}
-								</div>
+							<div class="mb-2.5 flex w-full flex-col">
+								<div>
+									<div class=" self-center text-xs font-medium mb-1">
+										{$i18n.t('Kagi Search API Key')}
+									</div>
 
-								<SensitiveInput
-									placeholder={$i18n.t('Enter Kagi Search API Key')}
-									bind:value={webConfig.search.kagi_search_api_key}
-								/>
+									<SensitiveInput
+										placeholder={$i18n.t('Enter Kagi Search API Key')}
+										bind:value={webConfig.search.kagi_search_api_key}
+									/>
+								</div>
 							</div>
 						{:else if webConfig.search.engine === 'mojeek'}
-							<div>
-								<div class=" self-center text-xs font-medium mb-1">
-									{$i18n.t('Mojeek Search API Key')}
-								</div>
+							<div class="mb-2.5 flex w-full flex-col">
+								<div>
+									<div class=" self-center text-xs font-medium mb-1">
+										{$i18n.t('Mojeek Search API Key')}
+									</div>
 
-								<SensitiveInput
-									placeholder={$i18n.t('Enter Mojeek Search API Key')}
-									bind:value={webConfig.search.mojeek_search_api_key}
-								/>
+									<SensitiveInput
+										placeholder={$i18n.t('Enter Mojeek Search API Key')}
+										bind:value={webConfig.search.mojeek_search_api_key}
+									/>
+								</div>
 							</div>
 						{:else if webConfig.search.engine === 'bocha'}
-							<div>
-								<div class=" self-center text-xs font-medium mb-1">
-									{$i18n.t('Bocha Search API Key')}
-								</div>
+							<div class="mb-2.5 flex w-full flex-col">
+								<div>
+									<div class=" self-center text-xs font-medium mb-1">
+										{$i18n.t('Bocha Search API Key')}
+									</div>
 
-								<SensitiveInput
-									placeholder={$i18n.t('Enter Bocha Search API Key')}
-									bind:value={webConfig.search.bocha_search_api_key}
-								/>
+									<SensitiveInput
+										placeholder={$i18n.t('Enter Bocha Search API Key')}
+										bind:value={webConfig.search.bocha_search_api_key}
+									/>
+								</div>
 							</div>
 						{:else if webConfig.search.engine === 'serpstack'}
-							<div>
-								<div class=" self-center text-xs font-medium mb-1">
-									{$i18n.t('Serpstack API Key')}
-								</div>
+							<div class="mb-2.5 flex w-full flex-col">
+								<div>
+									<div class=" self-center text-xs font-medium mb-1">
+										{$i18n.t('Serpstack API Key')}
+									</div>
 
-								<SensitiveInput
-									placeholder={$i18n.t('Enter Serpstack API Key')}
-									bind:value={webConfig.search.serpstack_api_key}
-								/>
+									<SensitiveInput
+										placeholder={$i18n.t('Enter Serpstack API Key')}
+										bind:value={webConfig.search.serpstack_api_key}
+									/>
+								</div>
 							</div>
 						{:else if webConfig.search.engine === 'serper'}
-							<div>
-								<div class=" self-center text-xs font-medium mb-1">
-									{$i18n.t('Serper API Key')}
-								</div>
+							<div class="mb-2.5 flex w-full flex-col">
+								<div>
+									<div class=" self-center text-xs font-medium mb-1">
+										{$i18n.t('Serper API Key')}
+									</div>
 
-								<SensitiveInput
-									placeholder={$i18n.t('Enter Serper API Key')}
-									bind:value={webConfig.search.serper_api_key}
-								/>
+									<SensitiveInput
+										placeholder={$i18n.t('Enter Serper API Key')}
+										bind:value={webConfig.search.serper_api_key}
+									/>
+								</div>
 							</div>
 						{:else if webConfig.search.engine === 'serply'}
-							<div>
-								<div class=" self-center text-xs font-medium mb-1">
-									{$i18n.t('Serply API Key')}
-								</div>
+							<div class="mb-2.5 flex w-full flex-col">
+								<div>
+									<div class=" self-center text-xs font-medium mb-1">
+										{$i18n.t('Serply API Key')}
+									</div>
 
-								<SensitiveInput
-									placeholder={$i18n.t('Enter Serply API Key')}
-									bind:value={webConfig.search.serply_api_key}
-								/>
+									<SensitiveInput
+										placeholder={$i18n.t('Enter Serply API Key')}
+										bind:value={webConfig.search.serply_api_key}
+									/>
+								</div>
 							</div>
 						{:else if webConfig.search.engine === 'searchapi'}
-							<div>
-								<div class=" self-center text-xs font-medium mb-1">
-									{$i18n.t('SearchApi API Key')}
-								</div>
+							<div class="mb-2.5 flex w-full flex-col">
+								<div>
+									<div class=" self-center text-xs font-medium mb-1">
+										{$i18n.t('SearchApi API Key')}
+									</div>
 
-								<SensitiveInput
-									placeholder={$i18n.t('Enter SearchApi API Key')}
-									bind:value={webConfig.search.searchapi_api_key}
-								/>
-							</div>
-							<div class="mt-1.5">
-								<div class=" self-center text-xs font-medium mb-1">
-									{$i18n.t('SearchApi Engine')}
+									<SensitiveInput
+										placeholder={$i18n.t('Enter SearchApi API Key')}
+										bind:value={webConfig.search.searchapi_api_key}
+									/>
 								</div>
+								<div class="mt-1.5">
+									<div class=" self-center text-xs font-medium mb-1">
+										{$i18n.t('SearchApi Engine')}
+									</div>
 
-								<div class="flex w-full">
-									<div class="flex-1">
-										<input
-											class="w-full rounded-lg py-2 px-4 text-sm bg-gray-50 dark:text-gray-300 dark:bg-gray-850 outline-hidden"
-											type="text"
-											placeholder={$i18n.t('Enter SearchApi Engine')}
-											bind:value={webConfig.search.searchapi_engine}
-											autocomplete="off"
-										/>
+									<div class="flex w-full">
+										<div class="flex-1">
+											<input
+												class="w-full rounded-lg py-2 px-4 text-sm bg-gray-50 dark:text-gray-300 dark:bg-gray-850 outline-hidden"
+												type="text"
+												placeholder={$i18n.t('Enter SearchApi Engine')}
+												bind:value={webConfig.search.searchapi_engine}
+												autocomplete="off"
+											/>
+										</div>
 									</div>
 								</div>
 							</div>
 						{:else if webConfig.search.engine === 'serpapi'}
-							<div>
-								<div class=" self-center text-xs font-medium mb-1">
-									{$i18n.t('SerpApi API Key')}
-								</div>
+							<div class="mb-2.5 flex w-full flex-col">
+								<div>
+									<div class=" self-center text-xs font-medium mb-1">
+										{$i18n.t('SerpApi API Key')}
+									</div>
 
-								<SensitiveInput
-									placeholder={$i18n.t('Enter SerpApi API Key')}
-									bind:value={webConfig.search.serpapi_api_key}
-								/>
-							</div>
-							<div class="mt-1.5">
-								<div class=" self-center text-xs font-medium mb-1">
-									{$i18n.t('SerpApi Engine')}
+									<SensitiveInput
+										placeholder={$i18n.t('Enter SerpApi API Key')}
+										bind:value={webConfig.search.serpapi_api_key}
+									/>
 								</div>
+								<div class="mt-1.5">
+									<div class=" self-center text-xs font-medium mb-1">
+										{$i18n.t('SerpApi Engine')}
+									</div>
 
-								<div class="flex w-full">
-									<div class="flex-1">
-										<input
-											class="w-full rounded-lg py-2 px-4 text-sm bg-gray-50 dark:text-gray-300 dark:bg-gray-850 outline-hidden"
-											type="text"
-											placeholder={$i18n.t('Enter SerpApi Engine')}
-											bind:value={webConfig.search.serpapi_engine}
-											autocomplete="off"
-										/>
+									<div class="flex w-full">
+										<div class="flex-1">
+											<input
+												class="w-full rounded-lg py-2 px-4 text-sm bg-gray-50 dark:text-gray-300 dark:bg-gray-850 outline-hidden"
+												type="text"
+												placeholder={$i18n.t('Enter SerpApi Engine')}
+												bind:value={webConfig.search.serpapi_engine}
+												autocomplete="off"
+											/>
+										</div>
 									</div>
 								</div>
 							</div>
 						{:else if webConfig.search.engine === 'tavily'}
-							<div>
-								<div class=" self-center text-xs font-medium mb-1">
-									{$i18n.t('Tavily API Key')}
-								</div>
+							<div class="mb-2.5 flex w-full flex-col">
+								<div>
+									<div class=" self-center text-xs font-medium mb-1">
+										{$i18n.t('Tavily API Key')}
+									</div>
 
-								<SensitiveInput
-									placeholder={$i18n.t('Enter Tavily API Key')}
-									bind:value={webConfig.search.tavily_api_key}
-								/>
+									<SensitiveInput
+										placeholder={$i18n.t('Enter Tavily API Key')}
+										bind:value={webConfig.search.tavily_api_key}
+									/>
+								</div>
 							</div>
 						{:else if webConfig.search.engine === 'jina'}
-							<div>
-								<div class=" self-center text-xs font-medium mb-1">
-									{$i18n.t('Jina API Key')}
-								</div>
+							<div class="mb-2.5 flex w-full flex-col">
+								<div>
+									<div class=" self-center text-xs font-medium mb-1">
+										{$i18n.t('Jina API Key')}
+									</div>
 
-								<SensitiveInput
-									placeholder={$i18n.t('Enter Jina API Key')}
-									bind:value={webConfig.search.jina_api_key}
-								/>
+									<SensitiveInput
+										placeholder={$i18n.t('Enter Jina API Key')}
+										bind:value={webConfig.search.jina_api_key}
+									/>
+								</div>
 							</div>
 						{:else if webConfig.search.engine === 'exa'}
-							<div>
-								<div class=" self-center text-xs font-medium mb-1">
-									{$i18n.t('Exa API Key')}
-								</div>
+							<div class="mb-2.5 flex w-full flex-col">
+								<div>
+									<div class=" self-center text-xs font-medium mb-1">
+										{$i18n.t('Exa API Key')}
+									</div>
 
-								<SensitiveInput
-									placeholder={$i18n.t('Enter Exa API Key')}
-									bind:value={webConfig.search.exa_api_key}
-								/>
+									<SensitiveInput
+										placeholder={$i18n.t('Enter Exa API Key')}
+										bind:value={webConfig.search.exa_api_key}
+									/>
+								</div>
 							</div>
 						{:else if webConfig.search.engine === 'bing'}
-							<div>
-								<div class=" self-center text-xs font-medium mb-1">
-									{$i18n.t('Bing Search V7 Endpoint')}
+							<div class="mb-2.5 flex w-full flex-col">
+								<div>
+									<div class=" self-center text-xs font-medium mb-1">
+										{$i18n.t('Bing Search V7 Endpoint')}
+									</div>
+
+									<div class="flex w-full">
+										<div class="flex-1">
+											<input
+												class="w-full rounded-lg py-2 px-4 text-sm bg-gray-50 dark:text-gray-300 dark:bg-gray-850 outline-hidden"
+												type="text"
+												placeholder={$i18n.t('Enter Bing Search V7 Endpoint')}
+												bind:value={webConfig.search.bing_search_v7_endpoint}
+												autocomplete="off"
+											/>
+										</div>
+									</div>
 								</div>
 
-								<div class="flex w-full">
-									<div class="flex-1">
-										<input
-											class="w-full rounded-lg py-2 px-4 text-sm bg-gray-50 dark:text-gray-300 dark:bg-gray-850 outline-hidden"
-											type="text"
-											placeholder={$i18n.t('Enter Bing Search V7 Endpoint')}
-											bind:value={webConfig.search.bing_search_v7_endpoint}
-											autocomplete="off"
-										/>
+								<div class="mt-2">
+									<div class=" self-center text-xs font-medium mb-1">
+										{$i18n.t('Bing Search V7 Subscription Key')}
 									</div>
+
+									<SensitiveInput
+										placeholder={$i18n.t('Enter Bing Search V7 Subscription Key')}
+										bind:value={webConfig.search.bing_search_v7_subscription_key}
+									/>
 								</div>
 							</div>
+						{/if}
+					{/if}
+
+					{#if webConfig.search.enabled}
+						<div class="mb-2.5 flex w-full flex-col">
+							<div class="flex gap-2">
+								<div class="w-full">
+									<div class=" self-center text-xs font-medium mb-1">
+										{$i18n.t('Search Result Count')}
+									</div>
 
-							<div class="mt-2">
-								<div class=" self-center text-xs font-medium mb-1">
-									{$i18n.t('Bing Search V7 Subscription Key')}
+									<input
+										class="w-full rounded-lg py-2 px-4 text-sm bg-gray-50 dark:text-gray-300 dark:bg-gray-850 outline-hidden"
+										placeholder={$i18n.t('Search Result Count')}
+										bind:value={webConfig.search.result_count}
+										required
+									/>
 								</div>
 
-								<SensitiveInput
-									placeholder={$i18n.t('Enter Bing Search V7 Subscription Key')}
-									bind:value={webConfig.search.bing_search_v7_subscription_key}
-								/>
-							</div>
-						{/if}
-					</div>
-				{/if}
+								<div class="w-full">
+									<div class=" self-center text-xs font-medium mb-1">
+										{$i18n.t('Concurrent Requests')}
+									</div>
 
-				{#if webConfig.search.enabled}
-					<div class="mt-2 flex gap-2 mb-1">
-						<div class="w-full">
-							<div class=" self-center text-xs font-medium mb-1">
-								{$i18n.t('Search Result Count')}
+									<input
+										class="w-full rounded-lg py-2 px-4 text-sm bg-gray-50 dark:text-gray-300 dark:bg-gray-850 outline-hidden"
+										placeholder={$i18n.t('Concurrent Requests')}
+										bind:value={webConfig.search.concurrent_requests}
+										required
+									/>
+								</div>
 							</div>
-
-							<input
-								class="w-full rounded-lg py-2 px-4 text-sm bg-gray-50 dark:text-gray-300 dark:bg-gray-850 outline-hidden"
-								placeholder={$i18n.t('Search Result Count')}
-								bind:value={webConfig.search.result_count}
-								required
-							/>
 						</div>
 
-						<div class="w-full">
-							<div class=" self-center text-xs font-medium mb-1">
-								{$i18n.t('Concurrent Requests')}
+						<div class="mb-2.5 flex w-full flex-col">
+							<div class="  text-xs font-medium mb-1">
+								{$i18n.t('Domain Filter List')}
 							</div>
 
 							<input
 								class="w-full rounded-lg py-2 px-4 text-sm bg-gray-50 dark:text-gray-300 dark:bg-gray-850 outline-hidden"
-								placeholder={$i18n.t('Concurrent Requests')}
-								bind:value={webConfig.search.concurrent_requests}
-								required
+								placeholder={$i18n.t(
+									'Enter domains separated by commas (e.g., example.com,site.org)'
+								)}
+								bind:value={webConfig.search.domain_filter_list}
 							/>
 						</div>
-					</div>
+					{/if}
 
-					<div class="mt-2">
-						<div class=" self-center text-xs font-medium mb-1">
-							{$i18n.t('Domain Filter List')}
+					<div class="  mb-2.5 flex w-full justify-between">
+						<div class=" self-center text-xs font-medium">
+							<Tooltip content={$i18n.t('Full Context Mode')} placement="top-start">
+								{$i18n.t('Bypass Embedding and Retrieval')}
+							</Tooltip>
+						</div>
+						<div class="flex items-center relative">
+							<Tooltip
+								content={webConfig.BYPASS_WEB_SEARCH_EMBEDDING_AND_RETRIEVAL
+									? 'Inject the entire content as context for comprehensive processing, this is recommended for complex queries.'
+									: 'Default to segmented retrieval for focused and relevant content extraction, this is recommended for most cases.'}
+							>
+								<Switch bind:state={webConfig.BYPASS_WEB_SEARCH_EMBEDDING_AND_RETRIEVAL} />
+							</Tooltip>
 						</div>
+					</div>
 
-						<input
-							class="w-full rounded-lg py-2 px-4 text-sm bg-gray-50 dark:text-gray-300 dark:bg-gray-850 outline-hidden"
-							placeholder={$i18n.t(
-								'Enter domains separated by commas (e.g., example.com,site.org)'
-							)}
-							bind:value={webConfig.search.domain_filter_list}
-						/>
+					<div class="  mb-2.5 flex w-full justify-between">
+						<div class=" self-center text-xs font-medium">
+							{$i18n.t('Trust Proxy Environment')}
+						</div>
+						<div class="flex items-center relative">
+							<Tooltip
+								content={webConfig.search.trust_env
+									? 'Use the proxy designated by the http_proxy and https_proxy environment variables to fetch page contents.'
+									: 'Use no proxy to fetch page contents.'}
+							>
+								<Switch bind:state={webConfig.search.trust_env} />
+							</Tooltip>
+						</div>
 					</div>
-				{/if}
-			</div>
+				</div>
 
-			<hr class="border-gray-100 dark:border-gray-850 my-2" />
+				<div class="mb-3">
+					<div class=" mb-2.5 text-base font-medium">{$i18n.t('Loader')}</div>
 
-			<div>
-				<div class=" mb-1 text-sm font-medium">
-					{$i18n.t('Web Loader Settings')}
-				</div>
+					<hr class=" border-gray-100 dark:border-gray-850 my-2" />
 
-				<div>
-					<div class=" py-0.5 flex w-full justify-between">
+					<div class="  mb-2.5 flex w-full justify-between">
 						<div class=" self-center text-xs font-medium">
 							{$i18n.t('Bypass SSL verification for Websites')}
 						</div>
-
-						<button
-							class="p-1 px-3 text-xs flex rounded-sm transition"
-							on:click={() => {
-								webConfig.ENABLE_RAG_WEB_LOADER_SSL_VERIFICATION =
-									!webConfig.ENABLE_RAG_WEB_LOADER_SSL_VERIFICATION;
-								submitHandler();
-							}}
-							type="button"
-						>
-							{#if webConfig.ENABLE_RAG_WEB_LOADER_SSL_VERIFICATION === false}
-								<span class="ml-2 self-center">{$i18n.t('On')}</span>
-							{:else}
-								<span class="ml-2 self-center">{$i18n.t('Off')}</span>
-							{/if}
-						</button>
+						<div class="flex items-center relative">
+							<Switch bind:state={webConfig.ENABLE_RAG_WEB_LOADER_SSL_VERIFICATION} />
+						</div>
 					</div>
-				</div>
 
-				<div class=" mt-2 mb-1 text-sm font-medium">
-					{$i18n.t('Youtube Loader Settings')}
-				</div>
-
-				<div>
-					<div class=" py-0.5 flex w-full justify-between">
-						<div class=" w-20 text-xs font-medium self-center">{$i18n.t('Language')}</div>
-						<div class=" flex-1 self-center">
+					<div class="  mb-2.5 flex w-full justify-between">
+						<div class=" self-center text-xs font-medium">
+							{$i18n.t('Youtube Language')}
+						</div>
+						<div class="flex items-center relative">
 							<input
-								class="w-full rounded-lg py-2 px-4 text-sm bg-gray-50 dark:text-gray-300 dark:bg-gray-850 outline-hidden"
+								class="flex-1 w-full rounded-lg text-sm bg-transparent outline-hidden"
 								type="text"
 								placeholder={$i18n.t('Enter language codes')}
 								bind:value={youtubeLanguage}
@@ -470,14 +502,14 @@
 							/>
 						</div>
 					</div>
-				</div>
 
-				<div>
-					<div class=" py-0.5 flex w-full justify-between">
-						<div class=" w-20 text-xs font-medium self-center">{$i18n.t('Proxy URL')}</div>
-						<div class=" flex-1 self-center">
+					<div class="  mb-2.5 flex flex-col w-full justify-between">
+						<div class=" mb-1 text-xs font-medium">
+							{$i18n.t('Youtube Proxy URL')}
+						</div>
+						<div class="flex items-center relative">
 							<input
-								class="w-full rounded-lg py-2 px-4 text-sm bg-gray-50 dark:text-gray-300 dark:bg-gray-850 outline-hidden"
+								class="flex-1 w-full rounded-lg text-sm bg-transparent outline-hidden"
 								type="text"
 								placeholder={$i18n.t('Enter proxy URL (e.g. https://user:password@host:port)')}
 								bind:value={youtubeProxyUrl}

+ 0 - 16
src/lib/components/channel/MessageInput.svelte

@@ -157,22 +157,6 @@
 		}
 
 		files = [...files, fileItem];
-		// Check if the file is an audio file and transcribe/convert it to text file
-		if (['audio/mpeg', 'audio/wav', 'audio/ogg', 'audio/x-m4a'].includes(file['type'])) {
-			const res = await transcribeAudio(localStorage.token, file).catch((error) => {
-				toast.error(`${error}`);
-				return null;
-			});
-
-			if (res) {
-				console.log(res);
-				const blob = new Blob([res.text], { type: 'text/plain' });
-				file = blobToFile(blob, `${file.name}.txt`);
-
-				fileItem.name = file.name;
-				fileItem.size = file.size;
-			}
-		}
 
 		try {
 			// During the file upload, file content is automatically extracted.

+ 2 - 0
src/lib/components/chat/Artifacts.svelte

@@ -123,6 +123,8 @@
 		if (contents.length === 0) {
 			showControls.set(false);
 			showArtifacts.set(false);
+
+			toast.error($i18n.t('No HTML, CSS, or JavaScript content found.'));
 		}
 
 		selectedContentIdx = contents ? contents.length - 1 : 0;

+ 21 - 4
src/lib/components/chat/Chat.svelte

@@ -187,15 +187,20 @@
 		setToolIds();
 	}
 
+	$: if (atSelectedModel || selectedModels) {
+		setToolIds();
+	}
+
 	const setToolIds = async () => {
 		if (!$tools) {
 			tools.set(await getTools(localStorage.token));
 		}
 
-		if (selectedModels.length !== 1) {
+		if (selectedModels.length !== 1 && !atSelectedModel) {
 			return;
 		}
-		const model = $models.find((m) => m.id === selectedModels[0]);
+
+		const model = atSelectedModel ?? $models.find((m) => m.id === selectedModels[0]);
 		if (model) {
 			selectedToolIds = (model?.info?.meta?.toolIds ?? []).filter((id) =>
 				$tools.find((t) => t.id === id)
@@ -836,6 +841,7 @@
 				content: m.content,
 				info: m.info ? m.info : undefined,
 				timestamp: m.timestamp,
+				...(m.usage ? { usage: m.usage } : {}),
 				...(m.sources ? { sources: m.sources } : {})
 			})),
 			model_item: $models.find((m) => m.id === modelId),
@@ -1273,7 +1279,9 @@
 		const chatInputElement = document.getElementById('chat-input');
 
 		if (chatInputElement) {
+			await tick();
 			chatInputElement.style.height = '';
+			chatInputElement.style.height = Math.min(chatInputElement.scrollHeight, 320) + 'px';
 		}
 
 		const _files = JSON.parse(JSON.stringify(files));
@@ -1488,7 +1496,10 @@
 							params?.system ?? $settings?.system ?? '',
 							$user.name,
 							$settings?.userLocation
-								? await getAndUpdateUserLocation(localStorage.token)
+								? await getAndUpdateUserLocation(localStorage.token).catch((err) => {
+										console.error(err);
+										return undefined;
+									})
 								: undefined
 						)}${
 							(responseMessage?.userContext ?? null)
@@ -1573,7 +1584,12 @@
 				variables: {
 					...getPromptVariables(
 						$user.name,
-						$settings?.userLocation ? await getAndUpdateUserLocation(localStorage.token) : undefined
+						$settings?.userLocation
+							? await getAndUpdateUserLocation(localStorage.token).catch((err) => {
+									console.error(err);
+									return undefined;
+								})
+							: undefined
 					)
 				},
 				model_item: $models.find((m) => m.id === model.id),
@@ -1965,6 +1981,7 @@
 									bind:autoScroll
 									bind:prompt
 									{selectedModels}
+									{atSelectedModel}
 									{sendPrompt}
 									{showMessage}
 									{submitMessage}

+ 3 - 1
src/lib/components/chat/ChatPlaceholder.svelte

@@ -16,6 +16,7 @@
 
 	export let modelIds = [];
 	export let models = [];
+	export let atSelectedModel;
 
 	export let submitPrompt;
 
@@ -126,7 +127,8 @@
 		<div class=" w-full font-primary" in:fade={{ duration: 200, delay: 300 }}>
 			<Suggestions
 				className="grid grid-cols-2"
-				suggestionPrompts={models[selectedModelIdx]?.info?.meta?.suggestion_prompts ??
+				suggestionPrompts={atSelectedModel?.info?.meta?.suggestion_prompts ??
+					models[selectedModelIdx]?.info?.meta?.suggestion_prompts ??
 					$config?.default_prompt_suggestions ??
 					[]}
 				on:select={(e) => {

+ 36 - 25
src/lib/components/chat/MessageInput.svelte

@@ -2,6 +2,7 @@
 	import { toast } from 'svelte-sonner';
 	import { v4 as uuidv4 } from 'uuid';
 	import { createPicker, getAuthToken } from '$lib/utils/google-drive-picker';
+	import { pickAndDownloadFile } from '$lib/utils/onedrive-file-picker';
 
 	import { onMount, tick, getContext, createEventDispatcher, onDestroy } from 'svelte';
 	const dispatch = createEventDispatcher();
@@ -173,22 +174,6 @@
 		}
 
 		files = [...files, fileItem];
-		// Check if the file is an audio file and transcribe/convert it to text file
-		if (['audio/mpeg', 'audio/wav', 'audio/ogg', 'audio/x-m4a'].includes(file['type'])) {
-			const res = await transcribeAudio(localStorage.token, file).catch((error) => {
-				toast.error(`${error}`);
-				return null;
-			});
-
-			if (res) {
-				console.log(res);
-				const blob = new Blob([res.text], { type: 'text/plain' });
-				file = blobToFile(blob, `${file.name}.txt`);
-
-				fileItem.name = file.name;
-				fileItem.size = file.size;
-			}
-		}
 
 		try {
 			// During the file upload, file content is automatically extracted.
@@ -827,7 +812,11 @@
 															}
 
 															// Submit the prompt when Enter key is pressed
-															if (prompt !== '' && e.keyCode === 13 && !e.shiftKey) {
+															if (
+																(prompt !== '' || files.length > 0) &&
+																e.keyCode === 13 &&
+																!e.shiftKey
+															) {
 																dispatch('submit', prompt);
 															}
 														}
@@ -906,7 +895,11 @@
 													}
 
 													// Submit the prompt when Enter key is pressed
-													if (prompt !== '' && e.key === 'Enter' && !e.shiftKey) {
+													if (
+														(prompt !== '' || files.length > 0) &&
+														e.key === 'Enter' &&
+														!e.shiftKey
+													) {
 														dispatch('submit', prompt);
 													}
 												}
@@ -1108,6 +1101,21 @@
 													);
 												}
 											}}
+											uploadOneDriveHandler={async () => {
+												try {
+													const fileData = await pickAndDownloadFile();
+													if (fileData) {
+														const file = new File([fileData.blob], fileData.name, {
+															type: fileData.blob.type || 'application/octet-stream'
+														});
+														await uploadFileHandler(file);
+													} else {
+														console.log('No file was selected from OneDrive');
+													}
+												} catch (error) {
+													console.error('OneDrive Error:', error);
+												}
+											}}
 											onClose={async () => {
 												await tick();
 
@@ -1285,14 +1293,17 @@
 
 																	stream = null;
 
-																	if (!$TTSWorker) {
-																		await TTSWorker.set(
-																			new KokoroWorker({
-																				dtype: $settings.audio?.tts?.engineConfig?.dtype ?? 'fp32'
-																			})
-																		);
+																	if ($settings.audio?.tts?.engine === 'browser-kokoro') {
+																		// If the user has not initialized the TTS worker, initialize it
+																		if (!$TTSWorker) {
+																			await TTSWorker.set(
+																				new KokoroWorker({
+																					dtype: $settings.audio?.tts?.engineConfig?.dtype ?? 'fp32'
+																				})
+																			);
 
-																		await $TTSWorker.init();
+																			await $TTSWorker.init();
+																		}
 																	}
 
 																	showCallOverlay.set(true);

+ 7 - 1
src/lib/components/chat/MessageInput/Commands/Prompts.svelte

@@ -74,7 +74,13 @@
 		}
 
 		if (command.content.includes('{{USER_LOCATION}}')) {
-			const location = await getUserPosition();
+			let location;
+			try {
+				location = await getUserPosition();
+			} catch (error) {
+				toast.error($i18n.t('Location access not allowed'));
+				location = 'LOCATION_UNKNOWN';
+			}
 			text = text.replaceAll('{{USER_LOCATION}}', String(location));
 		}
 

+ 93 - 0
src/lib/components/chat/MessageInput/InputMenu.svelte

@@ -5,6 +5,7 @@
 
 	import { config, user, tools as _tools, mobile } from '$lib/stores';
 	import { createPicker } from '$lib/utils/google-drive-picker';
+
 	import { getTools } from '$lib/apis/tools';
 
 	import Dropdown from '$lib/components/common/Dropdown.svelte';
@@ -24,6 +25,7 @@
 	export let inputFilesHandler: Function;
 
 	export let uploadGoogleDriveHandler: Function;
+	export let uploadOneDriveHandler: Function;
 
 	export let selectedToolIds: string[] = [];
 
@@ -225,6 +227,97 @@
 					<div class="line-clamp-1">{$i18n.t('Google Drive')}</div>
 				</DropdownMenu.Item>
 			{/if}
+
+			{#if $config?.features?.enable_onedrive_integration}
+				<DropdownMenu.Item
+					class="flex gap-2 items-center px-3 py-2 text-sm font-medium cursor-pointer hover:bg-gray-50 dark:hover:bg-gray-800 rounded-xl"
+					on:click={() => {
+						uploadOneDriveHandler();
+					}}
+				>
+					<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 32 32" class="w-5 h-5" fill="none">
+						<mask
+							id="mask0_87_7796"
+							style="mask-type:alpha"
+							maskUnits="userSpaceOnUse"
+							x="0"
+							y="6"
+							width="32"
+							height="20"
+						>
+							<path
+								d="M7.82979 26C3.50549 26 0 22.5675 0 18.3333C0 14.1921 3.35322 10.8179 7.54613 10.6716C9.27535 7.87166 12.4144 6 16 6C20.6308 6 24.5169 9.12183 25.5829 13.3335C29.1316 13.3603 32 16.1855 32 19.6667C32 23.0527 29 26 25.8723 25.9914L7.82979 26Z"
+								fill="#C4C4C4"
+							/>
+						</mask>
+						<g mask="url(#mask0_87_7796)">
+							<path
+								d="M7.83017 26.0001C5.37824 26.0001 3.18957 24.8966 1.75391 23.1691L18.0429 16.3335L30.7089 23.4647C29.5926 24.9211 27.9066 26.0001 26.0004 25.9915C23.1254 26.0001 12.0629 26.0001 7.83017 26.0001Z"
+								fill="url(#paint0_linear_87_7796)"
+							/>
+							<path
+								d="M25.5785 13.3149L18.043 16.3334L30.709 23.4647C31.5199 22.4065 32.0004 21.0916 32.0004 19.6669C32.0004 16.1857 29.1321 13.3605 25.5833 13.3337C25.5817 13.3274 25.5801 13.3212 25.5785 13.3149Z"
+								fill="url(#paint1_linear_87_7796)"
+							/>
+							<path
+								d="M7.06445 10.7028L18.0423 16.3333L25.5779 13.3148C24.5051 9.11261 20.6237 6 15.9997 6C12.4141 6 9.27508 7.87166 7.54586 10.6716C7.3841 10.6773 7.22358 10.6877 7.06445 10.7028Z"
+								fill="url(#paint2_linear_87_7796)"
+							/>
+							<path
+								d="M1.7535 23.1687L18.0425 16.3331L7.06471 10.7026C3.09947 11.0792 0 14.3517 0 18.3331C0 20.1665 0.657197 21.8495 1.7535 23.1687Z"
+								fill="url(#paint3_linear_87_7796)"
+							/>
+						</g>
+						<defs>
+							<linearGradient
+								id="paint0_linear_87_7796"
+								x1="4.42591"
+								y1="24.6668"
+								x2="27.2309"
+								y2="23.2764"
+								gradientUnits="userSpaceOnUse"
+							>
+								<stop stop-color="#2086B8" />
+								<stop offset="1" stop-color="#46D3F6" />
+							</linearGradient>
+							<linearGradient
+								id="paint1_linear_87_7796"
+								x1="23.8302"
+								y1="19.6668"
+								x2="30.2108"
+								y2="15.2082"
+								gradientUnits="userSpaceOnUse"
+							>
+								<stop stop-color="#1694DB" />
+								<stop offset="1" stop-color="#62C3FE" />
+							</linearGradient>
+							<linearGradient
+								id="paint2_linear_87_7796"
+								x1="8.51037"
+								y1="7.33333"
+								x2="23.3335"
+								y2="15.9348"
+								gradientUnits="userSpaceOnUse"
+							>
+								<stop stop-color="#0D3D78" />
+								<stop offset="1" stop-color="#063B83" />
+							</linearGradient>
+							<linearGradient
+								id="paint3_linear_87_7796"
+								x1="-0.340429"
+								y1="19.9998"
+								x2="14.5634"
+								y2="14.4649"
+								gradientUnits="userSpaceOnUse"
+							>
+								<stop stop-color="#16589B" />
+								<stop offset="1" stop-color="#1464B7" />
+							</linearGradient>
+						</defs>
+					</svg>
+					<div class="line-clamp-1">{$i18n.t('OneDrive')}</div>
+				</DropdownMenu.Item>
+			{/if}
 		</DropdownMenu.Content>
 	</div>
 </Dropdown>

+ 2 - 0
src/lib/components/chat/Messages.svelte

@@ -32,6 +32,7 @@
 	export let prompt;
 	export let history = {};
 	export let selectedModels;
+	export let atSelectedModel;
 
 	let messages = [];
 
@@ -349,6 +350,7 @@
 	{#if Object.keys(history?.messages ?? {}).length == 0}
 		<ChatPlaceholder
 			modelIds={selectedModels}
+			{atSelectedModel}
 			submitPrompt={async (p) => {
 				let text = p;
 

+ 2 - 1
src/lib/components/chat/Messages/Citations.svelte

@@ -43,6 +43,7 @@
 	}
 
 	$: {
+		console.log('sources', sources);
 		citations = sources.reduce((acc, source) => {
 			if (Object.keys(source).length === 0) {
 				return acc;
@@ -53,7 +54,7 @@
 				const distance = source.distances?.[index];
 
 				// Within the same citation there could be multiple documents
-				const id = metadata?.source ?? 'N/A';
+				const id = metadata?.source ?? source?.source?.id ?? 'N/A';
 				let _source = source?.source;
 
 				if (metadata?.name) {

+ 11 - 18
src/lib/components/chat/Messages/CodeBlock.svelte

@@ -1,18 +1,9 @@
 <script lang="ts">
-	import hljs from 'highlight.js';
-	import { loadPyodide } from 'pyodide';
 	import mermaid from 'mermaid';
 
 	import { v4 as uuidv4 } from 'uuid';
 
-	import {
-		getContext,
-		getAllContexts,
-		onMount,
-		tick,
-		createEventDispatcher,
-		onDestroy
-	} from 'svelte';
+	import { getContext, onMount, tick, onDestroy } from 'svelte';
 	import { copyToClipboard } from '$lib/utils';
 
 	import 'highlight.js/styles/github-dark.min.css';
@@ -25,10 +16,12 @@
 	import { toast } from 'svelte-sonner';
 
 	const i18n = getContext('i18n');
-	const dispatch = createEventDispatcher();
 
 	export let id = '';
 
+	export let onSave = (e) => {};
+	export let onCode = (e) => {};
+
 	export let save = false;
 	export let run = true;
 
@@ -71,7 +64,7 @@
 		saved = true;
 
 		code = _code;
-		dispatch('save', code);
+		onSave(code);
 
 		setTimeout(() => {
 			saved = false;
@@ -344,7 +337,7 @@
 		render();
 	}
 
-	$: dispatch('code', { lang, code });
+	$: onCode({ lang, code });
 
 	$: if (attributes) {
 		onAttributesUpdate();
@@ -380,7 +373,7 @@
 		console.log('codeblock', lang, code);
 
 		if (lang) {
-			dispatch('code', { lang, code });
+			onCode({ lang, code });
 		}
 		if (document.documentElement.classList.contains('dark')) {
 			mermaid.initialize({
@@ -468,11 +461,11 @@
 					value={code}
 					{id}
 					{lang}
-					on:save={() => {
+					onSave={() => {
 						saveCode();
 					}}
-					on:change={(e) => {
-						_code = e.detail.value;
+					onChange={(value) => {
+						_code = value;
 					}}
 				/>
 			</div>
@@ -514,7 +507,7 @@
 									<div class="flex flex-col gap-2">
 										{#each files as file}
 											{#if file.type.startsWith('image')}
-												<img src={file.data} alt="Output" />
+												<img src={file.data} alt="Output" class=" w-full max-w-[36rem]" />
 											{/if}
 										{/each}
 									</div>

+ 4 - 4
src/lib/components/chat/Messages/Markdown/MarkdownTokens.svelte

@@ -88,14 +88,14 @@
 				code={token?.text ?? ''}
 				{attributes}
 				{save}
-				on:code={(e) => {
-					dispatch('code', e.detail);
+				onCode={(value) => {
+					dispatch('code', value);
 				}}
-				on:save={(e) => {
+				onSave={(value) => {
 					dispatch('update', {
 						raw: token.raw,
 						oldContent: token.text,
-						newContent: e.detail
+						newContent: value
 					});
 				}}
 			/>

+ 20 - 6
src/lib/components/chat/Messages/ResponseMessage.svelte

@@ -129,6 +129,7 @@
 	export let isLastMessage = true;
 	export let readOnly = false;
 
+	let buttonsContainerElement: HTMLDivElement;
 	let showDeleteConfirm = false;
 
 	let model = null;
@@ -518,6 +519,18 @@
 		// console.log('ResponseMessage mounted');
 
 		await tick();
+		if (buttonsContainerElement) {
+			console.log(buttonsContainerElement);
+			buttonsContainerElement.addEventListener('wheel', function (event) {
+				// console.log(event.deltaY);
+
+				event.preventDefault();
+				if (event.deltaY !== 0) {
+					// Adjust horizontal scroll position based on vertical scroll
+					buttonsContainerElement.scrollLeft += event.deltaY;
+				}
+			});
+		}
 	});
 </script>
 
@@ -802,10 +815,11 @@
 				</div>
 
 				{#if !edit}
-					{#if message.done || siblings.length > 1}
-						<div
-							class=" flex justify-start overflow-x-auto buttons text-gray-600 dark:text-gray-500 mt-0.5"
-						>
+					<div
+						bind:this={buttonsContainerElement}
+						class="flex justify-start overflow-x-auto buttons text-gray-600 dark:text-gray-500 mt-0.5"
+					>
+						{#if message.done || siblings.length > 1}
 							{#if siblings.length > 1}
 								<div class="flex self-center min-w-fit" dir="ltr">
 									<button
@@ -1313,8 +1327,8 @@
 									{/if}
 								{/if}
 							{/if}
-						</div>
-					{/if}
+						{/if}
+					</div>
 
 					{#if message.done && showRateComment}
 						<RateComment

+ 2 - 1
src/lib/components/chat/Placeholder.svelte

@@ -213,7 +213,8 @@
 	<div class="mx-auto max-w-2xl font-primary" in:fade={{ duration: 200, delay: 200 }}>
 		<div class="mx-5">
 			<Suggestions
-				suggestionPrompts={models[selectedModelIdx]?.info?.meta?.suggestion_prompts ??
+				suggestionPrompts={atSelectedModel?.info?.meta?.suggestion_prompts ??
+					models[selectedModelIdx]?.info?.meta?.suggestion_prompts ??
 					$config?.default_prompt_suggestions ??
 					[]}
 				inputValue={prompt}

+ 31 - 9
src/lib/components/chat/Settings/Personalization/ManageModal.svelte

@@ -12,6 +12,7 @@
 	import { error } from '@sveltejs/kit';
 	import EditMemoryModal from './EditMemoryModal.svelte';
 	import localizedFormat from 'dayjs/plugin/localizedFormat';
+	import ConfirmDialog from '$lib/components/common/ConfirmDialog.svelte';
 
 	const i18n = getContext('i18n');
 	dayjs.extend(localizedFormat);
@@ -26,6 +27,21 @@
 
 	let selectedMemory = null;
 
+	let showClearConfirmDialog = false;
+
+	let onClearConfirmed = async () => {
+		const res = await deleteMemoriesByUserId(localStorage.token).catch((error) => {
+			toast.error(`${error}`);
+			return null;
+		});
+
+		if (res && memories.length > 0) {
+			toast.success($i18n.t('Memory cleared successfully'));
+			memories = [];
+		}
+		showClearConfirmDialog = false;
+	};
+
 	$: if (show && memories.length === 0 && loading) {
 		(async () => {
 			memories = await getMemories(localStorage.token);
@@ -175,15 +191,11 @@
 				>
 				<button
 					class=" px-3.5 py-1.5 font-medium text-red-500 hover:bg-black/5 dark:hover:bg-white/5 outline outline-1 outline-red-300 dark:outline-red-800 rounded-3xl"
-					on:click={async () => {
-						const res = await deleteMemoriesByUserId(localStorage.token).catch((error) => {
-							toast.error(`${error}`);
-							return null;
-						});
-
-						if (res) {
-							toast.success($i18n.t('Memory cleared successfully'));
-							memories = [];
+					on:click={() => {
+						if (memories.length > 0) {
+							showClearConfirmDialog = true;
+						} else {
+							toast.error($i18n.t('No memories to clear'));
 						}
 					}}>{$i18n.t('Clear memory')}</button
 				>
@@ -192,6 +204,16 @@
 	</div>
 </Modal>
 
+<ConfirmDialog
+	title={$i18n.t('Clear Memory')}
+	message={$i18n.t('Are you sure you want to clear all memories? This action cannot be undone.')}
+	show={showClearConfirmDialog}
+	on:confirm={onClearConfirmed}
+	on:cancel={() => {
+		showClearConfirmDialog = false;
+	}}
+/>
+
 <AddMemoryModal
 	bind:show={showAddMemoryModal}
 	on:save={async () => {

+ 40 - 2
src/lib/components/chat/ShortcutsModal.svelte

@@ -122,6 +122,30 @@
 							</div>
 						</div>
 					</div>
+
+					<div class="w-full flex justify-between items-center">
+						<div class=" text-sm">{$i18n.t('Generate prompt pair')}</div>
+
+						<div class="flex space-x-1 text-xs">
+							<div
+								class=" h-fit py-1 px-2 flex items-center justify-center rounded-sm border border-black/10 capitalize text-gray-600 dark:border-white/10 dark:text-gray-300"
+							>
+								Ctrl/⌘
+							</div>
+
+							<div
+								class=" h-fit py-1 px-2 flex items-center justify-center rounded-sm border border-black/10 capitalize text-gray-600 dark:border-white/10 dark:text-gray-300"
+							>
+								Shift
+							</div>
+
+							<div
+								class=" h-fit py-1 px-2 flex items-center justify-center rounded-sm border border-black/10 capitalize text-gray-600 dark:border-white/10 dark:text-gray-300"
+							>
+								Enter
+							</div>
+						</div>
+					</div>
 				</div>
 
 				<div class="flex flex-col space-y-3 w-full self-start">
@@ -219,7 +243,7 @@
 				<div class="flex flex-col space-y-3 w-full self-start">
 					<div class="w-full flex justify-between items-center">
 						<div class=" text-sm">
-							{$i18n.t('Attach file')}
+							{$i18n.t('Attach file from knowledge')}
 						</div>
 
 						<div class="flex space-x-1 text-xs">
@@ -247,7 +271,7 @@
 
 					<div class="w-full flex justify-between items-center">
 						<div class=" text-sm">
-							{$i18n.t('Select model')}
+							{$i18n.t('Talk to model')}
 						</div>
 
 						<div class="flex space-x-1 text-xs">
@@ -258,6 +282,20 @@
 							</div>
 						</div>
 					</div>
+
+					<div class="w-full flex justify-between items-center">
+						<div class=" text-sm">
+							{$i18n.t('Accept autocomplete generation / Jump to prompt variable')}
+						</div>
+
+						<div class="flex space-x-1 text-xs">
+							<div
+								class=" h-fit py-1 px-2 flex items-center justify-center rounded-sm border border-black/10 capitalize text-gray-600 dark:border-white/10 dark:text-gray-300"
+							>
+								TAB
+							</div>
+						</div>
+					</div>
 				</div>
 			</div>
 		</div>

+ 12 - 3
src/lib/components/common/CodeEditor.svelte

@@ -21,6 +21,10 @@
 
 	export let boilerplate = '';
 	export let value = '';
+
+	export let onSave = () => {};
+	export let onChange = () => {};
+
 	let _value = '';
 
 	$: if (value) {
@@ -43,6 +47,10 @@
 
 	let codeEditor;
 
+	export const focus = () => {
+		codeEditor.focus();
+	};
+
 	let isDarkMode = false;
 	let editorTheme = new Compartment();
 	let editorLanguage = new Compartment();
@@ -75,7 +83,7 @@
 				});
 
 				_value = formattedCode;
-				dispatch('change', { value: _value });
+				onChange(_value);
 				await tick();
 
 				toast.success($i18n.t('Code formatted successfully'));
@@ -94,7 +102,7 @@
 		EditorView.updateListener.of((e) => {
 			if (e.docChanged) {
 				_value = e.state.doc.toString();
-				dispatch('change', { value: _value });
+				onChange(_value);
 			}
 		}),
 		editorTheme.of([]),
@@ -170,7 +178,8 @@
 		const keydownHandler = async (e) => {
 			if ((e.ctrlKey || e.metaKey) && e.key === 's') {
 				e.preventDefault();
-				dispatch('save');
+
+				onSave();
 			}
 
 			// Format code when Ctrl + Shift + F is pressed

+ 1 - 1
src/lib/components/common/FileItemModal.svelte

@@ -87,7 +87,7 @@
 						<div>
 							<Tooltip
 								content={enableFullContent
-									? 'Inject the entire document as context for comprehensive processing, this is recommended for complex queries.'
+									? 'Inject the entire content as context for comprehensive processing, this is recommended for complex queries.'
 									: 'Default to segmented retrieval for focused and relevant content extraction, this is recommended for most cases.'}
 							>
 								<div class="flex items-center gap-1.5 text-xs">

+ 0 - 14
src/lib/components/workspace/Knowledge/KnowledgeBase.svelte

@@ -133,20 +133,6 @@
 
 		knowledge.files = [...(knowledge.files ?? []), fileItem];
 
-		// Check if the file is an audio file and transcribe/convert it to text file
-		if (['audio/mpeg', 'audio/wav', 'audio/ogg', 'audio/x-m4a'].includes(file['type'])) {
-			const res = await transcribeAudio(localStorage.token, file).catch((error) => {
-				toast.error(`${error}`);
-				return null;
-			});
-
-			if (res) {
-				console.log(res);
-				const blob = new Blob([res.text], { type: 'text/plain' });
-				file = blobToFile(blob, `${file.name}.txt`);
-			}
-		}
-
 		try {
 			const uploadedFile = await uploadFile(localStorage.token, file).catch((e) => {
 				toast.error(`${e}`);

+ 0 - 1
src/lib/components/workspace/Models/ModelEditor.svelte

@@ -180,7 +180,6 @@
 		}
 
 		if (model) {
-			console.log(model);
 			name = model.name;
 			await tick();
 

+ 12 - 9
src/lib/components/workspace/Prompts/PromptMenu.svelte

@@ -2,6 +2,7 @@
 	import { DropdownMenu } from 'bits-ui';
 	import { flyAndScale } from '$lib/utils/transitions';
 	import { getContext } from 'svelte';
+	import { config } from '$lib/stores';
 
 	import Dropdown from '$lib/components/common/Dropdown.svelte';
 	import GarbageBin from '$lib/components/icons/GarbageBin.svelte';
@@ -44,15 +45,17 @@
 			align="start"
 			transition={flyAndScale}
 		>
-			<DropdownMenu.Item
-				class="flex gap-2 items-center px-3 py-2 text-sm  font-medium cursor-pointer hover:bg-gray-50 dark:hover:bg-gray-800  rounded-md"
-				on:click={() => {
-					shareHandler();
-				}}
-			>
-				<Share />
-				<div class="flex items-center">{$i18n.t('Share')}</div>
-			</DropdownMenu.Item>
+			{#if $config.features.enable_community_sharing}
+				<DropdownMenu.Item
+					class="flex gap-2 items-center px-3 py-2 text-sm  font-medium cursor-pointer hover:bg-gray-50 dark:hover:bg-gray-800  rounded-md"
+					on:click={() => {
+						shareHandler();
+					}}
+				>
+					<Share />
+					<div class="flex items-center">{$i18n.t('Share')}</div>
+				</DropdownMenu.Item>
+			{/if}
 
 			<DropdownMenu.Item
 				class="flex gap-2 items-center px-3 py-2 text-sm  font-medium cursor-pointer hover:bg-gray-50 dark:hover:bg-gray-800 rounded-md"

+ 12 - 9
src/lib/components/workspace/Tools/ToolMenu.svelte

@@ -12,6 +12,7 @@
 	import ArchiveBox from '$lib/components/icons/ArchiveBox.svelte';
 	import DocumentDuplicate from '$lib/components/icons/DocumentDuplicate.svelte';
 	import ArrowDownTray from '$lib/components/icons/ArrowDownTray.svelte';
+	import { config } from '$lib/stores';
 
 	const i18n = getContext('i18n');
 
@@ -69,15 +70,17 @@
 				<div class="flex items-center">{$i18n.t('Edit')}</div>
 			</DropdownMenu.Item>
 
-			<DropdownMenu.Item
-				class="flex gap-2 items-center px-3 py-2 text-sm  font-medium cursor-pointer hover:bg-gray-50 dark:hover:bg-gray-800  rounded-md"
-				on:click={() => {
-					shareHandler();
-				}}
-			>
-				<Share />
-				<div class="flex items-center">{$i18n.t('Share')}</div>
-			</DropdownMenu.Item>
+			{#if $config.features.enable_community_sharing}
+				<DropdownMenu.Item
+					class="flex gap-2 items-center px-3 py-2 text-sm  font-medium cursor-pointer hover:bg-gray-50 dark:hover:bg-gray-800  rounded-md"
+					on:click={() => {
+						shareHandler();
+					}}
+				>
+					<Share />
+					<div class="flex items-center">{$i18n.t('Share')}</div>
+				</DropdownMenu.Item>
+			{/if}
 
 			<DropdownMenu.Item
 				class="flex gap-2 items-center px-3 py-2 text-sm  font-medium cursor-pointer hover:bg-gray-50 dark:hover:bg-gray-800 rounded-md"

+ 7 - 7
src/lib/components/workspace/Tools/ToolkitEditor.svelte

@@ -1,5 +1,5 @@
 <script>
-	import { getContext, createEventDispatcher, onMount, tick } from 'svelte';
+	import { getContext, onMount, tick } from 'svelte';
 
 	const i18n = getContext('i18n');
 
@@ -12,8 +12,6 @@
 	import LockClosed from '$lib/components/icons/LockClosed.svelte';
 	import AccessControlModal from '../common/AccessControlModal.svelte';
 
-	const dispatch = createEventDispatcher();
-
 	let formElement = null;
 	let loading = false;
 
@@ -23,6 +21,8 @@
 	export let edit = false;
 	export let clone = false;
 
+	export let onSave = () => {};
+
 	export let id = '';
 	export let name = '';
 	export let meta = {
@@ -150,7 +150,7 @@ class Tools:
 
 	const saveHandler = async () => {
 		loading = true;
-		dispatch('save', {
+		onSave({
 			id,
 			name,
 			meta,
@@ -284,10 +284,10 @@ class Tools:
 						value={content}
 						{boilerplate}
 						lang="python"
-						on:change={(e) => {
-							_content = e.detail.value;
+						onChange={(value) => {
+							_content = value;
 						}}
-						on:save={() => {
+						onSave={() => {
 							if (formElement) {
 								formElement.requestSubmit();
 							}

+ 23 - 13
src/lib/i18n/locales/ar-BH/translation.json

@@ -13,6 +13,7 @@
 	"A task model is used when performing tasks such as generating titles for chats and web search queries": "يتم استخدام نموذج المهمة عند تنفيذ مهام مثل إنشاء عناوين للدردشات واستعلامات بحث الويب",
 	"a user": "مستخدم",
 	"About": "عن",
+	"Accept autocomplete generation / Jump to prompt variable": "",
 	"Access": "",
 	"Access Control": "",
 	"Accessible to all users": "",
@@ -85,6 +86,7 @@
 	"Archive All Chats": "أرشفة جميع الدردشات",
 	"Archived Chats": "الأرشيف المحادثات",
 	"archived-chat-export": "",
+	"Are you sure you want to clear all memories? This action cannot be undone.": "",
 	"Are you sure you want to delete this channel?": "",
 	"Are you sure you want to delete this message?": "",
 	"Are you sure you want to unarchive all archived chats?": "",
@@ -93,7 +95,7 @@
 	"Artifacts": "",
 	"Ask a question": "",
 	"Assistant": "",
-	"Attach file": "أرفق ملف",
+	"Attach file from knowledge": "",
 	"Attention to detail": "انتبه للتفاصيل",
 	"Attribute for Mail": "",
 	"Attribute for Username": "",
@@ -127,6 +129,7 @@
 	"Bocha Search API Key": "",
 	"Brave Search API Key": "مفتاح واجهة برمجة تطبيقات البحث الشجاع",
 	"By {{name}}": "",
+	"Bypass Embedding and Retrieval": "",
 	"Bypass SSL verification for Websites": "تجاوز التحقق من SSL للموقع",
 	"Calendar": "",
 	"Call": "",
@@ -156,11 +159,11 @@
 	"Checking for updates...": "البحث عن تحديثات",
 	"Choose a model before saving...": "أختار موديل قبل الحفظ",
 	"Chunk Overlap": "Chunk تداخل",
-	"Chunk Params": "Chunk المتغيرات",
 	"Chunk Size": "Chunk حجم",
 	"Ciphers": "",
 	"Citation": "اقتباس",
 	"Clear memory": "",
+	"Clear Memory": "",
 	"click here": "",
 	"Click here for filter guides.": "",
 	"Click here for help.": "أضغط هنا للمساعدة",
@@ -208,7 +211,7 @@
 	"Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort. (Default: medium)": "",
 	"Contact Admin for WebUI Access": "",
 	"Content": "الاتصال",
-	"Content Extraction": "",
+	"Content Extraction Engine": "",
 	"Context Length": "طول السياق",
 	"Continue Response": "متابعة الرد",
 	"Continue with {{provider}}": "",
@@ -245,6 +248,7 @@
 	"Current Model": "الموديل المختار",
 	"Current Password": "كلمة السر الحالية",
 	"Custom": "مخصص",
+	"Danger Zone": "",
 	"Dark": "مظلم",
 	"Database": "قاعدة البيانات",
 	"December": "ديسمبر",
@@ -305,6 +309,8 @@
 	"Do not install functions from sources you do not fully trust.": "",
 	"Do not install tools from sources you do not fully trust.": "",
 	"Document": "المستند",
+	"Document Intelligence": "",
+	"Document Intelligence endpoint and key required.": "",
 	"Documentation": "",
 	"Documents": "مستندات",
 	"does not make any external connections, and your data stays securely on your locally hosted server.": "لا يجري أي اتصالات خارجية، وتظل بياناتك آمنة على الخادم المستضاف محليًا.",
@@ -340,6 +346,7 @@
 	"ElevenLabs": "",
 	"Email": "البريد",
 	"Embark on adventures": "",
+	"Embedding": "",
 	"Embedding Batch Size": "",
 	"Embedding Model": "نموذج التضمين",
 	"Embedding Model Engine": "تضمين محرك النموذج",
@@ -348,15 +355,12 @@
 	"Enable autocomplete generation for chat messages": "",
 	"Enable Code Interpreter": "",
 	"Enable Community Sharing": "تمكين مشاركة المجتمع",
-	"Enable Google Drive": "",
 	"Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "",
 	"Enable Memory Mapping (mmap) to load model data. This option allows the system to use disk storage as an extension of RAM by treating disk files as if they were in RAM. This can improve model performance by allowing for faster data access. However, it may not work correctly with all systems and can consume a significant amount of disk space.": "",
 	"Enable Message Rating": "",
 	"Enable Mirostat sampling for controlling perplexity. (Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "",
 	"Enable New Sign Ups": "تفعيل عمليات التسجيل الجديدة",
-	"Enable Web Search": "تمكين بحث الويب",
 	"Enabled": "",
-	"Engine": "",
 	"Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "تأكد من أن ملف CSV الخاص بك يتضمن 4 أعمدة بهذا الترتيب: Name, Email, Password, Role.",
 	"Enter {{role}} message here": "أدخل رسالة {{role}} هنا",
 	"Enter a detail about yourself for your LLMs to recall": "ادخل معلومات عنك تريد أن يتذكرها الموديل",
@@ -372,6 +376,8 @@
 	"Enter Chunk Overlap": "أدخل الChunk Overlap",
 	"Enter Chunk Size": "أدخل Chunk الحجم",
 	"Enter description": "",
+	"Enter Document Intelligence Endpoint": "",
+	"Enter Document Intelligence Key": "",
 	"Enter domains separated by commas (e.g., example.com,site.org)": "",
 	"Enter Exa API Key": "",
 	"Enter Github Raw URL": "أدخل عنوان URL ل Github Raw",
@@ -507,9 +513,9 @@
 	"Gemini API Config": "",
 	"Gemini API Key is required.": "",
 	"General": "عام",
-	"General Settings": "الاعدادات العامة",
 	"Generate an image": "",
 	"Generate Image": "",
+	"Generate prompt pair": "",
 	"Generating search query": "إنشاء استعلام بحث",
 	"Get started": "",
 	"Get started with {{WEBUI_NAME}}": "",
@@ -565,6 +571,7 @@
 	"Input commands": "إدخال الأوامر",
 	"Install from Github URL": "التثبيت من عنوان URL لجيثب",
 	"Instant Auto-Send After Voice Transcription": "",
+	"Integration": "",
 	"Interface": "واجهه المستخدم",
 	"Invalid file format.": "",
 	"Invalid Tag": "تاق غير صالحة",
@@ -612,16 +619,17 @@
 	"Listening...": "",
 	"Llama.cpp": "",
 	"LLMs can make mistakes. Verify important information.": "يمكن أن تصدر بعض الأخطاء. لذلك يجب التحقق من المعلومات المهمة",
+	"Loader": "",
 	"Loading Kokoro.js...": "",
 	"Local": "",
 	"Local Models": "",
+	"Location access not allowed": "",
 	"Lost": "",
 	"LTR": "من جهة اليسار إلى اليمين",
 	"Made by Open WebUI Community": "OpenWebUI تم إنشاؤه بواسطة مجتمع ",
 	"Make sure to enclose them with": "تأكد من إرفاقها",
 	"Make sure to export a workflow.json file as API format from ComfyUI.": "",
 	"Manage": "",
-	"Manage Arena Models": "",
 	"Manage Direct Connections": "",
 	"Manage Models": "",
 	"Manage Ollama": "",
@@ -689,6 +697,7 @@
 	"No HTML, CSS, or JavaScript content found.": "",
 	"No inference engine with management support found": "",
 	"No knowledge found": "",
+	"No memories to clear": "",
 	"No model IDs": "",
 	"No models found": "",
 	"No models selected": "",
@@ -718,6 +727,7 @@
 	"Ollama API settings updated": "",
 	"Ollama Version": "Ollama الاصدار",
 	"On": "تشغيل",
+	"OneDrive": "",
 	"Only alphanumeric characters and hyphens are allowed": "",
 	"Only alphanumeric characters and hyphens are allowed in the command string.": "يُسمح فقط بالأحرف الأبجدية الرقمية والواصلات في سلسلة الأمر.",
 	"Only collections can be edited, create a new knowledge base to edit/add documents.": "",
@@ -788,11 +798,9 @@
 	"Prompt updated successfully": "",
 	"Prompts": "مطالبات",
 	"Prompts Access": "",
-	"Proxy URL": "",
 	"Pull \"{{searchValue}}\" from Ollama.com": "Ollama.com \"{{searchValue}}\" أسحب من ",
 	"Pull a model from Ollama.com": "Ollama.com سحب الموديل من ",
 	"Query Generation Prompt": "",
-	"Query Params": "Query Params",
 	"RAG Template": "RAG تنمبلت",
 	"Rating": "",
 	"Re-rank models by topic similarity": "",
@@ -827,6 +835,7 @@
 	"Response notifications cannot be activated as the website permissions have been denied. Please visit your browser settings to grant the necessary access.": "",
 	"Response splitting": "",
 	"Result": "",
+	"Retrieval": "",
 	"Retrieval Query Generation": "",
 	"Rich Text Input for Chat": "",
 	"RK": "",
@@ -880,7 +889,6 @@
 	"Select an Ollama instance": "",
 	"Select Engine": "",
 	"Select Knowledge": "",
-	"Select model": " أختار موديل",
 	"Select only one model to call": "",
 	"Selected model(s) do not support image inputs": "النموذج (النماذج) المحددة لا تدعم مدخلات الصور",
 	"Semantic distance to query": "",
@@ -957,6 +965,7 @@
 	"Tags Generation": "",
 	"Tags Generation Prompt": "",
 	"Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "",
+	"Talk to model": "",
 	"Tap to interrupt": "",
 	"Tasks": "",
 	"Tavily API Key": "",
@@ -1041,6 +1050,7 @@
 	"Top P": "Top P",
 	"Transformers": "",
 	"Trouble accessing Ollama?": "هل تواجه مشكلة في الوصول",
+	"Trust Proxy Environment": "",
 	"TTS Model": "",
 	"TTS Settings": "TTS اعدادات",
 	"TTS Voice": "",
@@ -1104,7 +1114,6 @@
 	"Warning: Jupyter execution enables arbitrary code execution, posing severe security risks—proceed with extreme caution.": "",
 	"Web": "Web",
 	"Web API": "",
-	"Web Loader Settings": "Web تحميل اعدادات",
 	"Web Search": "بحث الويب",
 	"Web Search Engine": "محرك بحث الويب",
 	"Web Search in Chat": "",
@@ -1146,5 +1155,6 @@
 	"Your account status is currently pending activation.": "",
 	"Your entire contribution will go directly to the plugin developer; Open WebUI does not take any percentage. However, the chosen funding platform might have its own fees.": "",
 	"Youtube": "Youtube",
-	"Youtube Loader Settings": "Youtube تحميل اعدادات"
+	"Youtube Language": "",
+	"Youtube Proxy URL": ""
 }
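A note on the pattern repeated in these locale hunks: newly introduced UI strings (e.g. "Clear Memory", "Danger Zone", "Document Intelligence endpoint and key required.") are added to every translation.json with an empty string value, while keys dropped from the UI (e.g. "Enable Web Search", "Query Params", "Web Loader Settings") are removed from every locale in the same commit. The src/lib/i18n layout suggests an i18next-style setup; the exact Open WebUI configuration is an assumption here, but the sketch below shows how an empty value can be treated as "missing" so untranslated keys fall back to the English source string instead of rendering blank. The resource contents are illustrative, not copied from the repository.

```ts
import i18next from "i18next";

// Minimal sketch, assuming an i18next-style setup; this is not the actual
// Open WebUI i18n bootstrap. "Clear Memory" is one of the keys added with ""
// in the diffs above; the resources here are illustrative only.
await i18next.init({
	lng: "ar-BH",
	fallbackLng: "en-US",
	// Treat "" in translation.json as missing, so lookups fall back to the
	// fallback language (or ultimately to the key itself) instead of
	// rendering an empty label.
	returnEmptyString: false,
	resources: {
		"ar-BH": { translation: { "Clear Memory": "" } },
		"en-US": { translation: { "Clear Memory": "Clear Memory" } }
	}
});

// Untranslated key falls back rather than showing an empty string.
console.log(i18next.t("Clear Memory")); // -> "Clear Memory"
```

Under a configuration like this, adding a key with "" across all locales, as these hunks do, keeps untranslated UIs usable: users simply see the English source string until a translator fills in the value.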

+ 23 - 13
src/lib/i18n/locales/bg-BG/translation.json

@@ -13,6 +13,7 @@
 	"A task model is used when performing tasks such as generating titles for chats and web search queries": "Моделът на задачите се използва при изпълнение на задачи като генериране на заглавия за чатове и заявки за търсене в мрежата",
 	"a user": "потребител",
 	"About": "Относно",
+	"Accept autocomplete generation / Jump to prompt variable": "",
 	"Access": "Достъп",
 	"Access Control": "Контрол на достъпа",
 	"Accessible to all users": "Достъпно за всички потребители",
@@ -85,6 +86,7 @@
 	"Archive All Chats": "Архив Всички чатове",
 	"Archived Chats": "Архивирани Чатове",
 	"archived-chat-export": "експорт-на-архивирани-чатове",
+	"Are you sure you want to clear all memories? This action cannot be undone.": "",
 	"Are you sure you want to delete this channel?": "Сигурни ли сте, че искате да изтриете този канал?",
 	"Are you sure you want to delete this message?": "Сигурни ли сте, че искате да изтриете това съобщение?",
 	"Are you sure you want to unarchive all archived chats?": "Сигурни ли сте, че искате да разархивирате всички архивирани чатове?",
@@ -93,7 +95,7 @@
 	"Artifacts": "Артефакти",
 	"Ask a question": "Задайте въпрос",
 	"Assistant": "Асистент",
-	"Attach file": "Прикачване на файл",
+	"Attach file from knowledge": "",
 	"Attention to detail": "Внимание към детайлите",
 	"Attribute for Mail": "Атрибут за поща",
 	"Attribute for Username": "Атрибут за потребителско име",
@@ -127,6 +129,7 @@
 	"Bocha Search API Key": "API ключ за Bocha Search",
 	"Brave Search API Key": "API ключ за Brave Search",
 	"By {{name}}": "От {{name}}",
+	"Bypass Embedding and Retrieval": "",
 	"Bypass SSL verification for Websites": "Изключване на SSL проверката за сайтове",
 	"Calendar": "Календар",
 	"Call": "Обаждане",
@@ -156,11 +159,11 @@
 	"Checking for updates...": "Проверка за актуализации...",
 	"Choose a model before saving...": "Изберете модел преди запазване...",
 	"Chunk Overlap": "Припокриване на чънкове",
-	"Chunk Params": "Параметри на чънковете",
 	"Chunk Size": "Размер на чънк",
 	"Ciphers": "Шифри",
 	"Citation": "Цитат",
 	"Clear memory": "Изчистване на паметта",
+	"Clear Memory": "",
 	"click here": "натиснете тук",
 	"Click here for filter guides.": "Натиснете тук за ръководства за филтриране.",
 	"Click here for help.": "Натиснете тук за помощ.",
@@ -208,7 +211,7 @@
 	"Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort. (Default: medium)": "Ограничава усилията за разсъждение при модели за разсъждение. Приложимо само за модели за разсъждение от конкретни доставчици, които поддържат усилия за разсъждение. (По подразбиране: средно)",
 	"Contact Admin for WebUI Access": "Свържете се с администратор за достъп до WebUI",
 	"Content": "Съдържание",
-	"Content Extraction": "Извличане на съдържание",
+	"Content Extraction Engine": "",
 	"Context Length": "Дължина на Контекста",
 	"Continue Response": "Продължи отговора",
 	"Continue with {{provider}}": "Продължете с {{provider}}",
@@ -245,6 +248,7 @@
 	"Current Model": "Текущ модел",
 	"Current Password": "Текуща Парола",
 	"Custom": "Персонализиран",
+	"Danger Zone": "",
 	"Dark": "Тъмен",
 	"Database": "База данни",
 	"December": "Декември",
@@ -305,6 +309,8 @@
 	"Do not install functions from sources you do not fully trust.": "Не инсталирайте функции от източници, на които не се доверявате напълно.",
 	"Do not install tools from sources you do not fully trust.": "Не инсталирайте инструменти от източници, на които не се доверявате напълно.",
 	"Document": "Документ",
+	"Document Intelligence": "",
+	"Document Intelligence endpoint and key required.": "",
 	"Documentation": "Документация",
 	"Documents": "Документи",
 	"does not make any external connections, and your data stays securely on your locally hosted server.": "няма външни връзки, и вашите данни остават сигурни на локално назначен сървър.",
@@ -340,6 +346,7 @@
 	"ElevenLabs": "ElevenLabs",
 	"Email": "Имейл",
 	"Embark on adventures": "Отправете се на приключения",
+	"Embedding": "",
 	"Embedding Batch Size": "Размер на партидата за вграждане",
 	"Embedding Model": "Модел за вграждане",
 	"Embedding Model Engine": "Двигател на модела за вграждане",
@@ -348,15 +355,12 @@
 	"Enable autocomplete generation for chat messages": "Активиране на автоматично довършване за съобщения в чата",
 	"Enable Code Interpreter": "Активиране на интерпретатор на код",
 	"Enable Community Sharing": "Разрешаване на споделяне в общност",
-	"Enable Google Drive": "Активиране на Google Drive",
 	"Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "Активиране на заключване на паметта (mlock), за да се предотврати изваждането на данните на модела от RAM. Тази опция заключва работния набор от страници на модела в RAM, гарантирайки, че няма да бъдат изхвърлени на диска. Това може да помогне за поддържане на производителността, като се избягват грешки в страниците и се осигурява бърз достъп до данните.",
 	"Enable Memory Mapping (mmap) to load model data. This option allows the system to use disk storage as an extension of RAM by treating disk files as if they were in RAM. This can improve model performance by allowing for faster data access. However, it may not work correctly with all systems and can consume a significant amount of disk space.": "Активиране на мапиране на паметта (mmap) за зареждане на данни на модела. Тази опция позволява на системата да използва дисковото пространство като разширение на RAM, третирайки дисковите файлове, сякаш са в RAM. Това може да подобри производителността на модела, като позволява по-бърз достъп до данните. Въпреки това, може да не работи правилно с всички системи и може да консумира значително количество дисково пространство.",
 	"Enable Message Rating": "Активиране на оценяване на съобщения",
 	"Enable Mirostat sampling for controlling perplexity. (Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "Активиране на Mirostat семплиране за контрол на перплексията. (По подразбиране: 0, 0 = Деактивирано, 1 = Mirostat, 2 = Mirostat 2.0)",
 	"Enable New Sign Ups": "Включване на нови регистрации",
-	"Enable Web Search": "Разрешаване на търсене в уеб",
 	"Enabled": "Активирано",
-	"Engine": "Двигател",
 	"Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "Уверете се, че вашият CSV файл включва 4 колони в следния ред: Име, Имейл, Парола, Роля.",
 	"Enter {{role}} message here": "Въведете съобщение за {{role}} тук",
 	"Enter a detail about yourself for your LLMs to recall": "Въведете подробности за себе си, за да ги запомнят вашите LLMs",
@@ -372,6 +376,8 @@
 	"Enter Chunk Overlap": "Въведете припокриване на чънкове",
 	"Enter Chunk Size": "Въведете размер на чънк",
 	"Enter description": "Въведете описание",
+	"Enter Document Intelligence Endpoint": "",
+	"Enter Document Intelligence Key": "",
 	"Enter domains separated by commas (e.g., example.com,site.org)": "Въведете домейни, разделени със запетаи (напр. example.com,site.org)",
 	"Enter Exa API Key": "Въведете API ключ за Exa",
 	"Enter Github Raw URL": "Въведете URL адрес на Github Raw",
@@ -507,9 +513,9 @@
 	"Gemini API Config": "",
 	"Gemini API Key is required.": "",
 	"General": "Основни",
-	"General Settings": "Основни Настройки",
 	"Generate an image": "Генериране на изображение",
 	"Generate Image": "Генериране на изображение",
+	"Generate prompt pair": "",
 	"Generating search query": "Генериране на заявка за търсене",
 	"Get started": "Започнете",
 	"Get started with {{WEBUI_NAME}}": "Започнете с {{WEBUI_NAME}}",
@@ -565,6 +571,7 @@
 	"Input commands": "Въведете команди",
 	"Install from Github URL": "Инсталиране от URL адреса на Github",
 	"Instant Auto-Send After Voice Transcription": "Незабавно автоматично изпращане след гласова транскрипция",
+	"Integration": "",
 	"Interface": "Интерфейс",
 	"Invalid file format.": "Невалиден формат на файла.",
 	"Invalid Tag": "Невалиден таг",
@@ -612,16 +619,17 @@
 	"Listening...": "Слушане...",
 	"Llama.cpp": "Llama.cpp",
 	"LLMs can make mistakes. Verify important information.": "LLMs могат да правят грешки. Проверете важните данни.",
+	"Loader": "",
 	"Loading Kokoro.js...": "Зареждане на Kokoro.js...",
 	"Local": "Локално",
 	"Local Models": "Локални модели",
+	"Location access not allowed": "",
 	"Lost": "Изгубено",
 	"LTR": "LTR",
 	"Made by Open WebUI Community": "Направено от OpenWebUI общността",
 	"Make sure to enclose them with": "Уверете се, че са заключени с",
 	"Make sure to export a workflow.json file as API format from ComfyUI.": "Уверете се, че експортирате файл workflow.json като API формат от ComfyUI.",
 	"Manage": "Управление",
-	"Manage Arena Models": "Управление на Arena модели",
 	"Manage Direct Connections": "Управление на директни връзки",
 	"Manage Models": "Управление на модели",
 	"Manage Ollama": "Управление на Ollama",
@@ -689,6 +697,7 @@
 	"No HTML, CSS, or JavaScript content found.": "Не е намерено HTML, CSS или JavaScript съдържание.",
 	"No inference engine with management support found": "Не е намерен механизъм за извод с поддръжка на управление",
 	"No knowledge found": "Не са намерени знания",
+	"No memories to clear": "",
 	"No model IDs": "Няма ИД-та на модели",
 	"No models found": "Не са намерени модели",
 	"No models selected": "Няма избрани модели",
@@ -718,6 +727,7 @@
 	"Ollama API settings updated": "Настройките на Ollama API са актуализирани",
 	"Ollama Version": "Ollama Версия",
 	"On": "Вкл.",
+	"OneDrive": "",
 	"Only alphanumeric characters and hyphens are allowed": "Разрешени са само буквено-цифрови знаци и тирета",
 	"Only alphanumeric characters and hyphens are allowed in the command string.": "Само алфанумерични знаци и тире са разрешени в командния низ.",
 	"Only collections can be edited, create a new knowledge base to edit/add documents.": "Само колекции могат да бъдат редактирани, създайте нова база от знания, за да редактирате/добавяте документи.",
@@ -788,11 +798,9 @@
 	"Prompt updated successfully": "Промптът е актуализиран успешно",
 	"Prompts": "Промптове",
 	"Prompts Access": "Достъп до промптове",
-	"Proxy URL": "URL на прокси",
 	"Pull \"{{searchValue}}\" from Ollama.com": "Извади \"{{searchValue}}\" от Ollama.com",
 	"Pull a model from Ollama.com": "Издърпайте модел от Ollama.com",
 	"Query Generation Prompt": "Промпт за генериране на запитвания",
-	"Query Params": "Параметри на запитването",
 	"RAG Template": "RAG Шаблон",
 	"Rating": "Оценка",
 	"Re-rank models by topic similarity": "Преоценка на моделите по сходство на темата",
@@ -827,6 +835,7 @@
 	"Response notifications cannot be activated as the website permissions have been denied. Please visit your browser settings to grant the necessary access.": "Известията за отговори не могат да бъдат активирани, тъй като разрешенията за уебсайта са отказани. Моля, посетете настройките на вашия браузър, за да дадете необходимия достъп.",
 	"Response splitting": "Разделяне на отговора",
 	"Result": "Резултат",
+	"Retrieval": "",
 	"Retrieval Query Generation": "Генериране на заявка за извличане",
 	"Rich Text Input for Chat": "Богат текстов вход за чат",
 	"RK": "RK",
@@ -880,7 +889,6 @@
 	"Select an Ollama instance": "Изберете инстанция на Ollama",
 	"Select Engine": "Изберете двигател",
 	"Select Knowledge": "Изберете знание",
-	"Select model": "Изберете модел",
 	"Select only one model to call": "Изберете само един модел за извикване",
 	"Selected model(s) do not support image inputs": "Избраният(те) модел(и) не поддържа въвеждане на изображения",
 	"Semantic distance to query": "Семантично разстояние до заявката",
@@ -957,6 +965,7 @@
 	"Tags Generation": "Генериране на тагове",
 	"Tags Generation Prompt": "Промпт за генериране на тагове",
 	"Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "Безопашковото семплиране се използва за намаляване на влиянието на по-малко вероятните токени от изхода. По-висока стойност (напр. 2.0) ще намали влиянието повече, докато стойност 1.0 деактивира тази настройка. (по подразбиране: 1)",
+	"Talk to model": "",
 	"Tap to interrupt": "Докоснете за прекъсване",
 	"Tasks": "Задачи",
 	"Tavily API Key": "Tavily API Ключ",
@@ -1041,6 +1050,7 @@
 	"Top P": "Топ P",
 	"Transformers": "Трансформатори",
 	"Trouble accessing Ollama?": "Проблеми с достъпа до Ollama?",
+	"Trust Proxy Environment": "",
 	"TTS Model": "TTS Модел",
 	"TTS Settings": "TTS Настройки",
 	"TTS Voice": "TTS Глас",
@@ -1104,7 +1114,6 @@
 	"Warning: Jupyter execution enables arbitrary code execution, posing severe security risks—proceed with extreme caution.": "Предупреждение: Изпълнението на Jupyter позволява произволно изпълнение на код, което представлява сериозни рискове за сигурността—продължете с изключително внимание.",
 	"Web": "Уеб",
 	"Web API": "Уеб API",
-	"Web Loader Settings": "Настройки за зареждане на уеб",
 	"Web Search": "Търсене в уеб",
 	"Web Search Engine": "Уеб търсачка",
 	"Web Search in Chat": "Уеб търсене в чата",
@@ -1146,5 +1155,6 @@
 	"Your account status is currently pending activation.": "Статусът на вашия акаунт в момента очаква активиране.",
 	"Your entire contribution will go directly to the plugin developer; Open WebUI does not take any percentage. However, the chosen funding platform might have its own fees.": "Цялата ви вноска ще отиде директно при разработчика на плъгина; Open WebUI не взима никакъв процент. Въпреки това, избраната платформа за финансиране може да има свои собствени такси.",
 	"Youtube": "Youtube",
-	"Youtube Loader Settings": "Настройки за зареждане от Youtube"
+	"Youtube Language": "",
+	"Youtube Proxy URL": ""
 }

+ 23 - 13
src/lib/i18n/locales/bn-BD/translation.json

@@ -13,6 +13,7 @@
 	"A task model is used when performing tasks such as generating titles for chats and web search queries": "চ্যাট এবং ওয়েব অনুসন্ধান প্রশ্নের জন্য শিরোনাম তৈরি করার মতো কাজগুলি সম্পাদন করার সময় একটি টাস্ক মডেল ব্যবহার করা হয়",
 	"a user": "একজন ব্যাবহারকারী",
 	"About": "সম্পর্কে",
+	"Accept autocomplete generation / Jump to prompt variable": "",
 	"Access": "",
 	"Access Control": "",
 	"Accessible to all users": "",
@@ -85,6 +86,7 @@
 	"Archive All Chats": "আর্কাইভ করুন সকল চ্যাট",
 	"Archived Chats": "চ্যাট ইতিহাস সংরক্ষণাগার",
 	"archived-chat-export": "",
+	"Are you sure you want to clear all memories? This action cannot be undone.": "",
 	"Are you sure you want to delete this channel?": "",
 	"Are you sure you want to delete this message?": "",
 	"Are you sure you want to unarchive all archived chats?": "",
@@ -93,7 +95,7 @@
 	"Artifacts": "",
 	"Ask a question": "",
 	"Assistant": "",
-	"Attach file": "ফাইল যুক্ত করুন",
+	"Attach file from knowledge": "",
 	"Attention to detail": "বিস্তারিত বিশেষতা",
 	"Attribute for Mail": "",
 	"Attribute for Username": "",
@@ -127,6 +129,7 @@
 	"Bocha Search API Key": "",
 	"Brave Search API Key": "সাহসী অনুসন্ধান API কী",
 	"By {{name}}": "",
+	"Bypass Embedding and Retrieval": "",
 	"Bypass SSL verification for Websites": "ওয়েবসাইটের জন্য SSL যাচাই বাতিল করুন",
 	"Calendar": "",
 	"Call": "",
@@ -156,11 +159,11 @@
 	"Checking for updates...": "নতুন আপডেট আছে কিনা চেক করা হচ্ছে...",
 	"Choose a model before saving...": "সেভ করার আগে একটি মডেল নির্বাচন করুন",
 	"Chunk Overlap": "চাঙ্ক ওভারল্যাপ",
-	"Chunk Params": "চাঙ্ক প্যারামিটার্স",
 	"Chunk Size": "চাঙ্ক সাইজ",
 	"Ciphers": "",
 	"Citation": "উদ্ধৃতি",
 	"Clear memory": "",
+	"Clear Memory": "",
 	"click here": "",
 	"Click here for filter guides.": "",
 	"Click here for help.": "সাহায্যের জন্য এখানে ক্লিক করুন",
@@ -208,7 +211,7 @@
 	"Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort. (Default: medium)": "",
 	"Contact Admin for WebUI Access": "",
 	"Content": "বিষয়বস্তু",
-	"Content Extraction": "",
+	"Content Extraction Engine": "",
 	"Context Length": "কনটেক্সটের দৈর্ঘ্য",
 	"Continue Response": "যাচাই করুন",
 	"Continue with {{provider}}": "",
@@ -245,6 +248,7 @@
 	"Current Model": "বর্তমান মডেল",
 	"Current Password": "বর্তমান পাসওয়ার্ড",
 	"Custom": "কাস্টম",
+	"Danger Zone": "",
 	"Dark": "ডার্ক",
 	"Database": "ডেটাবেজ",
 	"December": "ডেসেম্বর",
@@ -305,6 +309,8 @@
 	"Do not install functions from sources you do not fully trust.": "",
 	"Do not install tools from sources you do not fully trust.": "",
 	"Document": "ডকুমেন্ট",
+	"Document Intelligence": "",
+	"Document Intelligence endpoint and key required.": "",
 	"Documentation": "",
 	"Documents": "ডকুমেন্টসমূহ",
 	"does not make any external connections, and your data stays securely on your locally hosted server.": "কোন এক্সটার্নাল কানেকশন তৈরি করে না, এবং আপনার ডেটা আর লোকালি হোস্টেড সার্ভারেই নিরাপদে থাকে।",
@@ -340,6 +346,7 @@
 	"ElevenLabs": "",
 	"Email": "ইমেইল",
 	"Embark on adventures": "",
+	"Embedding": "",
 	"Embedding Batch Size": "",
 	"Embedding Model": "ইমেজ ইমেবডিং মডেল",
 	"Embedding Model Engine": "ইমেজ ইমেবডিং মডেল ইঞ্জিন",
@@ -348,15 +355,12 @@
 	"Enable autocomplete generation for chat messages": "",
 	"Enable Code Interpreter": "",
 	"Enable Community Sharing": "সম্প্রদায় শেয়ারকরণ সক্ষম করুন",
-	"Enable Google Drive": "",
 	"Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "",
 	"Enable Memory Mapping (mmap) to load model data. This option allows the system to use disk storage as an extension of RAM by treating disk files as if they were in RAM. This can improve model performance by allowing for faster data access. However, it may not work correctly with all systems and can consume a significant amount of disk space.": "",
 	"Enable Message Rating": "",
 	"Enable Mirostat sampling for controlling perplexity. (Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "",
 	"Enable New Sign Ups": "নতুন সাইনআপ চালু করুন",
-	"Enable Web Search": "ওয়েব অনুসন্ধান সক্ষম করুন",
 	"Enabled": "",
-	"Engine": "",
 	"Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "আপনার সিএসভি ফাইলটিতে এই ক্রমে 4 টি কলাম অন্তর্ভুক্ত রয়েছে তা নিশ্চিত করুন: নাম, ইমেল, পাসওয়ার্ড, ভূমিকা।.",
 	"Enter {{role}} message here": "{{role}} মেসেজ এখানে লিখুন",
 	"Enter a detail about yourself for your LLMs to recall": "আপনার এলএলএমগুলি স্মরণ করার জন্য নিজের সম্পর্কে একটি বিশদ লিখুন",
@@ -372,6 +376,8 @@
 	"Enter Chunk Overlap": "চাঙ্ক ওভারল্যাপ লিখুন",
 	"Enter Chunk Size": "চাংক সাইজ লিখুন",
 	"Enter description": "",
+	"Enter Document Intelligence Endpoint": "",
+	"Enter Document Intelligence Key": "",
 	"Enter domains separated by commas (e.g., example.com,site.org)": "",
 	"Enter Exa API Key": "",
 	"Enter Github Raw URL": "গিটহাব কাঁচা URL লিখুন",
@@ -507,9 +513,9 @@
 	"Gemini API Config": "",
 	"Gemini API Key is required.": "",
 	"General": "সাধারণ",
-	"General Settings": "সাধারণ সেটিংসমূহ",
 	"Generate an image": "",
 	"Generate Image": "",
+	"Generate prompt pair": "",
 	"Generating search query": "অনুসন্ধান ক্যোয়ারী তৈরি করা হচ্ছে",
 	"Get started": "",
 	"Get started with {{WEBUI_NAME}}": "",
@@ -565,6 +571,7 @@
 	"Input commands": "ইনপুট কমান্ডস",
 	"Install from Github URL": "Github URL থেকে ইনস্টল করুন",
 	"Instant Auto-Send After Voice Transcription": "",
+	"Integration": "",
 	"Interface": "ইন্টারফেস",
 	"Invalid file format.": "",
 	"Invalid Tag": "অবৈধ ট্যাগ",
@@ -612,16 +619,17 @@
 	"Listening...": "",
 	"Llama.cpp": "",
 	"LLMs can make mistakes. Verify important information.": "LLM ভুল করতে পারে। গুরুত্বপূর্ণ তথ্য যাচাই করে নিন।",
+	"Loader": "",
 	"Loading Kokoro.js...": "",
 	"Local": "",
 	"Local Models": "",
+	"Location access not allowed": "",
 	"Lost": "",
 	"LTR": "LTR",
 	"Made by Open WebUI Community": "OpenWebUI কমিউনিটিকর্তৃক নির্মিত",
 	"Make sure to enclose them with": "এটা দিয়ে বন্ধনী দিতে ভুলবেন না",
 	"Make sure to export a workflow.json file as API format from ComfyUI.": "",
 	"Manage": "",
-	"Manage Arena Models": "",
 	"Manage Direct Connections": "",
 	"Manage Models": "",
 	"Manage Ollama": "",
@@ -689,6 +697,7 @@
 	"No HTML, CSS, or JavaScript content found.": "",
 	"No inference engine with management support found": "",
 	"No knowledge found": "",
+	"No memories to clear": "",
 	"No model IDs": "",
 	"No models found": "",
 	"No models selected": "",
@@ -718,6 +727,7 @@
 	"Ollama API settings updated": "",
 	"Ollama Version": "Ollama ভার্সন",
 	"On": "চালু",
+	"OneDrive": "",
 	"Only alphanumeric characters and hyphens are allowed": "",
 	"Only alphanumeric characters and hyphens are allowed in the command string.": "কমান্ড স্ট্রিং-এ শুধুমাত্র ইংরেজি অক্ষর, সংখ্যা এবং হাইফেন ব্যবহার করা যাবে।",
 	"Only collections can be edited, create a new knowledge base to edit/add documents.": "",
@@ -788,11 +798,9 @@
 	"Prompt updated successfully": "",
 	"Prompts": "প্রম্পটসমূহ",
 	"Prompts Access": "",
-	"Proxy URL": "",
 	"Pull \"{{searchValue}}\" from Ollama.com": "Ollama.com থেকে \"{{searchValue}}\" টানুন",
 	"Pull a model from Ollama.com": "Ollama.com থেকে একটি টেনে আনুন আনুন",
 	"Query Generation Prompt": "",
-	"Query Params": "Query প্যারামিটারসমূহ",
 	"RAG Template": "RAG টেম্পলেট",
 	"Rating": "",
 	"Re-rank models by topic similarity": "",
@@ -827,6 +835,7 @@
 	"Response notifications cannot be activated as the website permissions have been denied. Please visit your browser settings to grant the necessary access.": "",
 	"Response splitting": "",
 	"Result": "",
+	"Retrieval": "",
 	"Retrieval Query Generation": "",
 	"Rich Text Input for Chat": "",
 	"RK": "",
@@ -880,7 +889,6 @@
 	"Select an Ollama instance": "",
 	"Select Engine": "",
 	"Select Knowledge": "",
-	"Select model": "মডেল নির্বাচন করুন",
 	"Select only one model to call": "",
 	"Selected model(s) do not support image inputs": "নির্বাচিত মডেল(গুলি) চিত্র ইনপুট সমর্থন করে না",
 	"Semantic distance to query": "",
@@ -957,6 +965,7 @@
 	"Tags Generation": "",
 	"Tags Generation Prompt": "",
 	"Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "",
+	"Talk to model": "",
 	"Tap to interrupt": "",
 	"Tasks": "",
 	"Tavily API Key": "",
@@ -1041,6 +1050,7 @@
 	"Top P": "Top P",
 	"Transformers": "",
 	"Trouble accessing Ollama?": "Ollama এক্সেস করতে সমস্যা হচ্ছে?",
+	"Trust Proxy Environment": "",
 	"TTS Model": "",
 	"TTS Settings": "TTS সেটিংসমূহ",
 	"TTS Voice": "",
@@ -1104,7 +1114,6 @@
 	"Warning: Jupyter execution enables arbitrary code execution, posing severe security risks—proceed with extreme caution.": "",
 	"Web": "ওয়েব",
 	"Web API": "",
-	"Web Loader Settings": "ওয়েব লোডার সেটিংস",
 	"Web Search": "ওয়েব অনুসন্ধান",
 	"Web Search Engine": "ওয়েব সার্চ ইঞ্জিন",
 	"Web Search in Chat": "",
@@ -1146,5 +1155,6 @@
 	"Your account status is currently pending activation.": "",
 	"Your entire contribution will go directly to the plugin developer; Open WebUI does not take any percentage. However, the chosen funding platform might have its own fees.": "",
 	"Youtube": "YouTube",
-	"Youtube Loader Settings": "YouTube লোডার সেটিংস"
+	"Youtube Language": "",
+	"Youtube Proxy URL": ""
 }

+ 73 - 63
src/lib/i18n/locales/ca-ES/translation.json

@@ -13,6 +13,7 @@
 	"A task model is used when performing tasks such as generating titles for chats and web search queries": "Un model de tasca s'utilitza quan es realitzen tasques com ara generar títols per a xats i consultes de cerca per a la web",
 	"a user": "un usuari",
 	"About": "Sobre",
+	"Accept autocomplete generation / Jump to prompt variable": "",
 	"Access": "Accés",
 	"Access Control": "Control d'accés",
 	"Accessible to all users": "Accessible a tots els usuaris",
@@ -20,7 +21,7 @@
 	"Account Activation Pending": "Activació del compte pendent",
 	"Accurate information": "Informació precisa",
 	"Actions": "Accions",
-	"Activate": "",
+	"Activate": "Activar",
 	"Activate this command by typing \"/{{COMMAND}}\" to chat input.": "Activa aquest comanda escrivint \"{{COMMAND}}\" en el xat",
 	"Active Users": "Usuaris actius",
 	"Add": "Afegir",
@@ -85,6 +86,7 @@
 	"Archive All Chats": "Arxiva tots els xats",
 	"Archived Chats": "Xats arxivats",
 	"archived-chat-export": "archived-chat-export",
+	"Are you sure you want to clear all memories? This action cannot be undone.": "",
 	"Are you sure you want to delete this channel?": "Estàs segur que vols eliminar aquest canal?",
 	"Are you sure you want to delete this message?": "Estàs segur que vols eliminar aquest missatge?",
 	"Are you sure you want to unarchive all archived chats?": "Estàs segur que vols desarxivar tots els xats arxivats?",
@@ -93,14 +95,14 @@
 	"Artifacts": "Artefactes",
 	"Ask a question": "Fer una pregunta",
 	"Assistant": "Assistent",
-	"Attach file": "Adjuntar arxiu",
+	"Attach file from knowledge": "",
 	"Attention to detail": "Atenció al detall",
 	"Attribute for Mail": "Atribut per al Correu",
 	"Attribute for Username": "Atribut per al Nom d'usuari",
 	"Audio": "Àudio",
 	"August": "Agost",
 	"Authenticate": "Autenticar",
-	"Authentication": "",
+	"Authentication": "Autenticació",
 	"Auto-Copy Response to Clipboard": "Copiar la resposta automàticament al porta-retalls",
 	"Auto-playback response": "Reproduir la resposta automàticament",
 	"Autocomplete Generation": "Generació automàtica",
@@ -124,11 +126,12 @@
 	"Beta": "Beta",
 	"Bing Search V7 Endpoint": "Punt de connexió a Bing Search V7",
 	"Bing Search V7 Subscription Key": "Clau de subscripció a Bing Search V7",
-	"Bocha Search API Key": "",
+	"Bocha Search API Key": "Clau API de Bocha Search",
 	"Brave Search API Key": "Clau API de Brave Search",
 	"By {{name}}": "Per {{name}}",
+	"Bypass Embedding and Retrieval": "",
 	"Bypass SSL verification for Websites": "Desactivar la verificació SSL per a l'accés a Internet",
-	"Calendar": "",
+	"Calendar": "Calendari",
 	"Call": "Trucada",
 	"Call feature is not supported when using Web STT engine": "La funció de trucada no s'admet quan s'utilitza el motor Web STT",
 	"Camera": "Càmera",
@@ -156,11 +159,11 @@
 	"Checking for updates...": "Comprovant actualitzacions...",
 	"Choose a model before saving...": "Triar un model abans de desar...",
 	"Chunk Overlap": "Solapament de blocs",
-	"Chunk Params": "Paràmetres dels blocs",
 	"Chunk Size": "Mida del bloc",
 	"Ciphers": "Xifradors",
 	"Citation": "Cita",
 	"Clear memory": "Esborrar la memòria",
+	"Clear Memory": "",
 	"click here": "prem aquí",
 	"Click here for filter guides.": "Clica aquí per filtrar les guies.",
 	"Click here for help.": "Clica aquí per obtenir ajuda.",
@@ -180,13 +183,13 @@
 	"Clone of {{TITLE}}": "Clon de {{TITLE}}",
 	"Close": "Tancar",
 	"Code execution": "Execució de codi",
-	"Code Execution": "",
-	"Code Execution Engine": "",
-	"Code Execution Timeout": "",
+	"Code Execution": "Excució de Codi",
+	"Code Execution Engine": "Motor d'execució de codi",
+	"Code Execution Timeout": "Temps màxim d'execució de codi",
 	"Code formatted successfully": "Codi formatat correctament",
 	"Code Interpreter": "Intèrpret de codi",
-	"Code Interpreter Engine": "",
-	"Code Interpreter Prompt Template": "",
+	"Code Interpreter Engine": "Motor de l'intèrpret de codi",
+	"Code Interpreter Prompt Template": "Plantilla de la indicació de l'intèrpret de codi",
 	"Collection": "Col·lecció",
 	"Color": "Color",
 	"ComfyUI": "ComfyUI",
@@ -203,19 +206,19 @@
 	"Confirm Password": "Confirmar la contrasenya",
 	"Confirm your action": "Confirma la teva acció",
 	"Confirm your new password": "Confirma la teva nova contrasenya",
-	"Connect to your own OpenAI compatible API endpoints.": "",
+	"Connect to your own OpenAI compatible API endpoints.": "Connecta als teus propis punts de connexió de l'API compatible amb OpenAI",
 	"Connections": "Connexions",
 	"Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort. (Default: medium)": "Restringeix l'esforç de raonament dels models de raonament. Només aplicable a models de raonament de proveïdors específics que donen suport a l'esforç de raonament. (Per defecte: mitjà)",
 	"Contact Admin for WebUI Access": "Posat en contacte amb l'administrador per accedir a WebUI",
 	"Content": "Contingut",
-	"Content Extraction": "Extracció de contingut",
+	"Content Extraction Engine": "",
 	"Context Length": "Mida del context",
 	"Continue Response": "Continuar la resposta",
 	"Continue with {{provider}}": "Continuar amb {{provider}}",
 	"Continue with Email": "Continuar amb el correu",
 	"Continue with LDAP": "Continuar amb LDAP",
 	"Control how message text is split for TTS requests. 'Punctuation' splits into sentences, 'paragraphs' splits into paragraphs, and 'none' keeps the message as a single string.": "Controlar com es divideix el text del missatge per a les sol·licituds TTS. 'Puntuació' divideix en frases, 'paràgrafs' divideix en paràgrafs i 'cap' manté el missatge com una cadena única.",
-	"Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled. (Default: 1.1)": "",
+	"Control the repetition of token sequences in the generated text. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 1.1) will be more lenient. At 1, it is disabled. (Default: 1.1)": "Controlar la repetició de seqüències de tokens en el text generat. Un valor més alt (p. ex., 1,5) penalitzarà les repeticions amb més força, mentre que un valor més baix (p. ex., 1,1) serà més indulgent. A l'1, està desactivat. (Per defecte: 1.1)",
 	"Controls": "Controls",
 	"Controls the balance between coherence and diversity of the output. A lower value will result in more focused and coherent text. (Default: 5.0)": "Controlar l'equilibri entre la coherència i la diversitat de la sortida. Un valor més baix donarà lloc a un text més enfocat i coherent. (Per defecte: 5.0)",
 	"Copied": "Copiat",
@@ -227,7 +230,7 @@
 	"Copy Link": "Copiar l'enllaç",
 	"Copy to clipboard": "Copiar al porta-retalls",
 	"Copying to clipboard was successful!": "La còpia al porta-retalls s'ha realitzat correctament",
-	"CORS must be properly configured by the provider to allow requests from Open WebUI.": "",
+	"CORS must be properly configured by the provider to allow requests from Open WebUI.": "CORS ha de ser configurat correctament pel proveïdor per permetre les sol·licituds d'Open WebUI",
 	"Create": "Crear",
 	"Create a knowledge base": "Crear una base de coneixement",
 	"Create a model": "Crear un model",
@@ -245,6 +248,7 @@
 	"Current Model": "Model actual",
 	"Current Password": "Contrasenya actual",
 	"Custom": "Personalitzat",
+	"Danger Zone": "",
 	"Dark": "Fosc",
 	"Database": "Base de dades",
 	"December": "Desembre",
@@ -271,7 +275,7 @@
 	"Delete folder?": "Eliminar la carpeta?",
 	"Delete function?": "Eliminar funció?",
 	"Delete Message": "Eleiminar el missatge",
-	"Delete message?": "",
+	"Delete message?": "Eliminar el missatge?",
 	"Delete prompt?": "Eliminar indicació?",
 	"delete this link": "Eliminar aquest enllaç",
 	"Delete tool?": "Eliminar eina?",
@@ -282,15 +286,15 @@
 	"Describe your knowledge base and objectives": "Descriu la teva base de coneixement i objectius",
 	"Description": "Descripció",
 	"Didn't fully follow instructions": "No s'han seguit les instruccions completament",
-	"Direct Connections": "",
-	"Direct Connections allow users to connect to their own OpenAI compatible API endpoints.": "",
-	"Direct Connections settings updated": "",
+	"Direct Connections": "Connexions directes",
+	"Direct Connections allow users to connect to their own OpenAI compatible API endpoints.": "Les connexions directes permeten als usuaris connectar-se als seus propis endpoints d'API compatibles amb OpenAI.",
+	"Direct Connections settings updated": "Configuració de les connexions directes actualitzada",
 	"Disabled": "Deshabilitat",
 	"Discover a function": "Descobrir una funció",
 	"Discover a model": "Descobrir un model",
 	"Discover a prompt": "Descobrir una indicació",
 	"Discover a tool": "Descobrir una eina",
-	"Discover how to use Open WebUI and seek support from the community.": "",
+	"Discover how to use Open WebUI and seek support from the community.": "Descobreix com utilitzar Open WebUI i demana suport a la comunitat.",
 	"Discover wonders": "Descobrir meravelles",
 	"Discover, download, and explore custom functions": "Descobrir, descarregar i explorar funcions personalitzades",
 	"Discover, download, and explore custom prompts": "Descobrir, descarregar i explorar indicacions personalitzades",
@@ -305,6 +309,8 @@
 	"Do not install functions from sources you do not fully trust.": "No instal·lis funcions de fonts en què no confiïs plenament.",
 	"Do not install tools from sources you do not fully trust.": "No instal·lis eines de fonts en què no confiïs plenament.",
 	"Document": "Document",
+	"Document Intelligence": "",
+	"Document Intelligence endpoint and key required.": "",
 	"Documentation": "Documentació",
 	"Documents": "Documents",
 	"does not make any external connections, and your data stays securely on your locally hosted server.": "no realitza connexions externes, i les teves dades romanen segures al teu servidor allotjat localment.",
@@ -315,14 +321,14 @@
 	"Don't like the style": "No t'agrada l'estil?",
 	"Done": "Fet",
 	"Download": "Descarregar",
-	"Download as SVG": "",
+	"Download as SVG": "Descarrega com a SVG",
 	"Download canceled": "Descàrrega cancel·lada",
 	"Download Database": "Descarregar la base de dades",
 	"Drag and drop a file to upload or select a file to view": "Arrossegar un arxiu per pujar o escull un arxiu a veure",
 	"Draw": "Dibuixar",
 	"Drop any files here to add to the conversation": "Deixa qualsevol arxiu aquí per afegir-lo a la conversa",
 	"e.g. '30s','10m'. Valid time units are 's', 'm', 'h'.": "p. ex. '30s','10m'. Les unitats de temps vàlides són 's', 'm', 'h'.",
-	"e.g. 60": "",
+	"e.g. 60": "p. ex. 60",
 	"e.g. A filter to remove profanity from text": "p. ex. Un filtre per eliminar paraules malsonants del text",
 	"e.g. My Filter": "p. ex. El meu filtre",
 	"e.g. My Tools": "p. ex. Les meves eines",
@@ -340,23 +346,21 @@
 	"ElevenLabs": "ElevenLabs",
 	"Email": "Correu electrònic",
 	"Embark on adventures": "Embarcar en aventures",
+	"Embedding": "",
 	"Embedding Batch Size": "Mida del lot d'incrustació",
 	"Embedding Model": "Model d'incrustació",
 	"Embedding Model Engine": "Motor de model d'incrustació",
 	"Embedding model set to \"{{embedding_model}}\"": "Model d'incrustació configurat a \"{{embedding_model}}\"",
 	"Enable API Key": "Activar la Clau API",
 	"Enable autocomplete generation for chat messages": "Activar la generació automàtica per als missatges del xat",
-	"Enable Code Interpreter": "",
+	"Enable Code Interpreter": "Activar l'intèrpret de codi",
 	"Enable Community Sharing": "Activar l'ús compartit amb la comunitat",
-	"Enable Google Drive": "Activar Google Drive",
 	"Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "Activar el bloqueig de memòria (mlock) per evitar que les dades del model s'intercanviïn fora de la memòria RAM. Aquesta opció bloqueja el conjunt de pàgines de treball del model a la memòria RAM, assegurant-se que no s'intercanviaran al disc. Això pot ajudar a mantenir el rendiment evitant errors de pàgina i garantint un accés ràpid a les dades.",
 	"Enable Memory Mapping (mmap) to load model data. This option allows the system to use disk storage as an extension of RAM by treating disk files as if they were in RAM. This can improve model performance by allowing for faster data access. However, it may not work correctly with all systems and can consume a significant amount of disk space.": "Activar l'assignació de memòria (mmap) per carregar les dades del model. Aquesta opció permet que el sistema utilitzi l'emmagatzematge en disc com a extensió de la memòria RAM tractant els fitxers de disc com si estiguessin a la memòria RAM. Això pot millorar el rendiment del model permetent un accés més ràpid a les dades. Tanmateix, és possible que no funcioni correctament amb tots els sistemes i pot consumir una quantitat important d'espai en disc.",
 	"Enable Message Rating": "Permetre la qualificació de missatges",
 	"Enable Mirostat sampling for controlling perplexity. (Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "Activar el mostreig de Mirostat per controlar la perplexitat. (Per defecte: 0, 0 = Inhabilitat, 1 = Mirostat, 2 = Mirostat 2.0)",
 	"Enable New Sign Ups": "Permetre nous registres",
-	"Enable Web Search": "Activar la cerca web",
 	"Enabled": "Habilitat",
-	"Engine": "Motor",
 	"Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "Assegura't que els teus fitxers CSV inclouen 4 columnes en aquest ordre: Nom, Correu electrònic, Contrasenya, Rol.",
 	"Enter {{role}} message here": "Introdueix aquí el missatge de {{role}}",
 	"Enter a detail about yourself for your LLMs to recall": "Introdueix un detall sobre tu què els teus models de llenguatge puguin recordar",
@@ -365,13 +369,15 @@
 	"Enter Application DN Password": "Introdueix la contrasenya del DN d'aplicació",
 	"Enter Bing Search V7 Endpoint": "Introdueix el punt de connexió de Bing Search V7",
 	"Enter Bing Search V7 Subscription Key": "Introdueix la clau de subscripció de Bing Search V7",
-	"Enter Bocha Search API Key": "",
+	"Enter Bocha Search API Key": "Introdueix la clau API de Bocha Search",
 	"Enter Brave Search API Key": "Introdueix la clau API de Brave Search",
 	"Enter certificate path": "Introdueix el camí del certificat",
 	"Enter CFG Scale (e.g. 7.0)": "Entra l'escala CFG (p.ex. 7.0)",
 	"Enter Chunk Overlap": "Introdueix la mida de solapament de blocs",
 	"Enter Chunk Size": "Introdueix la mida del bloc",
 	"Enter description": "Introdueix la descripció",
+	"Enter Document Intelligence Endpoint": "",
+	"Enter Document Intelligence Key": "",
 	"Enter domains separated by commas (e.g., example.com,site.org)": "Introdueix els dominis separats per comes (p. ex. example.com,site.org)",
 	"Enter Exa API Key": "Introdueix la clau API de d'EXA",
 	"Enter Github Raw URL": "Introdueix l'URL en brut de Github",
@@ -379,9 +385,9 @@
 	"Enter Google PSE Engine Id": "Introdueix l'identificador del motor PSE de Google",
 	"Enter Image Size (e.g. 512x512)": "Introdueix la mida de la imatge (p. ex. 512x512)",
 	"Enter Jina API Key": "Introdueix la clau API de Jina",
-	"Enter Jupyter Password": "",
-	"Enter Jupyter Token": "",
-	"Enter Jupyter URL": "",
+	"Enter Jupyter Password": "Introdueix la contrasenya de Jupyter",
+	"Enter Jupyter Token": "Introdueix el token de Jupyter",
+	"Enter Jupyter URL": "Introdueix la URL de Jupyter",
 	"Enter Kagi Search API Key": "Introdueix la clau API de Kagi Search",
 	"Enter language codes": "Introdueix els codis de llenguatge",
 	"Enter Model ID": "Introdueix l'identificador del model",
@@ -397,8 +403,8 @@
 	"Enter SearchApi Engine": "Introdueix el motor SearchApi",
 	"Enter Searxng Query URL": "Introdueix l'URL de consulta de Searxng",
 	"Enter Seed": "Introdueix la llavor",
-	"Enter SerpApi API Key": "",
-	"Enter SerpApi Engine": "",
+	"Enter SerpApi API Key": "Introdueix la clau API SerpApi",
+	"Enter SerpApi Engine": "Introdueix el motor API SerpApi",
 	"Enter Serper API Key": "Introdueix la clau API Serper",
 	"Enter Serply API Key": "Introdueix la clau API Serply",
 	"Enter Serpstack API Key": "Introdueix la clau API Serpstack",
@@ -410,7 +416,7 @@
 	"Enter Tavily API Key": "Introdueix la clau API de Tavily",
 	"Enter the public URL of your WebUI. This URL will be used to generate links in the notifications.": "Entra la URL pública de WebUI. Aquesta URL s'utilitzarà per generar els enllaços en les notificacions.",
 	"Enter Tika Server URL": "Introdueix l'URL del servidor Tika",
-	"Enter timeout in seconds": "",
+	"Enter timeout in seconds": "Entra el temps màxim en segons",
 	"Enter Top K": "Introdueix Top K",
 	"Enter URL (e.g. http://127.0.0.1:7860/)": "Introdueix l'URL (p. ex. http://127.0.0.1:7860/)",
 	"Enter URL (e.g. http://localhost:11434)": "Introdueix l'URL (p. ex. http://localhost:11434)",
@@ -458,7 +464,7 @@
 	"Failed to save models configuration": "No s'ha pogut desar la configuració dels models",
 	"Failed to update settings": "No s'han pogut actualitzar les preferències",
 	"Failed to upload file.": "No s'ha pogut pujar l'arxiu.",
-	"Features": "",
+	"Features": "Característiques",
 	"Features Permissions": "Permisos de les característiques",
 	"February": "Febrer",
 	"Feedback History": "Històric de comentaris",
@@ -488,7 +494,7 @@
 	"Form": "Formulari",
 	"Format your variables using brackets like this:": "Formata les teves variables utilitzant claudàtors així:",
 	"Frequency Penalty": "Penalització per freqüència",
-	"Full Context Mode": "",
+	"Full Context Mode": "Mode de context complert",
 	"Function": "Funció",
 	"Function Calling": "Crida a funcions",
 	"Function created successfully": "La funció s'ha creat correctament",
@@ -503,13 +509,13 @@
 	"Functions allow arbitrary code execution": "Les funcions permeten l'execució de codi arbitrari",
 	"Functions allow arbitrary code execution.": "Les funcions permeten l'execució de codi arbitrari.",
 	"Functions imported successfully": "Les funcions s'han importat correctament",
-	"Gemini": "",
-	"Gemini API Config": "",
-	"Gemini API Key is required.": "",
+	"Gemini": "Gemini",
+	"Gemini API Config": "Configuració de Gemini API",
+	"Gemini API Key is required.": "La clau API de Gemini és necessària",
 	"General": "General",
-	"General Settings": "Preferències generals",
 	"Generate an image": "Generar una imatge",
 	"Generate Image": "Generar imatge",
+	"Generate prompt pair": "",
 	"Generating search query": "Generant consulta",
 	"Get started": "Començar",
 	"Get started with {{WEBUI_NAME}}": "Començar amb {{WEBUI_NAME}}",
@@ -532,7 +538,7 @@
 	"Hex Color": "Color hexadecimal",
 	"Hex Color - Leave empty for default color": "Color hexadecimal - Deixar buit per a color per defecte",
 	"Hide": "Amaga",
-	"Home": "",
+	"Home": "Inici",
 	"Host": "Servidor",
 	"How can I help you today?": "Com et puc ajudar avui?",
 	"How would you rate this response?": "Com avaluaries aquesta resposta?",
@@ -565,6 +571,7 @@
 	"Input commands": "Entra comandes",
 	"Install from Github URL": "Instal·lar des de l'URL de Github",
 	"Instant Auto-Send After Voice Transcription": "Enviament automàtic després de la transcripció de veu",
+	"Integration": "",
 	"Interface": "Interfície",
 	"Invalid file format.": "Format d'arxiu no vàlid.",
 	"Invalid Tag": "Etiqueta no vàlida",
@@ -576,8 +583,8 @@
 	"JSON Preview": "Vista prèvia del document JSON",
 	"July": "Juliol",
 	"June": "Juny",
-	"Jupyter Auth": "",
-	"Jupyter URL": "",
+	"Jupyter Auth": "Autenticació Jupyter",
+	"Jupyter URL": "URL de Jupyter",
 	"JWT Expiration": "Caducitat del JWT",
 	"JWT Token": "Token JWT",
 	"Kagi Search API Key": "Clau API de Kagi Search",
@@ -607,22 +614,23 @@
 	"Leave empty to include all models or select specific models": "Deixa-ho en blanc per incloure tots els models o selecciona models específics",
 	"Leave empty to use the default prompt, or enter a custom prompt": "Deixa-ho en blanc per utilitzar la indicació predeterminada o introdueix una indicació personalitzada",
 	"Leave model field empty to use the default model.": "Deixa el camp de model buit per utilitzar el model per defecte.",
-	"License": "",
+	"License": "Llicència",
 	"Light": "Clar",
 	"Listening...": "Escoltant...",
 	"Llama.cpp": "Llama.cpp",
 	"LLMs can make mistakes. Verify important information.": "Els models de llenguatge poden cometre errors. Verifica la informació important.",
-	"Loading Kokoro.js...": "",
+	"Loader": "",
+	"Loading Kokoro.js...": "Carregant Kokoro.js",
 	"Local": "Local",
 	"Local Models": "Models locals",
+	"Location access not allowed": "",
 	"Lost": "Perdut",
 	"LTR": "LTR",
 	"Made by Open WebUI Community": "Creat per la Comunitat OpenWebUI",
 	"Make sure to enclose them with": "Assegura't d'envoltar-los amb",
 	"Make sure to export a workflow.json file as API format from ComfyUI.": "Assegura't d'exportar un fitxer workflow.json com a format API des de ComfyUI.",
 	"Manage": "Gestionar",
-	"Manage Arena Models": "Gestionar els models de l'Arena",
-	"Manage Direct Connections": "",
+	"Manage Direct Connections": "Gestionar les connexions directes",
 	"Manage Models": "Gestionar els models",
 	"Manage Ollama": "Gestionar Ollama",
 	"Manage Ollama API Connections": "Gestionar les connexions a l'API d'Ollama",
@@ -689,6 +697,7 @@
 	"No HTML, CSS, or JavaScript content found.": "No s'ha trobat contingut HTML, CSS o JavaScript.",
 	"No inference engine with management support found": "No s'ha trobat un motor d'inferència amb suport de gestió",
 	"No knowledge found": "No s'ha trobat Coneixement",
+	"No memories to clear": "",
 	"No model IDs": "No hi ha IDs de model",
 	"No models found": "No s'han trobat models",
 	"No models selected": "No s'ha seleccionat cap model",
@@ -718,6 +727,7 @@
 	"Ollama API settings updated": "La configuració de l'API d'Ollama s'ha actualitzat",
 	"Ollama Version": "Versió d'Ollama",
 	"On": "Activat",
+	"OneDrive": "",
 	"Only alphanumeric characters and hyphens are allowed": "Només es permeten caràcters alfanumèrics i guions",
 	"Only alphanumeric characters and hyphens are allowed in the command string.": "Només es permeten caràcters alfanumèrics i guions en la comanda.",
 	"Only collections can be edited, create a new knowledge base to edit/add documents.": "Només es poden editar col·leccions, crea una nova base de coneixement per editar/afegir documents.",
@@ -766,7 +776,7 @@
 	"Plain text (.txt)": "Text pla (.txt)",
 	"Playground": "Zona de jocs",
 	"Please carefully review the following warnings:": "Si us plau, revisa els següents avisos amb cura:",
-	"Please do not close the settings page while loading the model.": "",
+	"Please do not close the settings page while loading the model.": "No tanquis la pàgina de configuració mentre carregues el model.",
 	"Please enter a prompt": "Si us plau, entra una indicació",
 	"Please fill in all fields.": "Emplena tots els camps, si us plau.",
 	"Please select a model first.": "Si us plau, selecciona un model primer",
@@ -776,7 +786,7 @@
 	"Positive attitude": "Actitud positiva",
 	"Prefix ID": "Identificador del prefix",
 	"Prefix ID is used to avoid conflicts with other connections by adding a prefix to the model IDs - leave empty to disable": "L'identificador de prefix s'utilitza per evitar conflictes amb altres connexions afegint un prefix als ID de model; deixa'l en blanc per desactivar-lo.",
-	"Presence Penalty": "",
+	"Presence Penalty": "Penalització de presència",
 	"Previous 30 days": "30 dies anteriors",
 	"Previous 7 days": "7 dies anteriors",
 	"Profile Image": "Imatge de perfil",
@@ -788,11 +798,9 @@
 	"Prompt updated successfully": "Indicació actualitzada correctament",
 	"Prompts": "Indicacions",
 	"Prompts Access": "Accés a les indicacions",
-	"Proxy URL": "URL del proxy",
 	"Pull \"{{searchValue}}\" from Ollama.com": "Obtenir \"{{searchValue}}\" de Ollama.com",
 	"Pull a model from Ollama.com": "Obtenir un model d'Ollama.com",
 	"Query Generation Prompt": "Indicació per a generació de consulta",
-	"Query Params": "Paràmetres de consulta",
 	"RAG Template": "Plantilla RAG",
 	"Rating": "Valoració",
 	"Re-rank models by topic similarity": "Reclassificar els models per similitud de temes",
@@ -813,7 +821,7 @@
 	"Rename": "Canviar el nom",
 	"Reorder Models": "Reordenar els models",
 	"Repeat Last N": "Repeteix els darrers N",
-	"Repeat Penalty (Ollama)": "",
+	"Repeat Penalty (Ollama)": "Penalització per repetició (Ollama)",
 	"Reply in Thread": "Respondre al fil",
 	"Request Mode": "Mode de sol·licitud",
 	"Reranking Model": "Model de reavaluació",
@@ -827,6 +835,7 @@
 	"Response notifications cannot be activated as the website permissions have been denied. Please visit your browser settings to grant the necessary access.": "Les notifications de resposta no es poden activar perquè els permisos del lloc web han estat rebutjats. Comprova les preferències del navegador per donar l'accés necessari.",
 	"Response splitting": "Divisió de la resposta",
 	"Result": "Resultat",
+	"Retrieval": "",
 	"Retrieval Query Generation": "Generació de consultes Retrieval",
 	"Rich Text Input for Chat": "Entrada de text ric per al xat",
 	"RK": "RK",
@@ -876,11 +885,10 @@
 	"Select a pipeline": "Seleccionar una Pipeline",
 	"Select a pipeline url": "Seleccionar l'URL d'una Pipeline",
 	"Select a tool": "Seleccionar una eina",
-	"Select an auth method": "",
+	"Select an auth method": "Seleccionar un mètode d'autenticació",
 	"Select an Ollama instance": "Seleccionar una instància d'Ollama",
 	"Select Engine": "Seleccionar el motor",
 	"Select Knowledge": "Seleccionar coneixement",
-	"Select model": "Seleccionar un model",
 	"Select only one model to call": "Seleccionar només un model per trucar",
 	"Selected model(s) do not support image inputs": "El(s) model(s) seleccionats no admeten l'entrada d'imatges",
 	"Semantic distance to query": "Distància semàntica a la pregunta",
@@ -889,8 +897,8 @@
 	"Send message": "Enviar missatge",
 	"Sends `stream_options: { include_usage: true }` in the request.\nSupported providers will return token usage information in the response when set.": "Envia `stream_options: { include_usage: true }` a la sol·licitud.\nEls proveïdors compatibles retornaran la informació d'ús del token a la resposta quan s'estableixi.",
 	"September": "Setembre",
-	"SerpApi API Key": "",
-	"SerpApi Engine": "",
+	"SerpApi API Key": "Clau API de SerpApi",
+	"SerpApi Engine": "Motor de SerpApi",
 	"Serper API Key": "Clau API de Serper",
 	"Serply API Key": "Clau API de Serply",
 	"Serpstack API Key": "Clau API de Serpstack",
@@ -910,8 +918,8 @@
 	"Set the number of worker threads used for computation. This option controls how many threads are used to process incoming requests concurrently. Increasing this value can improve performance under high concurrency workloads but may also consume more CPU resources.": "Establir el nombre de fils de treball utilitzats per al càlcul. Aquesta opció controla quants fils s'utilitzen per processar les sol·licituds entrants simultàniament. Augmentar aquest valor pot millorar el rendiment amb càrregues de treball de concurrència elevada, però també pot consumir més recursos de CPU.",
 	"Set Voice": "Establir la veu",
 	"Set whisper model": "Establir el model whisper",
-	"Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 0)": "",
-	"Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 1.1)": "",
+	"Sets a flat bias against tokens that have appeared at least once. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 0)": "Estableix un biaix pla contra tokens que han aparegut almenys una vegada. Un valor més alt (p. ex., 1,5) penalitzarà les repeticions amb més força, mentre que un valor més baix (p. ex., 0,9) serà més indulgent. A 0, està desactivat. (Per defecte: 0)",
+	"Sets a scaling bias against tokens to penalize repetitions, based on how many times they have appeared. A higher value (e.g., 1.5) will penalize repetitions more strongly, while a lower value (e.g., 0.9) will be more lenient. At 0, it is disabled. (Default: 1.1)": "Estableix un biaix d'escala contra tokens per penalitzar les repeticions, en funció de quantes vegades han aparegut. Un valor més alt (p. ex., 1,5) penalitzarà les repeticions amb més força, mentre que un valor més baix (p. ex., 0,9) serà més indulgent. A 0, està desactivat. (Per defecte: 1.1)",
 	"Sets how far back for the model to look back to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx)": "Establir fins a quin punt el model mira enrere per evitar la repetició. (Per defecte: 64, 0 = desactivat, -1 = num_ctx)",
 	"Sets the random number seed to use for generation. Setting this to a specific number will make the model generate the same text for the same prompt. (Default: random)": "Establir la llavor del nombre aleatori que s'utilitzarà per a la generació. Establir-ho a un número específic farà que el model generi el mateix text per a la mateixa sol·licitud. (Per defecte: aleatori)",
 	"Sets the size of the context window used to generate the next token. (Default: 2048)": "Estableix la mida de la finestra de context utilitzada per generar el següent token. (Per defecte: 2048)",
@@ -957,8 +965,9 @@
 	"Tags Generation": "Generació d'etiquetes",
 	"Tags Generation Prompt": "Indicació per a la generació d'etiquetes",
 	"Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "El mostreig sense cua s'utilitza per reduir l'impacte de tokens menys probables de la sortida. Un valor més alt (p. ex., 2,0) reduirà més l'impacte, mentre que un valor d'1,0 desactiva aquesta configuració. (per defecte: 1)",
+	"Talk to model": "",
 	"Tap to interrupt": "Prem per interrompre",
-	"Tasks": "",
+	"Tasks": "Tasques",
 	"Tavily API Key": "Clau API de Tavily",
 	"Tell us more:": "Dona'ns més informació:",
 	"Temperature": "Temperatura",
@@ -1005,7 +1014,7 @@
 	"Title (e.g. Tell me a fun fact)": "Títol (p. ex. Digues-me quelcom divertit)",
 	"Title Auto-Generation": "Generació automàtica de títol",
 	"Title cannot be an empty string.": "El títol no pot ser una cadena buida.",
-	"Title Generation": "",
+	"Title Generation": "Generació de títols",
 	"Title Generation Prompt": "Indicació de generació de títol",
 	"TLS": "TLS",
 	"To access the available model names for downloading,": "Per accedir als noms dels models disponibles per descarregar,",
@@ -1041,6 +1050,7 @@
 	"Top P": "Top P",
 	"Transformers": "Transformadors",
 	"Trouble accessing Ollama?": "Problemes en accedir a Ollama?",
+	"Trust Proxy Environment": "",
 	"TTS Model": "Model TTS",
 	"TTS Settings": "Preferències de TTS",
 	"TTS Voice": "Veu TTS",
@@ -1062,7 +1072,7 @@
 	"Updated": "Actualitzat",
 	"Updated at": "Actualitzat el",
 	"Updated At": "Actualitzat el",
-	"Upgrade to a licensed plan for enhanced capabilities, including custom theming and branding, and dedicated support.": "",
+	"Upgrade to a licensed plan for enhanced capabilities, including custom theming and branding, and dedicated support.": "Actualitzar a un pla amb llicència per obtenir capacitats millorades, com ara la temàtica personalitzada i la marca, i assistència dedicada.",
 	"Upload": "Pujar",
 	"Upload a GGUF model": "Pujar un model GGUF",
 	"Upload directory": "Pujar directori",
@@ -1101,10 +1111,9 @@
 	"Warning:": "Avís:",
 	"Warning: Enabling this will allow users to upload arbitrary code on the server.": "Avís: Habilitar això permetrà als usuaris penjar codi arbitrari al servidor.",
 	"Warning: If you update or change your embedding model, you will need to re-import all documents.": "Avís: Si s'actualitza o es canvia el model d'incrustació, s'hauran de tornar a importar tots els documents.",
-	"Warning: Jupyter execution enables arbitrary code execution, posing severe security risks—proceed with extreme caution.": "",
+	"Warning: Jupyter execution enables arbitrary code execution, posing severe security risks—proceed with extreme caution.": "Avís: l'execució de Jupyter permet l'execució de codi arbitrari, la qual cosa comporta greus riscos de seguretat; procediu amb extrema precaució.",
 	"Web": "Web",
 	"Web API": "Web API",
-	"Web Loader Settings": "Preferències del carregador web",
 	"Web Search": "Cerca la web",
 	"Web Search Engine": "Motor de cerca de la web",
 	"Web Search in Chat": "Cerca a internet al xat",
@@ -1146,5 +1155,6 @@
 	"Your account status is currently pending activation.": "El compte està actualment pendent d'activació",
 	"Your entire contribution will go directly to the plugin developer; Open WebUI does not take any percentage. However, the chosen funding platform might have its own fees.": "Tota la teva contribució anirà directament al desenvolupador del complement; Open WebUI no se'n queda cap percentatge. Tanmateix, la plataforma de finançament escollida pot tenir les seves pròpies comissions.",
 	"Youtube": "Youtube",
-	"Youtube Loader Settings": "Preferències del carregador de Youtube"
+	"Youtube Language": "",
+	"Youtube Proxy URL": ""
 }

+ 23 - 13
src/lib/i18n/locales/ceb-PH/translation.json

@@ -13,6 +13,7 @@
 	"A task model is used when performing tasks such as generating titles for chats and web search queries": "",
 	"a user": "usa ka user",
 	"About": "Mahitungod sa",
+	"Accept autocomplete generation / Jump to prompt variable": "",
 	"Access": "",
 	"Access Control": "",
 	"Accessible to all users": "",
@@ -85,6 +86,7 @@
 	"Archive All Chats": "",
 	"Archived Chats": "pagrekord sa chat",
 	"archived-chat-export": "",
+	"Are you sure you want to clear all memories? This action cannot be undone.": "",
 	"Are you sure you want to delete this channel?": "",
 	"Are you sure you want to delete this message?": "",
 	"Are you sure you want to unarchive all archived chats?": "",
@@ -93,7 +95,7 @@
 	"Artifacts": "",
 	"Ask a question": "",
 	"Assistant": "",
-	"Attach file": "Ilakip ang usa ka file",
+	"Attach file from knowledge": "",
 	"Attention to detail": "Pagtagad sa mga detalye",
 	"Attribute for Mail": "",
 	"Attribute for Username": "",
@@ -127,6 +129,7 @@
 	"Bocha Search API Key": "",
 	"Brave Search API Key": "",
 	"By {{name}}": "",
+	"Bypass Embedding and Retrieval": "",
 	"Bypass SSL verification for Websites": "",
 	"Calendar": "",
 	"Call": "",
@@ -156,11 +159,11 @@
 	"Checking for updates...": "Pagsusi alang sa mga update...",
 	"Choose a model before saving...": "Pagpili og template sa dili pa i-save...",
 	"Chunk Overlap": "Block overlap",
-	"Chunk Params": "Mga Setting sa Block",
 	"Chunk Size": "Gidak-on sa block",
 	"Ciphers": "",
 	"Citation": "Mga kinutlo",
 	"Clear memory": "",
+	"Clear Memory": "",
 	"click here": "",
 	"Click here for filter guides.": "",
 	"Click here for help.": "I-klik dinhi alang sa tabang.",
@@ -208,7 +211,7 @@
 	"Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort. (Default: medium)": "",
 	"Contact Admin for WebUI Access": "",
 	"Content": "Kontento",
-	"Content Extraction": "",
+	"Content Extraction Engine": "",
 	"Context Length": "Ang gitas-on sa konteksto",
 	"Continue Response": "",
 	"Continue with {{provider}}": "",
@@ -245,6 +248,7 @@
 	"Current Model": "Kasamtangang modelo",
 	"Current Password": "Kasamtangang Password",
 	"Custom": "Custom",
+	"Danger Zone": "",
 	"Dark": "Ngitngit",
 	"Database": "Database",
 	"December": "",
@@ -305,6 +309,8 @@
 	"Do not install functions from sources you do not fully trust.": "",
 	"Do not install tools from sources you do not fully trust.": "",
 	"Document": "Dokumento",
+	"Document Intelligence": "",
+	"Document Intelligence endpoint and key required.": "",
 	"Documentation": "",
 	"Documents": "Mga dokumento",
 	"does not make any external connections, and your data stays securely on your locally hosted server.": "wala maghimo ug eksternal nga koneksyon, ug ang imong data nagpabiling luwas sa imong lokal nga host server.",
@@ -340,6 +346,7 @@
 	"ElevenLabs": "",
 	"Email": "E-mail",
 	"Embark on adventures": "",
+	"Embedding": "",
 	"Embedding Batch Size": "",
 	"Embedding Model": "",
 	"Embedding Model Engine": "",
@@ -348,15 +355,12 @@
 	"Enable autocomplete generation for chat messages": "",
 	"Enable Code Interpreter": "",
 	"Enable Community Sharing": "",
-	"Enable Google Drive": "",
 	"Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "",
 	"Enable Memory Mapping (mmap) to load model data. This option allows the system to use disk storage as an extension of RAM by treating disk files as if they were in RAM. This can improve model performance by allowing for faster data access. However, it may not work correctly with all systems and can consume a significant amount of disk space.": "",
 	"Enable Message Rating": "",
 	"Enable Mirostat sampling for controlling perplexity. (Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "",
 	"Enable New Sign Ups": "I-enable ang bag-ong mga rehistro",
-	"Enable Web Search": "",
 	"Enabled": "",
-	"Engine": "",
 	"Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "",
 	"Enter {{role}} message here": "Pagsulod sa mensahe {{role}} dinhi",
 	"Enter a detail about yourself for your LLMs to recall": "",
@@ -372,6 +376,8 @@
 	"Enter Chunk Overlap": "Pagsulod sa block overlap",
 	"Enter Chunk Size": "Isulod ang block size",
 	"Enter description": "",
+	"Enter Document Intelligence Endpoint": "",
+	"Enter Document Intelligence Key": "",
 	"Enter domains separated by commas (e.g., example.com,site.org)": "",
 	"Enter Exa API Key": "",
 	"Enter Github Raw URL": "",
@@ -507,9 +513,9 @@
 	"Gemini API Config": "",
 	"Gemini API Key is required.": "",
 	"General": "Heneral",
-	"General Settings": "kinatibuk-ang mga setting",
 	"Generate an image": "",
 	"Generate Image": "",
+	"Generate prompt pair": "",
 	"Generating search query": "",
 	"Get started": "",
 	"Get started with {{WEBUI_NAME}}": "",
@@ -565,6 +571,7 @@
 	"Input commands": "Pagsulod sa input commands",
 	"Install from Github URL": "",
 	"Instant Auto-Send After Voice Transcription": "",
+	"Integration": "",
 	"Interface": "Interface",
 	"Invalid file format.": "",
 	"Invalid Tag": "",
@@ -612,16 +619,17 @@
 	"Listening...": "",
 	"Llama.cpp": "",
 	"LLMs can make mistakes. Verify important information.": "Ang mga LLM mahimong masayop. ",
+	"Loader": "",
 	"Loading Kokoro.js...": "",
 	"Local": "",
 	"Local Models": "",
+	"Location access not allowed": "",
 	"Lost": "",
 	"LTR": "",
 	"Made by Open WebUI Community": "Gihimo sa komunidad sa OpenWebUI",
 	"Make sure to enclose them with": "Siguruha nga palibutan sila",
 	"Make sure to export a workflow.json file as API format from ComfyUI.": "",
 	"Manage": "",
-	"Manage Arena Models": "",
 	"Manage Direct Connections": "",
 	"Manage Models": "",
 	"Manage Ollama": "",
@@ -689,6 +697,7 @@
 	"No HTML, CSS, or JavaScript content found.": "",
 	"No inference engine with management support found": "",
 	"No knowledge found": "",
+	"No memories to clear": "",
 	"No model IDs": "",
 	"No models found": "",
 	"No models selected": "",
@@ -718,6 +727,7 @@
 	"Ollama API settings updated": "",
 	"Ollama Version": "Ollama nga bersyon",
 	"On": "Gipaandar",
+	"OneDrive": "",
 	"Only alphanumeric characters and hyphens are allowed": "",
 	"Only alphanumeric characters and hyphens are allowed in the command string.": "Ang alphanumeric nga mga karakter ug hyphen lang ang gitugotan sa command string.",
 	"Only collections can be edited, create a new knowledge base to edit/add documents.": "",
@@ -788,11 +798,9 @@
 	"Prompt updated successfully": "",
 	"Prompts": "Mga aghat",
 	"Prompts Access": "",
-	"Proxy URL": "",
 	"Pull \"{{searchValue}}\" from Ollama.com": "",
 	"Pull a model from Ollama.com": "Pagkuha ug template gikan sa Ollama.com",
 	"Query Generation Prompt": "",
-	"Query Params": "Mga parameter sa pangutana",
 	"RAG Template": "RAG nga modelo",
 	"Rating": "",
 	"Re-rank models by topic similarity": "",
@@ -827,6 +835,7 @@
 	"Response notifications cannot be activated as the website permissions have been denied. Please visit your browser settings to grant the necessary access.": "",
 	"Response splitting": "",
 	"Result": "",
+	"Retrieval": "",
 	"Retrieval Query Generation": "",
 	"Rich Text Input for Chat": "",
 	"RK": "",
@@ -880,7 +889,6 @@
 	"Select an Ollama instance": "",
 	"Select Engine": "",
 	"Select Knowledge": "",
-	"Select model": "Pagpili og modelo",
 	"Select only one model to call": "",
 	"Selected model(s) do not support image inputs": "",
 	"Semantic distance to query": "",
@@ -957,6 +965,7 @@
 	"Tags Generation": "",
 	"Tags Generation Prompt": "",
 	"Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "",
+	"Talk to model": "",
 	"Tap to interrupt": "",
 	"Tasks": "",
 	"Tavily API Key": "",
@@ -1041,6 +1050,7 @@
 	"Top P": "Ibabaw nga P",
 	"Transformers": "",
 	"Trouble accessing Ollama?": "Adunay mga problema sa pag-access sa Ollama?",
+	"Trust Proxy Environment": "",
 	"TTS Model": "",
 	"TTS Settings": "Mga Setting sa TTS",
 	"TTS Voice": "",
@@ -1104,7 +1114,6 @@
 	"Warning: Jupyter execution enables arbitrary code execution, posing severe security risks—proceed with extreme caution.": "",
 	"Web": "Web",
 	"Web API": "",
-	"Web Loader Settings": "",
 	"Web Search": "",
 	"Web Search Engine": "",
 	"Web Search in Chat": "",
@@ -1146,5 +1155,6 @@
 	"Your account status is currently pending activation.": "",
 	"Your entire contribution will go directly to the plugin developer; Open WebUI does not take any percentage. However, the chosen funding platform might have its own fees.": "",
 	"Youtube": "",
-	"Youtube Loader Settings": ""
+	"Youtube Language": "",
+	"Youtube Proxy URL": ""
 }

+ 23 - 13
src/lib/i18n/locales/cs-CZ/translation.json

@@ -13,6 +13,7 @@
 	"A task model is used when performing tasks such as generating titles for chats and web search queries": "Model úloh se používá při provádění úloh, jako je generování názvů pro chaty a vyhledávací dotazy na webu.",
 	"a user": "uživatel",
 	"About": "O programu",
+	"Accept autocomplete generation / Jump to prompt variable": "",
 	"Access": "Přístup",
 	"Access Control": "",
 	"Accessible to all users": "Přístupné pro všecny uživatele",
@@ -85,6 +86,7 @@
 	"Archive All Chats": "Archivovat všechny chaty",
 	"Archived Chats": "Archivované chaty",
 	"archived-chat-export": "",
+	"Are you sure you want to clear all memories? This action cannot be undone.": "",
 	"Are you sure you want to delete this channel?": "",
 	"Are you sure you want to delete this message?": "",
 	"Are you sure you want to unarchive all archived chats?": "",
@@ -93,7 +95,7 @@
 	"Artifacts": "Artefakty",
 	"Ask a question": "Zeptejte se na otázku",
 	"Assistant": "Ano, jak vám mohu pomoci?",
-	"Attach file": "Připojit soubor",
+	"Attach file from knowledge": "",
 	"Attention to detail": "Pozornost k detailům",
 	"Attribute for Mail": "",
 	"Attribute for Username": "",
@@ -127,6 +129,7 @@
 	"Bocha Search API Key": "",
 	"Brave Search API Key": "Klíč API pro Brave Search",
 	"By {{name}}": "",
+	"Bypass Embedding and Retrieval": "",
 	"Bypass SSL verification for Websites": "Obcházení ověření SSL pro webové stránky",
 	"Calendar": "",
 	"Call": "Volání",
@@ -156,11 +159,11 @@
 	"Checking for updates...": "Kontrola aktualizací...",
 	"Choose a model before saving...": "Vyberte model před uložením...",
 	"Chunk Overlap": "",
-	"Chunk Params": "",
 	"Chunk Size": "",
 	"Ciphers": "",
 	"Citation": "Odkaz",
 	"Clear memory": "Vymazat paměť",
+	"Clear Memory": "",
 	"click here": "",
 	"Click here for filter guides.": "",
 	"Click here for help.": "Klikněte zde pro nápovědu.",
@@ -208,7 +211,7 @@
 	"Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort. (Default: medium)": "",
 	"Contact Admin for WebUI Access": "Kontaktujte administrátora pro přístup k webovému rozhraní.",
 	"Content": "Obsah",
-	"Content Extraction": "Extrahování obsahu",
+	"Content Extraction Engine": "",
 	"Context Length": "Délka kontextu",
 	"Continue Response": "Pokračovat v odpovědi",
 	"Continue with {{provider}}": "Pokračovat s {{provider}}",
@@ -245,6 +248,7 @@
 	"Current Model": "Aktuální model",
 	"Current Password": "Aktuální heslo",
 	"Custom": "Na míru",
+	"Danger Zone": "",
 	"Dark": "Tmavý",
 	"Database": "Databáze",
 	"December": "Prosinec",
@@ -305,6 +309,8 @@
 	"Do not install functions from sources you do not fully trust.": "Neinstalujte funkce ze zdrojů, kterým plně nedůvěřujete.",
 	"Do not install tools from sources you do not fully trust.": "Neinstalujte nástroje ze zdrojů, kterým plně nedůvěřujete.",
 	"Document": "Dokument",
+	"Document Intelligence": "",
+	"Document Intelligence endpoint and key required.": "",
 	"Documentation": "Dokumentace",
 	"Documents": "Dokumenty",
 	"does not make any external connections, and your data stays securely on your locally hosted server.": "nevytváří žádná externí připojení a vaše data zůstávají bezpečně na vašem lokálním serveru.",
@@ -340,6 +346,7 @@
 	"ElevenLabs": "ElevenLabs",
 	"Email": "E-mail",
 	"Embark on adventures": "",
+	"Embedding": "",
 	"Embedding Batch Size": "",
 	"Embedding Model": "Vkládací model (Embedding Model)",
 	"Embedding Model Engine": "",
@@ -348,15 +355,12 @@
 	"Enable autocomplete generation for chat messages": "",
 	"Enable Code Interpreter": "",
 	"Enable Community Sharing": "Povolit sdílení komunity",
-	"Enable Google Drive": "",
 	"Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "",
 	"Enable Memory Mapping (mmap) to load model data. This option allows the system to use disk storage as an extension of RAM by treating disk files as if they were in RAM. This can improve model performance by allowing for faster data access. However, it may not work correctly with all systems and can consume a significant amount of disk space.": "",
 	"Enable Message Rating": "Povolit hodnocení zpráv",
 	"Enable Mirostat sampling for controlling perplexity. (Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "",
 	"Enable New Sign Ups": "Povolit nové registrace",
-	"Enable Web Search": "Povolit webové vyhledávání",
 	"Enabled": "Povoleno",
-	"Engine": "Engine",
 	"Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "Ujistěte se, že váš CSV soubor obsahuje 4 sloupce v tomto pořadí: Name, Email, Password, Role.",
 	"Enter {{role}} message here": "Zadejte zprávu {{role}} sem",
 	"Enter a detail about yourself for your LLMs to recall": "Zadejte podrobnost o sobě, kterou si vaše LLM mají pamatovat.",
@@ -372,6 +376,8 @@
 	"Enter Chunk Overlap": "Zadejte překryv části",
 	"Enter Chunk Size": "Zadejte velikost bloku",
 	"Enter description": "Zadejte popis",
+	"Enter Document Intelligence Endpoint": "",
+	"Enter Document Intelligence Key": "",
 	"Enter domains separated by commas (e.g., example.com,site.org)": "",
 	"Enter Exa API Key": "",
 	"Enter Github Raw URL": "Zadejte URL adresu Github Raw",
@@ -507,9 +513,9 @@
 	"Gemini API Config": "",
 	"Gemini API Key is required.": "",
 	"General": "Obecný",
-	"General Settings": "Obecná nastavení",
 	"Generate an image": "",
 	"Generate Image": "Vygenerovat obrázek",
+	"Generate prompt pair": "",
 	"Generating search query": "Generování vyhledávacího dotazu",
 	"Get started": "",
 	"Get started with {{WEBUI_NAME}}": "",
@@ -565,6 +571,7 @@
 	"Input commands": "Vstupní příkazy",
 	"Install from Github URL": "Instalace z URL adresy Githubu",
 	"Instant Auto-Send After Voice Transcription": "Okamžité automatické odeslání po přepisu hlasu",
+	"Integration": "",
 	"Interface": "Rozhraní",
 	"Invalid file format.": "Neplatný formát souboru.",
 	"Invalid Tag": "Neplatný tag",
@@ -612,16 +619,17 @@
 	"Listening...": "Poslouchání...",
 	"Llama.cpp": "",
 	"LLMs can make mistakes. Verify important information.": "LLM mohou dělat chyby. Ověřte si důležité informace.",
+	"Loader": "",
 	"Loading Kokoro.js...": "",
 	"Local": "",
 	"Local Models": "Lokální modely",
+	"Location access not allowed": "",
 	"Lost": "Ztracený",
 	"LTR": "LTR",
 	"Made by Open WebUI Community": "Vytvořeno komunitou OpenWebUI",
 	"Make sure to enclose them with": "Ujistěte se, že jsou uzavřeny pomocí",
 	"Make sure to export a workflow.json file as API format from ComfyUI.": "Ujistěte se, že exportujete soubor workflow.json ve formátu API z ComfyUI.",
 	"Manage": "Spravovat",
-	"Manage Arena Models": "Správa modelů v Arena",
 	"Manage Direct Connections": "",
 	"Manage Models": "",
 	"Manage Ollama": "",
@@ -689,6 +697,7 @@
 	"No HTML, CSS, or JavaScript content found.": "Nebyl nalezen žádný obsah HTML, CSS ani JavaScriptu.",
 	"No inference engine with management support found": "",
 	"No knowledge found": "Nebyly nalezeny žádné znalosti",
+	"No memories to clear": "",
 	"No model IDs": "",
 	"No models found": "Nebyly nalezeny žádné modely",
 	"No models selected": "",
@@ -718,6 +727,7 @@
 	"Ollama API settings updated": "",
 	"Ollama Version": "Verze Ollama",
 	"On": "Na",
+	"OneDrive": "",
 	"Only alphanumeric characters and hyphens are allowed": "",
 	"Only alphanumeric characters and hyphens are allowed in the command string.": "Příkazový řetězec smí obsahovat pouze alfanumerické znaky a pomlčky.",
 	"Only collections can be edited, create a new knowledge base to edit/add documents.": "Pouze kolekce mohou být upravovány, pro úpravu/přidání dokumentů vytvořte novou znalostní bázi.",
@@ -788,11 +798,9 @@
 	"Prompt updated successfully": "",
 	"Prompts": "Prompty",
 	"Prompts Access": "",
-	"Proxy URL": "",
 	"Pull \"{{searchValue}}\" from Ollama.com": "Stáhněte \"{{searchValue}}\" z Ollama.com",
 	"Pull a model from Ollama.com": "Stáhněte model z Ollama.com",
 	"Query Generation Prompt": "",
-	"Query Params": "Parametry dotazu",
 	"RAG Template": "Šablona RAG",
 	"Rating": "Hodnocení",
 	"Re-rank models by topic similarity": "Znovu seřaďte modely podle podobnosti témat.",
@@ -827,6 +835,7 @@
 	"Response notifications cannot be activated as the website permissions have been denied. Please visit your browser settings to grant the necessary access.": "Oznámení o odpovědích nelze aktivovat, protože oprávnění webu byla zamítnuta. Navštivte nastavení svého prohlížeče a udělte potřebný přístup.",
 	"Response splitting": "Rozdělení odpovědi",
 	"Result": "Výsledek",
+	"Retrieval": "",
 	"Retrieval Query Generation": "",
 	"Rich Text Input for Chat": "Vstup pro chat ve formátu Rich Text",
 	"RK": "RK",
@@ -880,7 +889,6 @@
 	"Select an Ollama instance": "",
 	"Select Engine": "Vyberte engine",
 	"Select Knowledge": "Vybrat znalosti",
-	"Select model": "Vyberte model",
 	"Select only one model to call": "Vyberte pouze jeden model, který chcete použít",
 	"Selected model(s) do not support image inputs": "Vybraný(é) model(y) nepodporují vstupy v podobě obrázků.",
 	"Semantic distance to query": "Semantická vzdálenost k dotazu",
@@ -957,6 +965,7 @@
 	"Tags Generation": "",
 	"Tags Generation Prompt": "Prompt pro generování značek",
 	"Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "",
+	"Talk to model": "",
 	"Tap to interrupt": "Klepněte pro přerušení",
 	"Tasks": "",
 	"Tavily API Key": "Klíč API pro Tavily",
@@ -1041,6 +1050,7 @@
 	"Top P": "Top P",
 	"Transformers": "",
 	"Trouble accessing Ollama?": "Máte potíže s přístupem k Ollama?",
+	"Trust Proxy Environment": "",
 	"TTS Model": "Model převodu textu na řeč (TTS)",
 	"TTS Settings": "Nastavení TTS (Text-to-Speech)",
 	"TTS Voice": "TTS hlas",
@@ -1104,7 +1114,6 @@
 	"Warning: Jupyter execution enables arbitrary code execution, posing severe security risks—proceed with extreme caution.": "",
 	"Web": "Web",
 	"Web API": "Webové API",
-	"Web Loader Settings": "Nastavení Web Loaderu",
 	"Web Search": "Vyhledávání na webu",
 	"Web Search Engine": "Webový vyhledávač",
 	"Web Search in Chat": "",
@@ -1146,5 +1155,6 @@
 	"Your account status is currently pending activation.": "Stav vašeho účtu je nyní čekající na aktivaci.",
 	"Your entire contribution will go directly to the plugin developer; Open WebUI does not take any percentage. However, the chosen funding platform might have its own fees.": "Váš celý příspěvek půjde přímo vývojáři pluginu; Open WebUI si nebere žádné procento. Vybraná platforma pro financování však může mít vlastní poplatky.",
 	"Youtube": "YouTube",
-	"Youtube Loader Settings": "Nastavení YouTube loaderu"
+	"Youtube Language": "",
+	"Youtube Proxy URL": ""
 }

+ 23 - 13
src/lib/i18n/locales/da-DK/translation.json

@@ -13,6 +13,7 @@
 	"A task model is used when performing tasks such as generating titles for chats and web search queries": "En 'task model' bliver brugt til at opgaver såsom at generere overskrifter til chats eller internetsøgninger",
 	"a user": "en bruger",
 	"About": "Information",
+	"Accept autocomplete generation / Jump to prompt variable": "",
 	"Access": "",
 	"Access Control": "",
 	"Accessible to all users": "",
@@ -85,6 +86,7 @@
 	"Archive All Chats": "Arkiver alle chats",
 	"Archived Chats": "Arkiverede chats",
 	"archived-chat-export": "",
+	"Are you sure you want to clear all memories? This action cannot be undone.": "",
 	"Are you sure you want to delete this channel?": "",
 	"Are you sure you want to delete this message?": "",
 	"Are you sure you want to unarchive all archived chats?": "",
@@ -93,7 +95,7 @@
 	"Artifacts": "Artifakter",
 	"Ask a question": "Stil et spørgsmål",
 	"Assistant": "",
-	"Attach file": "Vedhæft fil",
+	"Attach file from knowledge": "",
 	"Attention to detail": "Detajleorientering",
 	"Attribute for Mail": "",
 	"Attribute for Username": "",
@@ -127,6 +129,7 @@
 	"Bocha Search API Key": "",
 	"Brave Search API Key": "Brave Search API nøgle",
 	"By {{name}}": "",
+	"Bypass Embedding and Retrieval": "",
 	"Bypass SSL verification for Websites": "Forbigå SSL verifikation på websider",
 	"Calendar": "",
 	"Call": "Opkald",
@@ -156,11 +159,11 @@
 	"Checking for updates...": "Søger efter opdateringer",
 	"Choose a model before saving...": "Vælg en model før du gemmer",
 	"Chunk Overlap": "Chunk overlap",
-	"Chunk Params": "Chunk parametre",
 	"Chunk Size": "Chunk størrelse",
 	"Ciphers": "",
 	"Citation": "Citat",
 	"Clear memory": "Slet hukommelse",
+	"Clear Memory": "",
 	"click here": "",
 	"Click here for filter guides.": "",
 	"Click here for help.": "Klik her for hjælp",
@@ -208,7 +211,7 @@
 	"Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort. (Default: medium)": "",
 	"Contact Admin for WebUI Access": "Kontakt din administrator for adgang til WebUI",
 	"Content": "Indhold",
-	"Content Extraction": "Udtræk af indhold",
+	"Content Extraction Engine": "",
 	"Context Length": "Kontekst længde",
 	"Continue Response": "Fortsæt svar",
 	"Continue with {{provider}}": "Fortsæt med {{provider}}",
@@ -245,6 +248,7 @@
 	"Current Model": "Nuværende model",
 	"Current Password": "Nuværende password",
 	"Custom": "Custom",
+	"Danger Zone": "",
 	"Dark": "Mørk",
 	"Database": "Database",
 	"December": "december",
@@ -305,6 +309,8 @@
 	"Do not install functions from sources you do not fully trust.": "Lad være med at installere funktioner fra kilder, som du ikke stoler på.",
 	"Do not install tools from sources you do not fully trust.": "Lad være med at installere værktøjer fra kilder, som du ikke stoler på.",
 	"Document": "Dokument",
+	"Document Intelligence": "",
+	"Document Intelligence endpoint and key required.": "",
 	"Documentation": "Dokumentation",
 	"Documents": "Dokumenter",
 	"does not make any external connections, and your data stays securely on your locally hosted server.": "laver ikke eksterne kald, og din data bliver sikkert på din egen lokalt hostede server.",
@@ -340,6 +346,7 @@
 	"ElevenLabs": "ElevenLabs",
 	"Email": "Email",
 	"Embark on adventures": "",
+	"Embedding": "",
 	"Embedding Batch Size": "Embedding Batch størrelse",
 	"Embedding Model": "Embedding Model",
 	"Embedding Model Engine": "Embedding Model engine",
@@ -348,15 +355,12 @@
 	"Enable autocomplete generation for chat messages": "",
 	"Enable Code Interpreter": "",
 	"Enable Community Sharing": "Aktiver deling til Community",
-	"Enable Google Drive": "",
 	"Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "",
 	"Enable Memory Mapping (mmap) to load model data. This option allows the system to use disk storage as an extension of RAM by treating disk files as if they were in RAM. This can improve model performance by allowing for faster data access. However, it may not work correctly with all systems and can consume a significant amount of disk space.": "",
 	"Enable Message Rating": "Aktiver rating af besked",
 	"Enable Mirostat sampling for controlling perplexity. (Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "",
 	"Enable New Sign Ups": "Aktiver nye signups",
-	"Enable Web Search": "Aktiver websøgning",
 	"Enabled": "Aktiveret",
-	"Engine": "engine",
 	"Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "Sørg for at din CSV-fil indeholder 4 kolonner in denne rækkefølge: Name, Email, Password, Role.",
 	"Enter {{role}} message here": "Indtast {{role}} besked her",
 	"Enter a detail about yourself for your LLMs to recall": "Indtast en detalje om dig selv, som dine LLMs kan huske",
@@ -372,6 +376,8 @@
 	"Enter Chunk Overlap": "Indtast overlapning af tekststykker",
 	"Enter Chunk Size": "Indtast størrelse af tekststykker",
 	"Enter description": "",
+	"Enter Document Intelligence Endpoint": "",
+	"Enter Document Intelligence Key": "",
 	"Enter domains separated by commas (e.g., example.com,site.org)": "",
 	"Enter Exa API Key": "",
 	"Enter Github Raw URL": "Indtast Github Raw URL",
@@ -507,9 +513,9 @@
 	"Gemini API Config": "",
 	"Gemini API Key is required.": "",
 	"General": "Generelt",
-	"General Settings": "Generelle indstillinger",
 	"Generate an image": "",
 	"Generate Image": "Generer billede",
+	"Generate prompt pair": "",
 	"Generating search query": "Genererer søgeforespørgsel",
 	"Get started": "",
 	"Get started with {{WEBUI_NAME}}": "",
@@ -565,6 +571,7 @@
 	"Input commands": "Inputkommandoer",
 	"Install from Github URL": "Installer fra Github URL",
 	"Instant Auto-Send After Voice Transcription": "Øjeblikkelig automatisk afsendelse efter stemmetransskription",
+	"Integration": "",
 	"Interface": "Grænseflade",
 	"Invalid file format.": "",
 	"Invalid Tag": "Ugyldigt tag",
@@ -612,16 +619,17 @@
 	"Listening...": "Lytter...",
 	"Llama.cpp": "",
 	"LLMs can make mistakes. Verify important information.": "LLM'er kan lave fejl. Bekræft vigtige oplysninger.",
+	"Loader": "",
 	"Loading Kokoro.js...": "",
 	"Local": "",
 	"Local Models": "Lokale modeller",
+	"Location access not allowed": "",
 	"Lost": "",
 	"LTR": "LTR",
 	"Made by Open WebUI Community": "Lavet af OpenWebUI Community",
 	"Make sure to enclose them with": "Sørg for at omslutte dem med",
 	"Make sure to export a workflow.json file as API format from ComfyUI.": "Sørg for at eksportere en workflow.json-fil som API-format fra ComfyUI.",
 	"Manage": "Administrer",
-	"Manage Arena Models": "",
 	"Manage Direct Connections": "",
 	"Manage Models": "",
 	"Manage Ollama": "",
@@ -689,6 +697,7 @@
 	"No HTML, CSS, or JavaScript content found.": "Intet HTML-, CSS- eller JavaScript-indhold fundet.",
 	"No inference engine with management support found": "",
 	"No knowledge found": "Ingen viden fundet",
+	"No memories to clear": "",
 	"No model IDs": "",
 	"No models found": "",
 	"No models selected": "",
@@ -718,6 +727,7 @@
 	"Ollama API settings updated": "",
 	"Ollama Version": "Ollama-version",
 	"On": "Til",
+	"OneDrive": "",
 	"Only alphanumeric characters and hyphens are allowed": "",
 	"Only alphanumeric characters and hyphens are allowed in the command string.": "Kun alfanumeriske tegn og bindestreger er tilladt i kommandostrengen.",
 	"Only collections can be edited, create a new knowledge base to edit/add documents.": "Kun samlinger kan redigeres, opret en ny vidensbase for at redigere/tilføje dokumenter.",
@@ -788,11 +798,9 @@
 	"Prompt updated successfully": "",
 	"Prompts": "Prompts",
 	"Prompts Access": "",
-	"Proxy URL": "",
 	"Pull \"{{searchValue}}\" from Ollama.com": "Hent \"{{searchValue}}\" fra Ollama.com",
 	"Pull a model from Ollama.com": "Hent en model fra Ollama.com",
 	"Query Generation Prompt": "",
-	"Query Params": "Forespørgselsparametre",
 	"RAG Template": "RAG-skabelon",
 	"Rating": "",
 	"Re-rank models by topic similarity": "",
@@ -827,6 +835,7 @@
 	"Response notifications cannot be activated as the website permissions have been denied. Please visit your browser settings to grant the necessary access.": "Svarnotifikationer kan ikke aktiveres, da webstedets tilladelser er blevet nægtet. Besøg dine browserindstillinger for at give den nødvendige adgang.",
 	"Response splitting": "Svaropdeling",
 	"Result": "",
+	"Retrieval": "",
 	"Retrieval Query Generation": "",
 	"Rich Text Input for Chat": "",
 	"RK": "",
@@ -880,7 +889,6 @@
 	"Select an Ollama instance": "",
 	"Select Engine": "Vælg engine",
 	"Select Knowledge": "Vælg viden",
-	"Select model": "Vælg model",
 	"Select only one model to call": "Vælg kun én model at kalde",
 	"Selected model(s) do not support image inputs": "Valgte model(ler) understøtter ikke billedinput",
 	"Semantic distance to query": "",
@@ -957,6 +965,7 @@
 	"Tags Generation": "",
 	"Tags Generation Prompt": "",
 	"Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "",
+	"Talk to model": "",
 	"Tap to interrupt": "Tryk for at afbryde",
 	"Tasks": "",
 	"Tavily API Key": "Tavily API-nøgle",
@@ -1041,6 +1050,7 @@
 	"Top P": "Top P",
 	"Transformers": "",
 	"Trouble accessing Ollama?": "Problemer med at få adgang til Ollama?",
+	"Trust Proxy Environment": "",
 	"TTS Model": "TTS-model",
 	"TTS Settings": "TTS-indstillinger",
 	"TTS Voice": "TTS-stemme",
@@ -1104,7 +1114,6 @@
 	"Warning: Jupyter execution enables arbitrary code execution, posing severe security risks—proceed with extreme caution.": "",
 	"Web": "Web",
 	"Web API": "Web API",
-	"Web Loader Settings": "Web Loader-indstillinger",
 	"Web Search": "Websøgning",
 	"Web Search Engine": "Websøgemaskine",
 	"Web Search in Chat": "",
@@ -1146,5 +1155,6 @@
 	"Your account status is currently pending activation.": "Din kontostatus afventer i øjeblikket aktivering.",
 	"Your entire contribution will go directly to the plugin developer; Open WebUI does not take any percentage. However, the chosen funding platform might have its own fees.": "Hele dit bidrag går direkte til plugin-udvikleren; Open WebUI tager ikke nogen procentdel. Den valgte finansieringsplatform kan dog have sine egne gebyrer.",
 	"Youtube": "Youtube",
-	"Youtube Loader Settings": "Youtube Loader-indstillinger"
+	"Youtube Language": "",
+	"Youtube Proxy URL": ""
 }

+ 23 - 13
src/lib/i18n/locales/de-DE/translation.json

@@ -13,6 +13,7 @@
 	"A task model is used when performing tasks such as generating titles for chats and web search queries": "Aufgabenmodelle können Unterhaltungstitel oder Websuchanfragen generieren.",
 	"a user": "ein Benutzer",
 	"About": "Über",
+	"Accept autocomplete generation / Jump to prompt variable": "",
 	"Access": "Zugang",
 	"Access Control": "Zugangskontrolle",
 	"Accessible to all users": "Für alle Benutzer zugänglich",
@@ -85,6 +86,7 @@
 	"Archive All Chats": "Alle Unterhaltungen archivieren",
 	"Archived Chats": "Archivierte Unterhaltungen",
 	"archived-chat-export": "archivierter-chat-export",
+	"Are you sure you want to clear all memories? This action cannot be undone.": "",
 	"Are you sure you want to delete this channel?": "Sind Sie sicher, dass Sie diesen Kanal löschen möchten?",
 	"Are you sure you want to delete this message?": "Sind Sie sicher, dass Sie diese Nachricht löschen möchten?",
 	"Are you sure you want to unarchive all archived chats?": "Sind Sie sicher, dass Sie alle archivierten Unterhaltungen wiederherstellen möchten?",
@@ -93,7 +95,7 @@
 	"Artifacts": "Artefakte",
 	"Ask a question": "Stellen Sie eine Frage",
 	"Assistant": "Assistent",
-	"Attach file": "Datei anhängen",
+	"Attach file from knowledge": "",
 	"Attention to detail": "Aufmerksamkeit für Details",
 	"Attribute for Mail": "Attribut für E-Mail",
 	"Attribute for Username": "Attribut für Benutzername",
@@ -127,6 +129,7 @@
 	"Bocha Search API Key": "",
 	"Brave Search API Key": "Brave Search API-Schlüssel",
 	"By {{name}}": "Von {{name}}",
+	"Bypass Embedding and Retrieval": "",
 	"Bypass SSL verification for Websites": "SSL-Überprüfung für Webseiten umgehen",
 	"Calendar": "",
 	"Call": "Anrufen",
@@ -156,11 +159,11 @@
 	"Checking for updates...": "Sucht nach Updates...",
 	"Choose a model before saving...": "Wählen Sie ein Modell, bevor Sie speichern...",
 	"Chunk Overlap": "Blocküberlappung",
-	"Chunk Params": "Blockparameter",
 	"Chunk Size": "Blockgröße",
 	"Ciphers": "Verschlüsselungen",
 	"Citation": "Zitate",
 	"Clear memory": "Alle Erinnerungen entfernen",
+	"Clear Memory": "",
 	"click here": "hier klicken",
 	"Click here for filter guides.": "Klicken Sie hier für Filteranleitungen.",
 	"Click here for help.": "Klicken Sie hier für Hilfe.",
@@ -208,7 +211,7 @@
 	"Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort. (Default: medium)": "Beschränkt den Aufwand für das Schlussfolgern bei Schlussfolgerungsmodellen. Nur anwendbar auf Schlussfolgerungsmodelle von spezifischen Anbietern, die den Schlussfolgerungsaufwand unterstützen. (Standard: medium)",
 	"Contact Admin for WebUI Access": "Kontaktieren Sie den Administrator für den Zugriff auf die Weboberfläche",
 	"Content": "Info",
-	"Content Extraction": "Inhaltsextraktion",
+	"Content Extraction Engine": "",
 	"Context Length": "Kontextlänge",
 	"Continue Response": "Antwort fortsetzen",
 	"Continue with {{provider}}": "Mit {{provider}} fortfahren",
@@ -245,6 +248,7 @@
 	"Current Model": "Aktuelles Modell",
 	"Current Password": "Aktuelles Passwort",
 	"Custom": "Benutzerdefiniert",
+	"Danger Zone": "",
 	"Dark": "Dunkel",
 	"Database": "Datenbank",
 	"December": "Dezember",
@@ -305,6 +309,8 @@
 	"Do not install functions from sources you do not fully trust.": "Installieren Sie keine Funktionen aus Quellen, denen Sie nicht vollständig vertrauen.",
 	"Do not install tools from sources you do not fully trust.": "Installieren Sie keine Werkzeuge aus Quellen, denen Sie nicht vollständig vertrauen.",
 	"Document": "Dokument",
+	"Document Intelligence": "",
+	"Document Intelligence endpoint and key required.": "",
 	"Documentation": "Dokumentation",
 	"Documents": "Dokumente",
 	"does not make any external connections, and your data stays securely on your locally hosted server.": "stellt keine externen Verbindungen her, und Ihre Daten bleiben sicher auf Ihrem lokal gehosteten Server.",
@@ -340,6 +346,7 @@
 	"ElevenLabs": "ElevenLabs",
 	"Email": "E-Mail",
 	"Embark on adventures": "Abenteuer erleben",
+	"Embedding": "",
 	"Embedding Batch Size": "Embedding-Stapelgröße",
 	"Embedding Model": "Embedding-Modell",
 	"Embedding Model Engine": "Embedding-Modell-Engine",
@@ -348,15 +355,12 @@
 	"Enable autocomplete generation for chat messages": "Automatische Vervollständigung für Chat-Nachrichten aktivieren",
 	"Enable Code Interpreter": "",
 	"Enable Community Sharing": "Community-Freigabe aktivieren",
-	"Enable Google Drive": "Google Drive aktivieren",
 	"Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "Aktiviere Memory Locking (mlock), um zu verhindern, dass Modelldaten aus dem RAM ausgelagert werden. Diese Option sperrt die Arbeitsseiten des Modells im RAM, um sicherzustellen, dass sie nicht auf die Festplatte ausgelagert werden. Dies kann die Leistung verbessern, indem Page Faults vermieden und ein schneller Datenzugriff sichergestellt werden.",
 	"Enable Memory Mapping (mmap) to load model data. This option allows the system to use disk storage as an extension of RAM by treating disk files as if they were in RAM. This can improve model performance by allowing for faster data access. However, it may not work correctly with all systems and can consume a significant amount of disk space.": "Aktiviere Memory Mapping (mmap), um Modelldaten zu laden. Diese Option ermöglicht es dem System, den Festplattenspeicher als Erweiterung des RAM zu verwenden, indem Festplattendateien so behandelt werden, als ob sie im RAM wären. Dies kann die Modellleistung verbessern, indem ein schnellerer Datenzugriff ermöglicht wird. Es kann jedoch nicht auf allen Systemen korrekt funktionieren und einen erheblichen Teil des Festplattenspeichers beanspruchen.",
 	"Enable Message Rating": "Nachrichtenbewertung aktivieren",
 	"Enable Mirostat sampling for controlling perplexity. (Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "Mirostat Sampling zur Steuerung der Perplexität aktivieren. (Standard: 0, 0 = Deaktiviert, 1 = Mirostat, 2 = Mirostat 2.0)",
 	"Enable New Sign Ups": "Registrierung erlauben",
-	"Enable Web Search": "Websuche aktivieren",
 	"Enabled": "Aktiviert",
-	"Engine": "Engine",
 	"Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "Stellen Sie sicher, dass Ihre CSV-Datei 4 Spalten in dieser Reihenfolge enthält: Name, E-Mail, Passwort, Rolle.",
 	"Enter {{role}} message here": "Geben Sie die {{role}}-Nachricht hier ein",
 	"Enter a detail about yourself for your LLMs to recall": "Geben Sie ein Detail über sich selbst ein, das Ihre Sprachmodelle (LLMs) sich merken sollen",
@@ -372,6 +376,8 @@
 	"Enter Chunk Overlap": "Geben Sie die Blocküberlappung ein",
 	"Enter Chunk Size": "Geben Sie die Blockgröße ein",
 	"Enter description": "Geben Sie eine Beschreibung ein",
+	"Enter Document Intelligence Endpoint": "",
+	"Enter Document Intelligence Key": "",
 	"Enter domains separated by commas (e.g., example.com,site.org)": "",
 	"Enter Exa API Key": "Geben Sie den Exa-API-Schlüssel ein",
 	"Enter Github Raw URL": "Geben Sie die Github Raw-URL ein",
@@ -507,9 +513,9 @@
 	"Gemini API Config": "",
 	"Gemini API Key is required.": "",
 	"General": "Allgemein",
-	"General Settings": "Allgemeine Einstellungen",
 	"Generate an image": "Bild erzeugen",
 	"Generate Image": "Bild erzeugen",
+	"Generate prompt pair": "",
 	"Generating search query": "Suchanfrage wird erstellt",
 	"Get started": "Loslegen",
 	"Get started with {{WEBUI_NAME}}": "Loslegen mit {{WEBUI_NAME}}",
@@ -565,6 +571,7 @@
 	"Input commands": "Eingabebefehle",
 	"Install from Github URL": "Installiere von der Github-URL",
 	"Instant Auto-Send After Voice Transcription": "Spracherkennung direkt absenden",
+	"Integration": "",
 	"Interface": "Benutzeroberfläche",
 	"Invalid file format.": "Ungültiges Dateiformat.",
 	"Invalid Tag": "Ungültiger Tag",
@@ -612,16 +619,17 @@
 	"Listening...": "Höre zu...",
 	"Llama.cpp": "Llama.cpp",
 	"LLMs can make mistakes. Verify important information.": "LLMs können Fehler machen. Überprüfe wichtige Informationen.",
+	"Loader": "",
 	"Loading Kokoro.js...": "",
 	"Local": "Lokal",
 	"Local Models": "Lokale Modelle",
+	"Location access not allowed": "",
 	"Lost": "Verloren",
 	"LTR": "LTR",
 	"Made by Open WebUI Community": "Von der OpenWebUI-Community",
 	"Make sure to enclose them with": "Umschließe Variablen mit",
 	"Make sure to export a workflow.json file as API format from ComfyUI.": "Stellen Sie sicher, dass sie eine workflow.json-Datei im API-Format von ComfyUI exportieren.",
 	"Manage": "Verwalten",
-	"Manage Arena Models": "Arena-Modelle verwalten",
 	"Manage Direct Connections": "",
 	"Manage Models": "Modelle verwalten",
 	"Manage Ollama": "Ollama verwalten",
@@ -689,6 +697,7 @@
 	"No HTML, CSS, or JavaScript content found.": "Keine HTML-, CSS- oder JavaScript-Inhalte gefunden.",
 	"No inference engine with management support found": "Keine Inferenz-Engine mit Management-Unterstützung gefunden",
 	"No knowledge found": "Kein Wissen gefunden",
+	"No memories to clear": "",
 	"No model IDs": "Keine Modell-IDs",
 	"No models found": "Keine Modelle gefunden",
 	"No models selected": "Keine Modelle ausgewählt",
@@ -718,6 +727,7 @@
 	"Ollama API settings updated": "Ollama-API-Einstellungen aktualisiert",
 	"Ollama Version": "Ollama-Version",
 	"On": "Ein",
+	"OneDrive": "",
 	"Only alphanumeric characters and hyphens are allowed": "Nur alphanumerische Zeichen und Bindestriche sind erlaubt",
 	"Only alphanumeric characters and hyphens are allowed in the command string.": "In der Befehlszeichenfolge sind nur alphanumerische Zeichen und Bindestriche erlaubt.",
 	"Only collections can be edited, create a new knowledge base to edit/add documents.": "Nur Sammlungen können bearbeitet werden. Erstellen Sie eine neue Wissensbasis, um Dokumente zu bearbeiten/hinzuzufügen.",
@@ -788,11 +798,9 @@
 	"Prompt updated successfully": "Prompt erfolgreich aktualisiert",
 	"Prompts": "Prompts",
 	"Prompts Access": "Prompt-Zugriff",
-	"Proxy URL": "Proxy-URL",
 	"Pull \"{{searchValue}}\" from Ollama.com": "\"{{searchValue}}\" von Ollama.com beziehen",
 	"Pull a model from Ollama.com": "Modell von Ollama.com beziehen",
 	"Query Generation Prompt": "Abfragegenerierungsprompt",
-	"Query Params": "Abfrageparameter",
 	"RAG Template": "RAG-Vorlage",
 	"Rating": "Bewertung",
 	"Re-rank models by topic similarity": "Modelle nach thematischer Ähnlichkeit neu ordnen",
@@ -827,6 +835,7 @@
 	"Response notifications cannot be activated as the website permissions have been denied. Please visit your browser settings to grant the necessary access.": "Benachrichtigungen können nicht aktiviert werden, da die Website-Berechtigungen abgelehnt wurden. Bitte besuchen Sie Ihre Browser-Einstellungen, um den erforderlichen Zugriff zu gewähren.",
 	"Response splitting": "Antwortaufteilung",
 	"Result": "Ergebnis",
+	"Retrieval": "",
 	"Retrieval Query Generation": "Abfragegenerierung",
 	"Rich Text Input for Chat": "Rich-Text-Eingabe für Unterhaltungen",
 	"RK": "RK",
@@ -880,7 +889,6 @@
 	"Select an Ollama instance": "Wählen Sie eine Ollama-Instanz",
 	"Select Engine": "Engine auswählen",
 	"Select Knowledge": "Wissensdatenbank auswählen",
-	"Select model": "Modell auswählen",
 	"Select only one model to call": "Wählen Sie nur ein Modell zum Anrufen aus",
 	"Selected model(s) do not support image inputs": "Ihre ausgewählten Modelle unterstützen keine Bildeingaben",
 	"Semantic distance to query": "Semantische Distanz zur Abfrage",
@@ -957,6 +965,7 @@
 	"Tags Generation": "Tag-Generierung",
 	"Tags Generation Prompt": "Prompt für Tag-Generierung",
 	"Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "Tail-Free Sampling wird verwendet, um den Einfluss weniger wahrscheinlicher Tokens auf die Ausgabe zu reduzieren. Ein höherer Wert (z.B. 2.0) reduziert den Einfluss stärker, während ein Wert von 1.0 diese Einstellung deaktiviert. (Standard: 1)",
+	"Talk to model": "",
 	"Tap to interrupt": "Zum Unterbrechen tippen",
 	"Tasks": "",
 	"Tavily API Key": "Tavily-API-Schlüssel",
@@ -1041,6 +1050,7 @@
 	"Top P": "Top P",
 	"Transformers": "Transformers",
 	"Trouble accessing Ollama?": "Probleme beim Zugriff auf Ollama?",
+	"Trust Proxy Environment": "",
 	"TTS Model": "TTS-Modell",
 	"TTS Settings": "TTS-Einstellungen",
 	"TTS Voice": "TTS-Stimme",
@@ -1104,7 +1114,6 @@
 	"Warning: Jupyter execution enables arbitrary code execution, posing severe security risks—proceed with extreme caution.": "",
 	"Web": "Web",
 	"Web API": "Web-API",
-	"Web Loader Settings": "Web Loader Einstellungen",
 	"Web Search": "Websuche",
 	"Web Search Engine": "Suchmaschine",
 	"Web Search in Chat": "Websuche im Chat",
@@ -1146,5 +1155,6 @@
 	"Your account status is currently pending activation.": "Ihr Kontostatus ist derzeit ausstehend und wartet auf Aktivierung.",
 	"Your entire contribution will go directly to the plugin developer; Open WebUI does not take any percentage. However, the chosen funding platform might have its own fees.": "Ihr gesamter Beitrag geht direkt an den Plugin-Entwickler; Open WebUI behält keinen Prozentsatz ein. Die gewählte Finanzierungsplattform kann jedoch eigene Gebühren haben.",
 	"Youtube": "YouTube",
-	"Youtube Loader Settings": "YouTube-Ladeeinstellungen"
+	"Youtube Language": "",
+	"Youtube Proxy URL": ""
 }

+ 23 - 13
src/lib/i18n/locales/dg-DG/translation.json

@@ -13,6 +13,7 @@
 	"A task model is used when performing tasks such as generating titles for chats and web search queries": "",
 	"a user": "such user",
 	"About": "Much About",
+	"Accept autocomplete generation / Jump to prompt variable": "",
 	"Access": "",
 	"Access Control": "",
 	"Accessible to all users": "",
@@ -85,6 +86,7 @@
 	"Archive All Chats": "",
 	"Archived Chats": "",
 	"archived-chat-export": "",
+	"Are you sure you want to clear all memories? This action cannot be undone.": "",
 	"Are you sure you want to delete this channel?": "",
 	"Are you sure you want to delete this message?": "",
 	"Are you sure you want to unarchive all archived chats?": "",
@@ -93,7 +95,7 @@
 	"Artifacts": "",
 	"Ask a question": "",
 	"Assistant": "",
-	"Attach file": "Attach file",
+	"Attach file from knowledge": "",
 	"Attention to detail": "",
 	"Attribute for Mail": "",
 	"Attribute for Username": "",
@@ -127,6 +129,7 @@
 	"Bocha Search API Key": "",
 	"Brave Search API Key": "",
 	"By {{name}}": "",
+	"Bypass Embedding and Retrieval": "",
 	"Bypass SSL verification for Websites": "",
 	"Calendar": "",
 	"Call": "",
@@ -156,11 +159,11 @@
 	"Checking for updates...": "Checking for updates... Such anticipation...",
 	"Choose a model before saving...": "Choose model before saving... Wow choose first.",
 	"Chunk Overlap": "Chunk Overlap",
-	"Chunk Params": "Chunk Params",
 	"Chunk Size": "Chunk Size",
 	"Ciphers": "",
 	"Citation": "",
 	"Clear memory": "",
+	"Clear Memory": "",
 	"click here": "",
 	"Click here for filter guides.": "",
 	"Click here for help.": "Click for help. Much assist.",
@@ -208,7 +211,7 @@
 	"Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort. (Default: medium)": "",
 	"Contact Admin for WebUI Access": "",
 	"Content": "Content",
-	"Content Extraction": "",
+	"Content Extraction Engine": "",
 	"Context Length": "Context Length",
 	"Continue Response": "",
 	"Continue with {{provider}}": "",
@@ -245,6 +248,7 @@
 	"Current Model": "Current Model",
 	"Current Password": "Current Password",
 	"Custom": "Custom",
+	"Danger Zone": "",
 	"Dark": "Dark",
 	"Database": "Database",
 	"December": "",
@@ -305,6 +309,8 @@
 	"Do not install functions from sources you do not fully trust.": "",
 	"Do not install tools from sources you do not fully trust.": "",
 	"Document": "Document",
+	"Document Intelligence": "",
+	"Document Intelligence endpoint and key required.": "",
 	"Documentation": "",
 	"Documents": "Documents",
 	"does not make any external connections, and your data stays securely on your locally hosted server.": "does not connect external, data stays safe locally.",
@@ -340,6 +346,7 @@
 	"ElevenLabs": "",
 	"Email": "Email",
 	"Embark on adventures": "",
+	"Embedding": "",
 	"Embedding Batch Size": "",
 	"Embedding Model": "",
 	"Embedding Model Engine": "",
@@ -348,15 +355,12 @@
 	"Enable autocomplete generation for chat messages": "",
 	"Enable Code Interpreter": "",
 	"Enable Community Sharing": "",
-	"Enable Google Drive": "",
 	"Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "",
 	"Enable Memory Mapping (mmap) to load model data. This option allows the system to use disk storage as an extension of RAM by treating disk files as if they were in RAM. This can improve model performance by allowing for faster data access. However, it may not work correctly with all systems and can consume a significant amount of disk space.": "",
 	"Enable Message Rating": "",
 	"Enable Mirostat sampling for controlling perplexity. (Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "",
 	"Enable New Sign Ups": "Enable New Bark Ups",
-	"Enable Web Search": "",
 	"Enabled": "",
-	"Engine": "",
 	"Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "",
 	"Enter {{role}} message here": "Enter {{role}} bork here",
 	"Enter a detail about yourself for your LLMs to recall": "",
@@ -372,6 +376,8 @@
 	"Enter Chunk Overlap": "Enter Overlap of Chunks",
 	"Enter Chunk Size": "Enter Size of Chunk",
 	"Enter description": "",
+	"Enter Document Intelligence Endpoint": "",
+	"Enter Document Intelligence Key": "",
 	"Enter domains separated by commas (e.g., example.com,site.org)": "",
 	"Enter Exa API Key": "",
 	"Enter Github Raw URL": "",
@@ -507,9 +513,9 @@
 	"Gemini API Config": "",
 	"Gemini API Key is required.": "",
 	"General": "Woweral",
-	"General Settings": "General Doge Settings",
 	"Generate an image": "",
 	"Generate Image": "",
+	"Generate prompt pair": "",
 	"Generating search query": "",
 	"Get started": "",
 	"Get started with {{WEBUI_NAME}}": "",
@@ -565,6 +571,7 @@
 	"Input commands": "Input commands",
 	"Install from Github URL": "",
 	"Instant Auto-Send After Voice Transcription": "",
+	"Integration": "",
 	"Interface": "Interface",
 	"Invalid file format.": "",
 	"Invalid Tag": "",
@@ -612,16 +619,17 @@
 	"Listening...": "",
 	"Llama.cpp": "",
 	"LLMs can make mistakes. Verify important information.": "LLMs can make borks. Verify important info.",
+	"Loader": "",
 	"Loading Kokoro.js...": "",
 	"Local": "",
 	"Local Models": "",
+	"Location access not allowed": "",
 	"Lost": "",
 	"LTR": "",
 	"Made by Open WebUI Community": "Made by Open WebUI Community",
 	"Make sure to enclose them with": "Make sure to enclose them with",
 	"Make sure to export a workflow.json file as API format from ComfyUI.": "",
 	"Manage": "",
-	"Manage Arena Models": "",
 	"Manage Direct Connections": "",
 	"Manage Models": "",
 	"Manage Ollama": "",
@@ -689,6 +697,7 @@
 	"No HTML, CSS, or JavaScript content found.": "",
 	"No inference engine with management support found": "",
 	"No knowledge found": "",
+	"No memories to clear": "",
 	"No model IDs": "",
 	"No models found": "",
 	"No models selected": "",
@@ -718,6 +727,7 @@
 	"Ollama API settings updated": "",
 	"Ollama Version": "Ollama Version",
 	"On": "On",
+	"OneDrive": "",
 	"Only alphanumeric characters and hyphens are allowed": "",
 	"Only alphanumeric characters and hyphens are allowed in the command string.": "Only wow characters and hyphens are allowed in the bork string.",
 	"Only collections can be edited, create a new knowledge base to edit/add documents.": "",
@@ -788,11 +798,9 @@
 	"Prompt updated successfully": "",
 	"Prompts": "Promptos",
 	"Prompts Access": "",
-	"Proxy URL": "",
 	"Pull \"{{searchValue}}\" from Ollama.com": "",
 	"Pull a model from Ollama.com": "Pull a wowdel from Ollama.com",
 	"Query Generation Prompt": "",
-	"Query Params": "Query Bark",
 	"RAG Template": "RAG Template",
 	"Rating": "",
 	"Re-rank models by topic similarity": "",
@@ -827,6 +835,7 @@
 	"Response notifications cannot be activated as the website permissions have been denied. Please visit your browser settings to grant the necessary access.": "",
 	"Response splitting": "",
 	"Result": "",
+	"Retrieval": "",
 	"Retrieval Query Generation": "",
 	"Rich Text Input for Chat": "",
 	"RK": "",
@@ -880,7 +889,6 @@
 	"Select an Ollama instance": "",
 	"Select Engine": "",
 	"Select Knowledge": "",
-	"Select model": "Select model much choice",
 	"Select only one model to call": "",
 	"Selected model(s) do not support image inputs": "",
 	"Semantic distance to query": "",
@@ -957,6 +965,7 @@
 	"Tags Generation": "",
 	"Tags Generation Prompt": "",
 	"Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "",
+	"Talk to model": "",
 	"Tap to interrupt": "",
 	"Tasks": "",
 	"Tavily API Key": "",
@@ -1041,6 +1050,7 @@
 	"Top P": "Top P very top",
 	"Transformers": "",
 	"Trouble accessing Ollama?": "Trouble accessing Ollama? Much trouble?",
+	"Trust Proxy Environment": "",
 	"TTS Model": "",
 	"TTS Settings": "TTS Settings much settings",
 	"TTS Voice": "",
@@ -1104,7 +1114,6 @@
 	"Warning: Jupyter execution enables arbitrary code execution, posing severe security risks—proceed with extreme caution.": "",
 	"Web": "Web very web",
 	"Web API": "",
-	"Web Loader Settings": "",
 	"Web Search": "",
 	"Web Search Engine": "",
 	"Web Search in Chat": "",
@@ -1146,5 +1155,6 @@
 	"Your account status is currently pending activation.": "",
 	"Your entire contribution will go directly to the plugin developer; Open WebUI does not take any percentage. However, the chosen funding platform might have its own fees.": "",
 	"Youtube": "",
-	"Youtube Loader Settings": ""
+	"Youtube Language": "",
+	"Youtube Proxy URL": ""
 }

+ 23 - 13
src/lib/i18n/locales/el-GR/translation.json

@@ -13,6 +13,7 @@
 	"A task model is used when performing tasks such as generating titles for chats and web search queries": "Ένα μοντέλο εργασίας χρησιμοποιείται κατά την εκτέλεση εργασιών όπως η δημιουργία τίτλων για συνομιλίες και αναζητήσεις στο διαδίκτυο",
 	"a user": "ένας χρήστης",
 	"About": "Σχετικά",
+	"Accept autocomplete generation / Jump to prompt variable": "",
 	"Access": "Πρόσβαση",
 	"Access Control": "Έλεγχος Πρόσβασης",
 	"Accessible to all users": "Προσβάσιμο σε όλους τους χρήστες",
@@ -85,6 +86,7 @@
 	"Archive All Chats": "Αρχειοθέτηση Όλων των Συνομιλιών",
 	"Archived Chats": "Αρχειοθετημένες Συνομιλίες",
 	"archived-chat-export": "εξαγωγή-αρχείου-συνομιλίας",
+	"Are you sure you want to clear all memories? This action cannot be undone.": "",
 	"Are you sure you want to delete this channel?": "",
 	"Are you sure you want to delete this message?": "",
 	"Are you sure you want to unarchive all archived chats?": "Είστε σίγουροι ότι θέλετε να απο-αρχειοθετήσετε όλες τις αρχειοθετημένες συνομιλίες;",
@@ -93,7 +95,7 @@
 	"Artifacts": "Αρχεία",
 	"Ask a question": "Ρωτήστε μια ερώτηση",
 	"Assistant": "Βοηθός",
-	"Attach file": "Συνημμένο αρχείο",
+	"Attach file from knowledge": "",
 	"Attention to detail": "Προσοχή στη λεπτομέρεια",
 	"Attribute for Mail": "",
 	"Attribute for Username": "Ιδιότητα για Όνομα Χρήστη",
@@ -127,6 +129,7 @@
 	"Bocha Search API Key": "",
 	"Brave Search API Key": "Κλειδί API Brave Search",
 	"By {{name}}": "Από {{name}}",
+	"Bypass Embedding and Retrieval": "",
 	"Bypass SSL verification for Websites": "Παράκαμψη επαλήθευσης SSL για Ιστότοπους",
 	"Calendar": "",
 	"Call": "Κλήση",
@@ -156,11 +159,11 @@
 	"Checking for updates...": "Ελέγχεται για ενημερώσεις...",
 	"Choose a model before saving...": "Επιλέξτε ένα μοντέλο πριν αποθηκεύσετε...",
 	"Chunk Overlap": "Επικάλυψη Τμημάτων",
-	"Chunk Params": "Παράμετροι Τμημάτων",
 	"Chunk Size": "Μέγεθος Τμημάτων",
 	"Ciphers": "Κρυπτογραφήσεις",
 	"Citation": "Παράθεση",
 	"Clear memory": "Καθαρισμός μνήμης",
+	"Clear Memory": "",
 	"click here": "κλικ εδώ",
 	"Click here for filter guides.": "Κάντε κλικ εδώ για οδηγούς φίλτρων.",
 	"Click here for help.": "Κάντε κλικ εδώ για βοήθεια.",
@@ -208,7 +211,7 @@
 	"Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort. (Default: medium)": "",
 	"Contact Admin for WebUI Access": "Επικοινωνήστε με τον Διαχειριστή για Πρόσβαση στο WebUI",
 	"Content": "Περιεχόμενο",
-	"Content Extraction": "Εξαγωγή Περιεχομένου",
+	"Content Extraction Engine": "",
 	"Context Length": "Μήκος Πλαισίου",
 	"Continue Response": "Συνέχεια Απάντησης",
 	"Continue with {{provider}}": "Συνέχεια με {{provider}}",
@@ -245,6 +248,7 @@
 	"Current Model": "Τρέχον Μοντέλο",
 	"Current Password": "Τρέχων Κωδικός",
 	"Custom": "Προσαρμοσμένο",
+	"Danger Zone": "",
 	"Dark": "Σκούρο",
 	"Database": "Βάση Δεδομένων",
 	"December": "Δεκέμβριος",
@@ -305,6 +309,8 @@
 	"Do not install functions from sources you do not fully trust.": "Μην εγκαθιστάτε λειτουργίες από πηγές που δεν εμπιστεύεστε πλήρως.",
 	"Do not install tools from sources you do not fully trust.": "Μην εγκαθιστάτε εργαλεία από πηγές που δεν εμπιστεύεστε πλήρως.",
 	"Document": "Έγγραφο",
+	"Document Intelligence": "",
+	"Document Intelligence endpoint and key required.": "",
 	"Documentation": "Τεκμηρίωση",
 	"Documents": "Έγγραφα",
 	"does not make any external connections, and your data stays securely on your locally hosted server.": "δεν κάνει καμία εξωτερική σύνδεση, και τα δεδομένα σας παραμένουν ασφαλή στον τοπικά φιλοξενούμενο διακομιστή σας.",
@@ -340,6 +346,7 @@
 	"ElevenLabs": "ElevenLabs",
 	"Email": "Email",
 	"Embark on adventures": "Ξεκινήστε περιπέτειες",
+	"Embedding": "",
 	"Embedding Batch Size": "Μέγεθος Παρτίδας Ενσωμάτωσης",
 	"Embedding Model": "Μοντέλο Ενσωμάτωσης",
 	"Embedding Model Engine": "Μηχανή Μοντέλου Ενσωμάτωσης",
@@ -348,15 +355,12 @@
 	"Enable autocomplete generation for chat messages": "",
 	"Enable Code Interpreter": "",
 	"Enable Community Sharing": "Ενεργοποίηση Κοινοτικής Κοινής Χρήσης",
-	"Enable Google Drive": "",
 	"Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "Ενεργοποίηση Κλείδωσης Μνήμης (mlock) για την αποτροπή της ανταλλαγής δεδομένων του μοντέλου από τη μνήμη RAM. Αυτή η επιλογή κλειδώνει το σύνολο εργασίας των σελίδων του μοντέλου στη μνήμη RAM, διασφαλίζοντας ότι δεν θα ανταλλαχθούν στο δίσκο. Αυτό μπορεί να βοηθήσει στη διατήρηση της απόδοσης αποφεύγοντας σφάλματα σελίδων και διασφαλίζοντας γρήγορη πρόσβαση στα δεδομένα.",
 	"Enable Memory Mapping (mmap) to load model data. This option allows the system to use disk storage as an extension of RAM by treating disk files as if they were in RAM. This can improve model performance by allowing for faster data access. However, it may not work correctly with all systems and can consume a significant amount of disk space.": "Ενεργοποίηση Χαρτογράφησης Μνήμης (mmap) για φόρτωση δεδομένων μοντέλου. Αυτή η επιλογή επιτρέπει στο σύστημα να χρησιμοποιεί αποθήκευση δίσκου ως επέκταση της μνήμης RAM, αντιμετωπίζοντας αρχεία δίσκου σαν να ήταν στη μνήμη RAM. Αυτό μπορεί να βελτιώσει την απόδοση του μοντέλου επιτρέποντας γρηγορότερη πρόσβαση στα δεδομένα. Ωστόσο, μπορεί να μην λειτουργεί σωστά με όλα τα συστήματα και να καταναλώνει σημαντικό χώρο στο δίσκο.",
 	"Enable Message Rating": "Ενεργοποίηση Αξιολόγησης Μηνυμάτων",
 	"Enable Mirostat sampling for controlling perplexity. (Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "Ενεργοποίηση δειγματοληψίας Mirostat για έλεγχο της περιπλοκότητας. (Προεπιλογή: 0, 0 = Απενεργοποιημένο, 1 = Mirostat, 2 = Mirostat 2.0)",
 	"Enable New Sign Ups": "Ενεργοποίηση Νέων Εγγραφών",
-	"Enable Web Search": "Ενεργοποίηση Αναζήτησης στο Διαδίκτυο",
 	"Enabled": "Ενεργοποιημένο",
-	"Engine": "Μηχανή",
 	"Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "Βεβαιωθείτε ότι το αρχείο CSV σας περιλαμβάνει 4 στήλες με αυτή τη σειρά: Όνομα, Email, Κωδικός, Ρόλος.",
 	"Enter {{role}} message here": "Εισάγετε το μήνυμα {{role}} εδώ",
 	"Enter a detail about yourself for your LLMs to recall": "Εισάγετε μια λεπτομέρεια για τον εαυτό σας ώστε τα LLMs να την ανακαλούν",
@@ -372,6 +376,8 @@
 	"Enter Chunk Overlap": "Εισάγετε την Επικάλυψη Τμημάτων",
 	"Enter Chunk Size": "Εισάγετε το Μέγεθος Τμημάτων",
 	"Enter description": "Εισάγετε την περιγραφή",
+	"Enter Document Intelligence Endpoint": "",
+	"Enter Document Intelligence Key": "",
 	"Enter domains separated by commas (e.g., example.com,site.org)": "",
 	"Enter Exa API Key": "",
 	"Enter Github Raw URL": "Εισάγετε το Github Raw URL",
@@ -507,9 +513,9 @@
 	"Gemini API Config": "",
 	"Gemini API Key is required.": "",
 	"General": "Γενικά",
-	"General Settings": "Γενικές Ρυθμίσεις",
 	"Generate an image": "",
 	"Generate Image": "Δημιουργία Εικόνας",
+	"Generate prompt pair": "",
 	"Generating search query": "Γενιά αναζήτησης ερώτησης",
 	"Get started": "Ξεκινήστε",
 	"Get started with {{WEBUI_NAME}}": "Ξεκινήστε με {{WEBUI_NAME}}",
@@ -565,6 +571,7 @@
 	"Input commands": "Εισαγωγή εντολών",
 	"Install from Github URL": "Εγκατάσταση από URL Github",
 	"Instant Auto-Send After Voice Transcription": "Άμεση Αυτόματη Αποστολή μετά τη μεταγραφή φωνής",
+	"Integration": "",
 	"Interface": "Διεπαφή",
 	"Invalid file format.": "Μη έγκυρη μορφή αρχείου.",
 	"Invalid Tag": "Μη έγκυρη Ετικέτα",
@@ -612,16 +619,17 @@
 	"Listening...": "Ακούγεται...",
 	"Llama.cpp": "",
 	"LLMs can make mistakes. Verify important information.": "Τα LLM μπορούν να κάνουν λάθη. Επαληθεύστε σημαντικές πληροφορίες.",
+	"Loader": "",
 	"Loading Kokoro.js...": "",
 	"Local": "Τοπικό",
 	"Local Models": "Τοπικά Μοντέλα",
+	"Location access not allowed": "",
 	"Lost": "Χαμένος",
 	"LTR": "LTR",
 	"Made by Open WebUI Community": "Δημιουργήθηκε από την Κοινότητα OpenWebUI",
 	"Make sure to enclose them with": "Βεβαιωθείτε ότι τα περικλείετε με",
 	"Make sure to export a workflow.json file as API format from ComfyUI.": "Βεβαιωθείτε ότι εξάγετε ένα αρχείο workflow.json ως μορφή API από το ComfyUI.",
 	"Manage": "Διαχείριση",
-	"Manage Arena Models": "Διαχείριση Μοντέλων Arena",
 	"Manage Direct Connections": "",
 	"Manage Models": "",
 	"Manage Ollama": "Διαχείριση Ollama",
@@ -689,6 +697,7 @@
 	"No HTML, CSS, or JavaScript content found.": "Δεν βρέθηκε περιεχόμενο HTML, CSS ή JavaScript.",
 	"No inference engine with management support found": "",
 	"No knowledge found": "Δεν βρέθηκε γνώση",
+	"No memories to clear": "",
 	"No model IDs": "Δεν υπάρχουν IDs μοντέλων",
 	"No models found": "Δεν βρέθηκαν μοντέλα",
 	"No models selected": "Δεν έχουν επιλεγεί μοντέλα",
@@ -718,6 +727,7 @@
 	"Ollama API settings updated": "Οι ρυθμίσεις API Ollama ενημερώθηκαν",
 	"Ollama Version": "Έκδοση Ollama",
 	"On": "On",
+	"OneDrive": "",
 	"Only alphanumeric characters and hyphens are allowed": "Επιτρέπονται μόνο αλφαριθμητικοί χαρακτήρες και παύλες",
 	"Only alphanumeric characters and hyphens are allowed in the command string.": "Επιτρέπονται μόνο αλφαριθμητικοί χαρακτήρες και παύλες στο string της εντολής.",
 	"Only collections can be edited, create a new knowledge base to edit/add documents.": "Μόνο συλλογές μπορούν να επεξεργαστούν, δημιουργήστε μια νέα βάση γνώσης για επεξεργασία/προσθήκη εγγράφων.",
@@ -788,11 +798,9 @@
 	"Prompt updated successfully": "Η προτροπή ενημερώθηκε με επιτυχία",
 	"Prompts": "Προτροπές",
 	"Prompts Access": "Πρόσβαση Προτροπών",
-	"Proxy URL": "",
 	"Pull \"{{searchValue}}\" from Ollama.com": "Τραβήξτε \"{{searchValue}}\" από το Ollama.com",
 	"Pull a model from Ollama.com": "Τραβήξτε ένα μοντέλο από το Ollama.com",
 	"Query Generation Prompt": "Προτροπή Δημιουργίας Ερωτήσεων",
-	"Query Params": "Παράμετροι Ερωτήσεων",
 	"RAG Template": "Πρότυπο RAG",
 	"Rating": "Βαθμολογία",
 	"Re-rank models by topic similarity": "Επανατάξη μοντέλων κατά ομοιότητα θέματος",
@@ -827,6 +835,7 @@
 	"Response notifications cannot be activated as the website permissions have been denied. Please visit your browser settings to grant the necessary access.": "Οι ειδοποιήσεις απάντησης δεν μπορούν να ενεργοποιηθούν καθώς οι άδειες του ιστότοπου έχουν αρνηθεί. Παρακαλώ επισκεφτείτε τις ρυθμίσεις του περιηγητή σας για να δώσετε την απαραίτητη πρόσβαση.",
 	"Response splitting": "Διαχωρισμός απάντησης",
 	"Result": "Αποτέλεσμα",
+	"Retrieval": "",
 	"Retrieval Query Generation": "",
 	"Rich Text Input for Chat": "Πλούσιο Εισαγωγή Κειμένου για Συνομιλία",
 	"RK": "RK",
@@ -880,7 +889,6 @@
 	"Select an Ollama instance": "",
 	"Select Engine": "Επιλέξτε Μηχανή",
 	"Select Knowledge": "Επιλέξτε Γνώση",
-	"Select model": "Επιλέξτε μοντέλο",
 	"Select only one model to call": "Επιλέξτε μόνο ένα μοντέλο για κλήση",
 	"Selected model(s) do not support image inputs": "Τα επιλεγμένα μοντέλα δεν υποστηρίζουν είσοδο εικόνων",
 	"Semantic distance to query": "Σημαντική απόσταση προς την ερώτηση",
@@ -957,6 +965,7 @@
 	"Tags Generation": "",
 	"Tags Generation Prompt": "Προτροπή Γενιάς Ετικετών",
 	"Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "Η δειγματοληψία Tail free χρησιμοποιείται για να μειώσει την επίδραση των λιγότερο πιθανών tokens από την έξοδο. Μια υψηλότερη τιμή (π.χ., 2.0) θα μειώσει την επίδραση περισσότερο, ενώ μια τιμή 1.0 απενεργοποιεί αυτή τη ρύθμιση. (προεπιλογή: 1)",
+	"Talk to model": "",
 	"Tap to interrupt": "Πατήστε για παύση",
 	"Tasks": "",
 	"Tavily API Key": "Κλειδί API Tavily",
@@ -1041,6 +1050,7 @@
 	"Top P": "Top P",
 	"Transformers": "Transformers",
 	"Trouble accessing Ollama?": "Προβλήματα πρόσβασης στο Ollama?",
+	"Trust Proxy Environment": "",
 	"TTS Model": "Μοντέλο TTS",
 	"TTS Settings": "Ρυθμίσεις TTS",
 	"TTS Voice": "Φωνή TTS",
@@ -1104,7 +1114,6 @@
 	"Warning: Jupyter execution enables arbitrary code execution, posing severe security risks—proceed with extreme caution.": "",
 	"Web": "Διαδίκτυο",
 	"Web API": "Web API",
-	"Web Loader Settings": "Ρυθμίσεις Φόρτωσης Web",
 	"Web Search": "Αναζήτηση στο Διαδίκτυο",
 	"Web Search Engine": "Μηχανή Αναζήτησης στο Διαδίκτυο",
 	"Web Search in Chat": "",
@@ -1146,5 +1155,6 @@
 	"Your account status is currently pending activation.": "Η κατάσταση του λογαριασμού σας είναι αυτή τη στιγμή σε εκκρεμότητα ενεργοποίησης.",
 	"Your entire contribution will go directly to the plugin developer; Open WebUI does not take any percentage. However, the chosen funding platform might have its own fees.": "Η ολόκληρη η συνεισφορά σας θα πάει απευθείας στον προγραμματιστή του plugin· το Open WebUI δεν παίρνει κανένα ποσοστό. Ωστόσο, η επιλεγμένη πλατφόρμα χρηματοδότησης μπορεί να έχει τα δικά της τέλη.",
 	"Youtube": "Youtube",
-	"Youtube Loader Settings": "Ρυθμίσεις Φόρτωσης Youtube"
+	"Youtube Language": "",
+	"Youtube Proxy URL": ""
 }

+ 23 - 13
src/lib/i18n/locales/en-GB/translation.json

@@ -13,6 +13,7 @@
 	"A task model is used when performing tasks such as generating titles for chats and web search queries": "",
 	"a user": "",
 	"About": "",
+	"Accept autocomplete generation / Jump to prompt variable": "",
 	"Access": "",
 	"Access Control": "",
 	"Accessible to all users": "",
@@ -85,6 +86,7 @@
 	"Archive All Chats": "",
 	"Archived Chats": "",
 	"archived-chat-export": "",
+	"Are you sure you want to clear all memories? This action cannot be undone.": "",
 	"Are you sure you want to delete this channel?": "",
 	"Are you sure you want to delete this message?": "",
 	"Are you sure you want to unarchive all archived chats?": "",
@@ -93,7 +95,7 @@
 	"Artifacts": "",
 	"Ask a question": "",
 	"Assistant": "",
-	"Attach file": "",
+	"Attach file from knowledge": "",
 	"Attention to detail": "",
 	"Attribute for Mail": "",
 	"Attribute for Username": "",
@@ -127,6 +129,7 @@
 	"Bocha Search API Key": "",
 	"Brave Search API Key": "",
 	"By {{name}}": "",
+	"Bypass Embedding and Retrieval": "",
 	"Bypass SSL verification for Websites": "",
 	"Calendar": "",
 	"Call": "",
@@ -156,11 +159,11 @@
 	"Checking for updates...": "",
 	"Choose a model before saving...": "",
 	"Chunk Overlap": "",
-	"Chunk Params": "",
 	"Chunk Size": "",
 	"Ciphers": "",
 	"Citation": "",
 	"Clear memory": "",
+	"Clear Memory": "",
 	"click here": "",
 	"Click here for filter guides.": "",
 	"Click here for help.": "",
@@ -208,7 +211,7 @@
 	"Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort. (Default: medium)": "",
 	"Contact Admin for WebUI Access": "",
 	"Content": "",
-	"Content Extraction": "",
+	"Content Extraction Engine": "",
 	"Context Length": "",
 	"Continue Response": "",
 	"Continue with {{provider}}": "",
@@ -245,6 +248,7 @@
 	"Current Model": "",
 	"Current Password": "",
 	"Custom": "",
+	"Danger Zone": "",
 	"Dark": "",
 	"Database": "",
 	"December": "",
@@ -305,6 +309,8 @@
 	"Do not install functions from sources you do not fully trust.": "",
 	"Do not install tools from sources you do not fully trust.": "",
 	"Document": "",
+	"Document Intelligence": "",
+	"Document Intelligence endpoint and key required.": "",
 	"Documentation": "",
 	"Documents": "",
 	"does not make any external connections, and your data stays securely on your locally hosted server.": "",
@@ -340,6 +346,7 @@
 	"ElevenLabs": "",
 	"Email": "",
 	"Embark on adventures": "",
+	"Embedding": "",
 	"Embedding Batch Size": "",
 	"Embedding Model": "",
 	"Embedding Model Engine": "",
@@ -348,15 +355,12 @@
 	"Enable autocomplete generation for chat messages": "",
 	"Enable Code Interpreter": "",
 	"Enable Community Sharing": "",
-	"Enable Google Drive": "",
 	"Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "",
 	"Enable Memory Mapping (mmap) to load model data. This option allows the system to use disk storage as an extension of RAM by treating disk files as if they were in RAM. This can improve model performance by allowing for faster data access. However, it may not work correctly with all systems and can consume a significant amount of disk space.": "",
 	"Enable Message Rating": "",
 	"Enable Mirostat sampling for controlling perplexity. (Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "",
 	"Enable New Sign Ups": "",
-	"Enable Web Search": "",
 	"Enabled": "",
-	"Engine": "",
 	"Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "",
 	"Enter {{role}} message here": "",
 	"Enter a detail about yourself for your LLMs to recall": "",
@@ -372,6 +376,8 @@
 	"Enter Chunk Overlap": "",
 	"Enter Chunk Size": "",
 	"Enter description": "",
+	"Enter Document Intelligence Endpoint": "",
+	"Enter Document Intelligence Key": "",
 	"Enter domains separated by commas (e.g., example.com,site.org)": "",
 	"Enter Exa API Key": "",
 	"Enter Github Raw URL": "",
@@ -507,9 +513,9 @@
 	"Gemini API Config": "",
 	"Gemini API Key is required.": "",
 	"General": "",
-	"General Settings": "",
 	"Generate an image": "",
 	"Generate Image": "",
+	"Generate prompt pair": "",
 	"Generating search query": "",
 	"Get started": "",
 	"Get started with {{WEBUI_NAME}}": "",
@@ -565,6 +571,7 @@
 	"Input commands": "",
 	"Install from Github URL": "",
 	"Instant Auto-Send After Voice Transcription": "",
+	"Integration": "",
 	"Interface": "",
 	"Invalid file format.": "",
 	"Invalid Tag": "",
@@ -612,16 +619,17 @@
 	"Listening...": "",
 	"Llama.cpp": "",
 	"LLMs can make mistakes. Verify important information.": "",
+	"Loader": "",
 	"Loading Kokoro.js...": "",
 	"Local": "",
 	"Local Models": "",
+	"Location access not allowed": "",
 	"Lost": "",
 	"LTR": "",
 	"Made by Open WebUI Community": "",
 	"Make sure to enclose them with": "",
 	"Make sure to export a workflow.json file as API format from ComfyUI.": "",
 	"Manage": "",
-	"Manage Arena Models": "",
 	"Manage Direct Connections": "",
 	"Manage Models": "",
 	"Manage Ollama": "",
@@ -689,6 +697,7 @@
 	"No HTML, CSS, or JavaScript content found.": "",
 	"No inference engine with management support found": "",
 	"No knowledge found": "",
+	"No memories to clear": "",
 	"No model IDs": "",
 	"No models found": "",
 	"No models selected": "",
@@ -718,6 +727,7 @@
 	"Ollama API settings updated": "",
 	"Ollama Version": "",
 	"On": "",
+	"OneDrive": "",
 	"Only alphanumeric characters and hyphens are allowed": "",
 	"Only alphanumeric characters and hyphens are allowed in the command string.": "",
 	"Only collections can be edited, create a new knowledge base to edit/add documents.": "",
@@ -788,11 +798,9 @@
 	"Prompt updated successfully": "",
 	"Prompts": "",
 	"Prompts Access": "",
-	"Proxy URL": "",
 	"Pull \"{{searchValue}}\" from Ollama.com": "",
 	"Pull a model from Ollama.com": "",
 	"Query Generation Prompt": "",
-	"Query Params": "",
 	"RAG Template": "",
 	"Rating": "",
 	"Re-rank models by topic similarity": "",
@@ -827,6 +835,7 @@
 	"Response notifications cannot be activated as the website permissions have been denied. Please visit your browser settings to grant the necessary access.": "",
 	"Response splitting": "",
 	"Result": "",
+	"Retrieval": "",
 	"Retrieval Query Generation": "",
 	"Rich Text Input for Chat": "",
 	"RK": "",
@@ -880,7 +889,6 @@
 	"Select an Ollama instance": "",
 	"Select Engine": "",
 	"Select Knowledge": "",
-	"Select model": "",
 	"Select only one model to call": "",
 	"Selected model(s) do not support image inputs": "",
 	"Semantic distance to query": "",
@@ -957,6 +965,7 @@
 	"Tags Generation": "",
 	"Tags Generation Prompt": "",
 	"Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "",
+	"Talk to model": "",
 	"Tap to interrupt": "",
 	"Tasks": "",
 	"Tavily API Key": "",
@@ -1041,6 +1050,7 @@
 	"Top P": "",
 	"Transformers": "",
 	"Trouble accessing Ollama?": "",
+	"Trust Proxy Environment": "",
 	"TTS Model": "",
 	"TTS Settings": "",
 	"TTS Voice": "",
@@ -1104,7 +1114,6 @@
 	"Warning: Jupyter execution enables arbitrary code execution, posing severe security risks—proceed with extreme caution.": "",
 	"Web": "",
 	"Web API": "",
-	"Web Loader Settings": "",
 	"Web Search": "",
 	"Web Search Engine": "",
 	"Web Search in Chat": "",
@@ -1146,5 +1155,6 @@
 	"Your account status is currently pending activation.": "",
 	"Your entire contribution will go directly to the plugin developer; Open WebUI does not take any percentage. However, the chosen funding platform might have its own fees.": "",
 	"Youtube": "",
-	"Youtube Loader Settings": ""
+	"Youtube Language": "",
+	"Youtube Proxy URL": ""
 }

+ 23 - 13
src/lib/i18n/locales/en-US/translation.json

@@ -13,6 +13,7 @@
 	"A task model is used when performing tasks such as generating titles for chats and web search queries": "",
 	"a user": "",
 	"About": "",
+	"Accept autocomplete generation / Jump to prompt variable": "",
 	"Access": "",
 	"Access Control": "",
 	"Accessible to all users": "",
@@ -85,6 +86,7 @@
 	"Archive All Chats": "",
 	"Archived Chats": "",
 	"archived-chat-export": "",
+	"Are you sure you want to clear all memories? This action cannot be undone.": "",
 	"Are you sure you want to delete this channel?": "",
 	"Are you sure you want to delete this message?": "",
 	"Are you sure you want to unarchive all archived chats?": "",
@@ -93,7 +95,7 @@
 	"Artifacts": "",
 	"Ask a question": "",
 	"Assistant": "",
-	"Attach file": "",
+	"Attach file from knowledge": "",
 	"Attention to detail": "",
 	"Attribute for Mail": "",
 	"Attribute for Username": "",
@@ -127,6 +129,7 @@
 	"Bocha Search API Key": "",
 	"Brave Search API Key": "",
 	"By {{name}}": "",
+	"Bypass Embedding and Retrieval": "",
 	"Bypass SSL verification for Websites": "",
 	"Calendar": "",
 	"Call": "",
@@ -156,11 +159,11 @@
 	"Checking for updates...": "",
 	"Choose a model before saving...": "",
 	"Chunk Overlap": "",
-	"Chunk Params": "",
 	"Chunk Size": "",
 	"Ciphers": "",
 	"Citation": "",
 	"Clear memory": "",
+	"Clear Memory": "",
 	"click here": "",
 	"Click here for filter guides.": "",
 	"Click here for help.": "",
@@ -208,7 +211,7 @@
 	"Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort. (Default: medium)": "",
 	"Contact Admin for WebUI Access": "",
 	"Content": "",
-	"Content Extraction": "",
+	"Content Extraction Engine": "",
 	"Context Length": "",
 	"Continue Response": "",
 	"Continue with {{provider}}": "",
@@ -245,6 +248,7 @@
 	"Current Model": "",
 	"Current Password": "",
 	"Custom": "",
+	"Danger Zone": "",
 	"Dark": "",
 	"Database": "",
 	"December": "",
@@ -305,6 +309,8 @@
 	"Do not install functions from sources you do not fully trust.": "",
 	"Do not install tools from sources you do not fully trust.": "",
 	"Document": "",
+	"Document Intelligence": "",
+	"Document Intelligence endpoint and key required.": "",
 	"Documentation": "",
 	"Documents": "",
 	"does not make any external connections, and your data stays securely on your locally hosted server.": "",
@@ -340,6 +346,7 @@
 	"ElevenLabs": "",
 	"Email": "",
 	"Embark on adventures": "",
+	"Embedding": "",
 	"Embedding Batch Size": "",
 	"Embedding Model": "",
 	"Embedding Model Engine": "",
@@ -348,15 +355,12 @@
 	"Enable autocomplete generation for chat messages": "",
 	"Enable Code Interpreter": "",
 	"Enable Community Sharing": "",
-	"Enable Google Drive": "",
 	"Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "",
 	"Enable Memory Mapping (mmap) to load model data. This option allows the system to use disk storage as an extension of RAM by treating disk files as if they were in RAM. This can improve model performance by allowing for faster data access. However, it may not work correctly with all systems and can consume a significant amount of disk space.": "",
 	"Enable Message Rating": "",
 	"Enable Mirostat sampling for controlling perplexity. (Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "",
 	"Enable New Sign Ups": "",
-	"Enable Web Search": "",
 	"Enabled": "",
-	"Engine": "",
 	"Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "",
 	"Enter {{role}} message here": "",
 	"Enter a detail about yourself for your LLMs to recall": "",
@@ -372,6 +376,8 @@
 	"Enter Chunk Overlap": "",
 	"Enter Chunk Size": "",
 	"Enter description": "",
+	"Enter Document Intelligence Endpoint": "",
+	"Enter Document Intelligence Key": "",
 	"Enter domains separated by commas (e.g., example.com,site.org)": "",
 	"Enter Exa API Key": "",
 	"Enter Github Raw URL": "",
@@ -507,9 +513,9 @@
 	"Gemini API Config": "",
 	"Gemini API Key is required.": "",
 	"General": "",
-	"General Settings": "",
 	"Generate an image": "",
 	"Generate Image": "",
+	"Generate prompt pair": "",
 	"Generating search query": "",
 	"Get started": "",
 	"Get started with {{WEBUI_NAME}}": "",
@@ -565,6 +571,7 @@
 	"Input commands": "",
 	"Install from Github URL": "",
 	"Instant Auto-Send After Voice Transcription": "",
+	"Integration": "",
 	"Interface": "",
 	"Invalid file format.": "",
 	"Invalid Tag": "",
@@ -612,16 +619,17 @@
 	"Listening...": "",
 	"Llama.cpp": "",
 	"LLMs can make mistakes. Verify important information.": "",
+	"Loader": "",
 	"Loading Kokoro.js...": "",
 	"Local": "",
 	"Local Models": "",
+	"Location access not allowed": "",
 	"Lost": "",
 	"LTR": "",
 	"Made by Open WebUI Community": "",
 	"Make sure to enclose them with": "",
 	"Make sure to export a workflow.json file as API format from ComfyUI.": "",
 	"Manage": "",
-	"Manage Arena Models": "",
 	"Manage Direct Connections": "",
 	"Manage Models": "",
 	"Manage Ollama": "",
@@ -689,6 +697,7 @@
 	"No HTML, CSS, or JavaScript content found.": "",
 	"No inference engine with management support found": "",
 	"No knowledge found": "",
+	"No memories to clear": "",
 	"No model IDs": "",
 	"No models found": "",
 	"No models selected": "",
@@ -718,6 +727,7 @@
 	"Ollama API settings updated": "",
 	"Ollama Version": "",
 	"On": "",
+	"OneDrive": "",
 	"Only alphanumeric characters and hyphens are allowed": "",
 	"Only alphanumeric characters and hyphens are allowed in the command string.": "",
 	"Only collections can be edited, create a new knowledge base to edit/add documents.": "",
@@ -788,11 +798,9 @@
 	"Prompt updated successfully": "",
 	"Prompts": "",
 	"Prompts Access": "",
-	"Proxy URL": "",
 	"Pull \"{{searchValue}}\" from Ollama.com": "",
 	"Pull a model from Ollama.com": "",
 	"Query Generation Prompt": "",
-	"Query Params": "",
 	"RAG Template": "",
 	"Rating": "",
 	"Re-rank models by topic similarity": "",
@@ -827,6 +835,7 @@
 	"Response notifications cannot be activated as the website permissions have been denied. Please visit your browser settings to grant the necessary access.": "",
 	"Response splitting": "",
 	"Result": "",
+	"Retrieval": "",
 	"Retrieval Query Generation": "",
 	"Rich Text Input for Chat": "",
 	"RK": "",
@@ -880,7 +889,6 @@
 	"Select an Ollama instance": "",
 	"Select Engine": "",
 	"Select Knowledge": "",
-	"Select model": "",
 	"Select only one model to call": "",
 	"Selected model(s) do not support image inputs": "",
 	"Semantic distance to query": "",
@@ -957,6 +965,7 @@
 	"Tags Generation": "",
 	"Tags Generation Prompt": "",
 	"Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "",
+	"Talk to model": "",
 	"Tap to interrupt": "",
 	"Tasks": "",
 	"Tavily API Key": "",
@@ -1041,6 +1050,7 @@
 	"Top P": "",
 	"Transformers": "",
 	"Trouble accessing Ollama?": "",
+	"Trust Proxy Environment": "",
 	"TTS Model": "",
 	"TTS Settings": "",
 	"TTS Voice": "",
@@ -1104,7 +1114,6 @@
 	"Warning: Jupyter execution enables arbitrary code execution, posing severe security risks—proceed with extreme caution.": "",
 	"Web": "",
 	"Web API": "",
-	"Web Loader Settings": "",
 	"Web Search": "",
 	"Web Search Engine": "",
 	"Web Search in Chat": "",
@@ -1146,5 +1155,6 @@
 	"Your account status is currently pending activation.": "",
 	"Your entire contribution will go directly to the plugin developer; Open WebUI does not take any percentage. However, the chosen funding platform might have its own fees.": "",
 	"Youtube": "",
-	"Youtube Loader Settings": ""
+	"Youtube Language": "",
+	"Youtube Proxy URL": ""
 }

+ 23 - 13
src/lib/i18n/locales/es-ES/translation.json

@@ -13,6 +13,7 @@
 	"A task model is used when performing tasks such as generating titles for chats and web search queries": "Un modelo de tareas se utiliza cuando se realizan tareas como la generación de títulos para chats y consultas de búsqueda web",
 	"a user": "un usuario",
 	"About": "Sobre nosotros",
+	"Accept autocomplete generation / Jump to prompt variable": "",
 	"Access": "Acceso",
 	"Access Control": "Control de Acceso",
 	"Accessible to all users": "Accesible para todos los usuarios",
@@ -85,6 +86,7 @@
 	"Archive All Chats": "Archivar todos los chats",
 	"Archived Chats": "Chats archivados",
 	"archived-chat-export": "Exportación de chats archivados",
+	"Are you sure you want to clear all memories? This action cannot be undone.": "",
 	"Are you sure you want to delete this channel?": "¿Seguro de que quieres eliminar este canal?",
 	"Are you sure you want to delete this message?": "¿Seguro de que quieres eliminar este mensaje? ",
 	"Are you sure you want to unarchive all archived chats?": "¿Estás seguro de que quieres desarchivar todos los chats archivados?",
@@ -93,7 +95,7 @@
 	"Artifacts": "Artefactos",
 	"Ask a question": "Haz una pregunta",
 	"Assistant": "Asistente",
-	"Attach file": "Adjuntar archivo",
+	"Attach file from knowledge": "",
 	"Attention to detail": "Detalle preciso",
 	"Attribute for Mail": "Atributo para correo",
 	"Attribute for Username": "Atributo para el nombre de usuario",
@@ -127,6 +129,7 @@
 	"Bocha Search API Key": "",
 	"Brave Search API Key": "Clave de API de Brave Search",
 	"By {{name}}": "Por {{name}}",
+	"Bypass Embedding and Retrieval": "",
 	"Bypass SSL verification for Websites": "Desactivar la verificación SSL para sitios web",
 	"Calendar": "",
 	"Call": "Llamada",
@@ -156,11 +159,11 @@
 	"Checking for updates...": "Verificando actualizaciones...",
 	"Choose a model before saving...": "Escoge un modelo antes de guardar los cambios...",
 	"Chunk Overlap": "Superposición de fragmentos",
-	"Chunk Params": "Parámetros de fragmentos",
 	"Chunk Size": "Tamaño de fragmentos",
 	"Ciphers": "Cifrado",
 	"Citation": "Cita",
 	"Clear memory": "Liberar memoria",
+	"Clear Memory": "",
 	"click here": "Clic aquí",
 	"Click here for filter guides.": "Clic aquí para guías de filtros",
 	"Click here for help.": "Presiona aquí para obtener ayuda.",
@@ -208,7 +211,7 @@
 	"Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort. (Default: medium)": " Restringe el esfuerzo en la razonamiento para los modelos de razonamiento. Solo aplicable a los modelos de razonamiento de proveedores específicos que admiten el esfuerzo de razonamiento. (Por defecto: medio)",
 	"Contact Admin for WebUI Access": "Contacta el administrador para obtener acceso al WebUI",
 	"Content": "Contenido",
-	"Content Extraction": "Extracción de contenido",
+	"Content Extraction Engine": "",
 	"Context Length": "Longitud del contexto",
 	"Continue Response": "Continuar Respuesta",
 	"Continue with {{provider}}": "Continuar con {{provider}}",
@@ -245,6 +248,7 @@
 	"Current Model": "Modelo Actual",
 	"Current Password": "Contraseña Actual",
 	"Custom": "Personalizado",
+	"Danger Zone": "",
 	"Dark": "Oscuro",
 	"Database": "Base de datos",
 	"December": "Diciembre",
@@ -305,6 +309,8 @@
 	"Do not install functions from sources you do not fully trust.": "No instale funciones desde fuentes que no confíe totalmente.",
 	"Do not install tools from sources you do not fully trust.": "No instale herramientas desde fuentes que no confíe totalmente.",
 	"Document": "Documento",
+	"Document Intelligence": "",
+	"Document Intelligence endpoint and key required.": "",
 	"Documentation": "Documentación",
 	"Documents": "Documentos",
 	"does not make any external connections, and your data stays securely on your locally hosted server.": "no realiza ninguna conexión externa y sus datos permanecen seguros en su servidor alojado localmente.",
@@ -340,6 +346,7 @@
 	"ElevenLabs": "ElevenLabs",
 	"Email": "Email",
 	"Embark on adventures": "Emprende aventuras",
+	"Embedding": "",
 	"Embedding Batch Size": "Tamaño de Embedding",
 	"Embedding Model": "Modelo de Embedding",
 	"Embedding Model Engine": "Motor de Modelo de Embedding",
@@ -348,15 +355,12 @@
 	"Enable autocomplete generation for chat messages": "Habilitar generación de autocompletado para mensajes de chat",
 	"Enable Code Interpreter": "",
 	"Enable Community Sharing": "Habilitar el uso compartido de la comunidad",
-	"Enable Google Drive": "Habilitar Google Drive",
 	"Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "Habilitar bloqueo de memoria (mlock) para evitar que los datos del modelo se intercambien fuera de la RAM. Esta opción bloquea el conjunto de páginas de trabajo del modelo en la RAM, asegurando que no se intercambiarán fuera del disco. Esto puede ayudar a mantener el rendimiento evitando fallos de página y asegurando un acceso rápido a los datos.",
 	"Enable Memory Mapping (mmap) to load model data. This option allows the system to use disk storage as an extension of RAM by treating disk files as if they were in RAM. This can improve model performance by allowing for faster data access. However, it may not work correctly with all systems and can consume a significant amount of disk space.": "Habilitar asignación de memoria (mmap) para cargar datos del modelo. Esta opción permite al sistema usar el almacenamiento en disco como una extensión de la RAM al tratar los archivos en disco como si estuvieran en la RAM. Esto puede mejorar el rendimiento del modelo permitiendo un acceso más rápido a los datos. Sin embargo, puede no funcionar correctamente con todos los sistemas y puede consumir una cantidad significativa de espacio en disco.",
 	"Enable Message Rating": "Habilitar la calificación de los mensajes",
 	"Enable Mirostat sampling for controlling perplexity. (Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "Habilitar muestreo Mirostat para controlar la perplejidad. (Predeterminado: 0, 0 = Deshabilitado, 1 = Mirostat, 2 = Mirostat 2.0)",
 	"Enable New Sign Ups": "Habilitar Nuevos Registros",
-	"Enable Web Search": "Habilitar la búsqueda web",
 	"Enabled": "Activado",
-	"Engine": "Motor",
 	"Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "Asegúrese de que su archivo CSV incluya 4 columnas en este orden: Nombre, Correo Electrónico, Contraseña, Rol.",
 	"Enter {{role}} message here": "Ingrese el mensaje {{role}} aquí",
 	"Enter a detail about yourself for your LLMs to recall": "Ingrese un detalle sobre usted para que sus LLMs recuerden",
@@ -372,6 +376,8 @@
 	"Enter Chunk Overlap": "Ingresar superposición de fragmentos",
 	"Enter Chunk Size": "Ingrese el tamaño del fragmento",
 	"Enter description": "Ingrese la descripción",
+	"Enter Document Intelligence Endpoint": "",
+	"Enter Document Intelligence Key": "",
 	"Enter domains separated by commas (e.g., example.com,site.org)": "",
 	"Enter Exa API Key": "Ingrese la clave API de Exa",
 	"Enter Github Raw URL": "Ingresa la URL sin procesar de Github",
@@ -507,9 +513,9 @@
 	"Gemini API Config": "",
 	"Gemini API Key is required.": "",
 	"General": "General",
-	"General Settings": "Opciones Generales",
 	"Generate an image": "Generar una imagen",
 	"Generate Image": "Generar imagen",
+	"Generate prompt pair": "",
 	"Generating search query": "Generación de consultas de búsqueda",
 	"Get started": "Empezar",
 	"Get started with {{WEBUI_NAME}}": "Empezar con {{WEBUI_NAME}}",
@@ -565,6 +571,7 @@
 	"Input commands": "Ingresar comandos",
 	"Install from Github URL": "Instalar desde la URL de Github",
 	"Instant Auto-Send After Voice Transcription": "Auto-Enviar Después de la Transcripción de Voz",
+	"Integration": "",
 	"Interface": "Interfaz",
 	"Invalid file format.": "Formato de archivo inválido.",
 	"Invalid Tag": "Etiqueta Inválida",
@@ -612,16 +619,17 @@
 	"Listening...": "Escuchando...",
 	"Llama.cpp": "Llama.cpp",
 	"LLMs can make mistakes. Verify important information.": "Los LLM pueden cometer errores. Verifica la información importante.",
+	"Loader": "",
 	"Loading Kokoro.js...": "",
 	"Local": "Local",
 	"Local Models": "Modelos locales",
+	"Location access not allowed": "",
 	"Lost": "Perdido",
 	"LTR": "LTR",
 	"Made by Open WebUI Community": "Hecho por la comunidad de OpenWebUI",
 	"Make sure to enclose them with": "Asegúrese de adjuntarlos con",
 	"Make sure to export a workflow.json file as API format from ComfyUI.": "Asegúrese de exportar un archivo workflow.json en formato API desde ComfyUI.",
 	"Manage": "Gestionar",
-	"Manage Arena Models": "Gestionar modelos de Arena",
 	"Manage Direct Connections": "",
 	"Manage Models": "Gestionar modelos",
 	"Manage Ollama": "Gestionar Ollama",
@@ -689,6 +697,7 @@
 	"No HTML, CSS, or JavaScript content found.": "No se encontró contenido HTML, CSS, o JavaScript.",
 	"No inference engine with management support found": "No se encontró un motor de inferencia con soporte de gestión",
 	"No knowledge found": "No se encontró ningún conocimiento",
+	"No memories to clear": "",
 	"No model IDs": "No hay IDs de modelos",
 	"No models found": "No se encontraron modelos",
 	"No models selected": "No se seleccionaron modelos",
@@ -718,6 +727,7 @@
 	"Ollama API settings updated": "Configuración de Ollama API actualizada",
 	"Ollama Version": "Versión de Ollama",
 	"On": "Activado",
+	"OneDrive": "",
 	"Only alphanumeric characters and hyphens are allowed": "Sólo se permiten caracteres alfanuméricos y guiones",
 	"Only alphanumeric characters and hyphens are allowed in the command string.": "Sólo se permiten caracteres alfanuméricos y guiones en la cadena de comando.",
 	"Only collections can be edited, create a new knowledge base to edit/add documents.": "Solo se pueden editar las colecciones, crear una nueva base de conocimientos para editar / añadir documentos",
@@ -788,11 +798,9 @@
 	"Prompt updated successfully": "Prompt actualizado exitosamente",
 	"Prompts": "Prompts",
 	"Prompts Access": "Acceso a Prompts",
-	"Proxy URL": "URL del proxy",
 	"Pull \"{{searchValue}}\" from Ollama.com": "Extraer \"{{searchValue}}\" de Ollama.com",
 	"Pull a model from Ollama.com": "Obtener un modelo de Ollama.com",
 	"Query Generation Prompt": "Prompt de generación de consulta",
-	"Query Params": "Parámetros de consulta",
 	"RAG Template": "Plantilla de RAG",
 	"Rating": "Calificación",
 	"Re-rank models by topic similarity": "Re-clasificar modelos por similitud de tema",
@@ -827,6 +835,7 @@
 	"Response notifications cannot be activated as the website permissions have been denied. Please visit your browser settings to grant the necessary access.": "Las notificaciones de respuesta no pueden activarse debido a que los permisos del sitio web han sido denegados. Por favor, visite las configuraciones de su navegador para otorgar el acceso necesario.",
 	"Response splitting": "División de respuestas",
 	"Result": "Resultado",
+	"Retrieval": "",
 	"Retrieval Query Generation": "Generación de consulta de recuperación",
 	"Rich Text Input for Chat": "Entrada de texto enriquecido para chat",
 	"RK": "RK",
@@ -880,7 +889,6 @@
 	"Select an Ollama instance": "Seleccionar una instancia de Ollama",
 	"Select Engine": "Selecciona Motor",
 	"Select Knowledge": "Selecciona Conocimiento",
-	"Select model": "Selecciona un modelo",
 	"Select only one model to call": "Selecciona sólo un modelo para llamar",
 	"Selected model(s) do not support image inputs": "Los modelos seleccionados no admiten entradas de imagen",
 	"Semantic distance to query": "Distancia semántica a la consulta",
@@ -957,6 +965,7 @@
 	"Tags Generation": "Generación de etiquetas",
 	"Tags Generation Prompt": "Prompt de generación de etiquetas",
 	"Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "El muestreo libre de cola se utiliza para reducir el impacto de los tokens menos probables en la salida. Un valor más alto (p.ej., 2.0) reducirá el impacto más, mientras que un valor de 1.0 deshabilitará esta configuración. (predeterminado: 1)",
+	"Talk to model": "",
 	"Tap to interrupt": "Toca para interrumpir",
 	"Tasks": "",
 	"Tavily API Key": "Clave API de Tavily",
@@ -1041,6 +1050,7 @@
 	"Top P": "Top P",
 	"Transformers": "Transformadores",
 	"Trouble accessing Ollama?": "¿Problemas para acceder a Ollama?",
+	"Trust Proxy Environment": "",
 	"TTS Model": "Modelo TTS",
 	"TTS Settings": "Configuración de TTS",
 	"TTS Voice": "Voz del TTS",
@@ -1104,7 +1114,6 @@
 	"Warning: Jupyter execution enables arbitrary code execution, posing severe security risks—proceed with extreme caution.": "",
 	"Web": "Web",
 	"Web API": "API Web",
-	"Web Loader Settings": "Configuración del cargador web",
 	"Web Search": "Búsqueda en la Web",
 	"Web Search Engine": "Motor de búsqueda web",
 	"Web Search in Chat": "Búsqueda web en chat",
@@ -1146,5 +1155,6 @@
 	"Your account status is currently pending activation.": "El estado de su cuenta actualmente se encuentra pendiente de activación.",
 	"Your entire contribution will go directly to the plugin developer; Open WebUI does not take any percentage. However, the chosen funding platform might have its own fees.": "Su contribución completa irá directamente a el desarrollador del plugin; Open WebUI no toma ningun porcentaje. Sin embargo, la plataforma de financiación elegida podría tener sus propias tarifas.",
 	"Youtube": "Youtube",
-	"Youtube Loader Settings": "Configuración del cargador de Youtube"
+	"Youtube Language": "",
+	"Youtube Proxy URL": ""
 }

+ 23 - 13
src/lib/i18n/locales/eu-ES/translation.json

@@ -13,6 +13,7 @@
 	"A task model is used when performing tasks such as generating titles for chats and web search queries": "Ataza eredua erabiltzen da txatentzako izenburuak eta web bilaketa kontsultak sortzeko bezalako atazak egitean",
 	"a user": "erabiltzaile bat",
 	"About": "Honi buruz",
+	"Accept autocomplete generation / Jump to prompt variable": "",
 	"Access": "Sarbidea",
 	"Access Control": "Sarbide Kontrola",
 	"Accessible to all users": "Erabiltzaile guztientzat eskuragarri",
@@ -85,6 +86,7 @@
 	"Archive All Chats": "Artxibatu Txat Guztiak",
 	"Archived Chats": "Artxibatutako Txatak",
 	"archived-chat-export": "artxibatutako-txat-esportazioa",
+	"Are you sure you want to clear all memories? This action cannot be undone.": "",
 	"Are you sure you want to delete this channel?": "",
 	"Are you sure you want to delete this message?": "",
 	"Are you sure you want to unarchive all archived chats?": "Ziur zaude artxibatutako txat guztiak desartxibatu nahi dituzula?",
@@ -93,7 +95,7 @@
 	"Artifacts": "Artefaktuak",
 	"Ask a question": "Egin galdera bat",
 	"Assistant": "Laguntzailea",
-	"Attach file": "Erantsi fitxategia",
+	"Attach file from knowledge": "",
 	"Attention to detail": "Xehetasunei arreta",
 	"Attribute for Mail": "",
 	"Attribute for Username": "Erabiltzaile-izenerako atributua",
@@ -127,6 +129,7 @@
 	"Bocha Search API Key": "",
 	"Brave Search API Key": "Brave Bilaketa API Gakoa",
 	"By {{name}}": "{{name}}-k",
+	"Bypass Embedding and Retrieval": "",
 	"Bypass SSL verification for Websites": "Saihestu SSL egiaztapena Webguneentzat",
 	"Calendar": "",
 	"Call": "Deia",
@@ -156,11 +159,11 @@
 	"Checking for updates...": "Eguneraketak bilatzen...",
 	"Choose a model before saving...": "Aukeratu eredu bat gorde aurretik...",
 	"Chunk Overlap": "Zatien Gainjartzea",
-	"Chunk Params": "Zatien Parametroak",
 	"Chunk Size": "Zati Tamaina",
 	"Ciphers": "Zifratuak",
 	"Citation": "Aipamena",
 	"Clear memory": "Garbitu memoria",
+	"Clear Memory": "",
 	"click here": "klikatu hemen",
 	"Click here for filter guides.": "Klikatu hemen iragazkien gidak ikusteko.",
 	"Click here for help.": "Klikatu hemen laguntzarako.",
@@ -208,7 +211,7 @@
 	"Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort. (Default: medium)": "",
 	"Contact Admin for WebUI Access": "Jarri harremanetan Administratzailearekin WebUI Sarbiderako",
 	"Content": "Edukia",
-	"Content Extraction": "Eduki Erauzketa",
+	"Content Extraction Engine": "",
 	"Context Length": "Testuinguru Luzera",
 	"Continue Response": "Jarraitu Erantzuna",
 	"Continue with {{provider}}": "Jarraitu {{provider}}-rekin",
@@ -245,6 +248,7 @@
 	"Current Model": "Uneko Eredua",
 	"Current Password": "Uneko Pasahitza",
 	"Custom": "Pertsonalizatua",
+	"Danger Zone": "",
 	"Dark": "Iluna",
 	"Database": "Datu-basea",
 	"December": "Abendua",
@@ -305,6 +309,8 @@
 	"Do not install functions from sources you do not fully trust.": "Ez instalatu guztiz fidagarriak ez diren iturrietatik datozen funtzioak.",
 	"Do not install tools from sources you do not fully trust.": "Ez instalatu guztiz fidagarriak ez diren iturrietatik datozen tresnak.",
 	"Document": "Dokumentua",
+	"Document Intelligence": "",
+	"Document Intelligence endpoint and key required.": "",
 	"Documentation": "Dokumentazioa",
 	"Documents": "Dokumentuak",
 	"does not make any external connections, and your data stays securely on your locally hosted server.": "ez du kanpo konexiorik egiten, eta zure datuak modu seguruan mantentzen dira zure zerbitzari lokalean.",
@@ -340,6 +346,7 @@
 	"ElevenLabs": "ElevenLabs",
 	"Email": "Posta elektronikoa",
 	"Embark on adventures": "Hasi abenturak",
+	"Embedding": "",
 	"Embedding Batch Size": "Embedding Batch Tamaina",
 	"Embedding Model": "Embedding Eredua",
 	"Embedding Model Engine": "Embedding Eredu Motorea",
@@ -348,15 +355,12 @@
 	"Enable autocomplete generation for chat messages": "",
 	"Enable Code Interpreter": "",
 	"Enable Community Sharing": "Gaitu Komunitatearen Partekatzea",
-	"Enable Google Drive": "",
 	"Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "Gaitu Memoria Blokeatzea (mlock) ereduaren datuak RAM memoriatik kanpo ez trukatzeko. Aukera honek ereduaren lan-orri multzoa RAMean blokatzen du, diskora ez direla trukatuko ziurtatuz. Honek errendimendua mantentzen lagun dezake, orri-hutsegiteak saihestuz eta datuen sarbide azkarra bermatuz.",
 	"Enable Memory Mapping (mmap) to load model data. This option allows the system to use disk storage as an extension of RAM by treating disk files as if they were in RAM. This can improve model performance by allowing for faster data access. However, it may not work correctly with all systems and can consume a significant amount of disk space.": "Gaitu Memoria Mapaketa (mmap) ereduaren datuak kargatzeko. Aukera honek sistemari disko-biltegiratzea RAM memoriaren luzapen gisa erabiltzea ahalbidetzen dio, diskoko fitxategiak RAMean baleude bezala tratatuz. Honek ereduaren errendimendua hobe dezake, datuen sarbide azkarragoa ahalbidetuz. Hala ere, baliteke sistema guztietan behar bezala ez funtzionatzea eta disko-espazio handia kontsumitu dezake.",
 	"Enable Message Rating": "Gaitu Mezuen Balorazioa",
 	"Enable Mirostat sampling for controlling perplexity. (Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "Gaitu Mirostat laginketa nahasmena kontrolatzeko. (Lehenetsia: 0, 0 = Desgaituta, 1 = Mirostat, 2 = Mirostat 2.0)",
 	"Enable New Sign Ups": "Gaitu Izena Emate Berriak",
-	"Enable Web Search": "Gaitu Web Bilaketa",
 	"Enabled": "Gaituta",
-	"Engine": "Motorea",
 	"Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "Ziurtatu zure CSV fitxategiak 4 zutabe dituela ordena honetan: Izena, Posta elektronikoa, Pasahitza, Rola.",
 	"Enter {{role}} message here": "Sartu {{role}} mezua hemen",
 	"Enter a detail about yourself for your LLMs to recall": "Sartu zure buruari buruzko xehetasun bat LLMek gogoratzeko",
@@ -372,6 +376,8 @@
 	"Enter Chunk Overlap": "Sartu Zatien Gainjartzea (chunk overlap)",
 	"Enter Chunk Size": "Sartu Zati Tamaina",
 	"Enter description": "Sartu deskribapena",
+	"Enter Document Intelligence Endpoint": "",
+	"Enter Document Intelligence Key": "",
 	"Enter domains separated by commas (e.g., example.com,site.org)": "",
 	"Enter Exa API Key": "",
 	"Enter Github Raw URL": "Sartu Github Raw URLa",
@@ -507,9 +513,9 @@
 	"Gemini API Config": "",
 	"Gemini API Key is required.": "",
 	"General": "Orokorra",
-	"General Settings": "Ezarpen Orokorrak",
 	"Generate an image": "",
 	"Generate Image": "Sortu Irudia",
+	"Generate prompt pair": "",
 	"Generating search query": "Bilaketa kontsulta sortzen",
 	"Get started": "Hasi",
 	"Get started with {{WEBUI_NAME}}": "Hasi {{WEBUI_NAME}}-rekin",
@@ -565,6 +571,7 @@
 	"Input commands": "Sartu komandoak",
 	"Install from Github URL": "Instalatu Github URLtik",
 	"Instant Auto-Send After Voice Transcription": "Bidalketa Automatiko Berehalakoa Ahots Transkripzioaren Ondoren",
+	"Integration": "",
 	"Interface": "Interfazea",
 	"Invalid file format.": "Fitxategi formatu baliogabea.",
 	"Invalid Tag": "Etiketa Baliogabea",
@@ -612,16 +619,17 @@
 	"Listening...": "Entzuten...",
 	"Llama.cpp": "",
 	"LLMs can make mistakes. Verify important information.": "LLMek akatsak egin ditzakete. Egiaztatu informazio garrantzitsua.",
+	"Loader": "",
 	"Loading Kokoro.js...": "",
 	"Local": "Lokala",
 	"Local Models": "Modelo lokalak",
+	"Location access not allowed": "",
 	"Lost": "Galduta",
 	"LTR": "LTR",
 	"Made by Open WebUI Community": "OpenWebUI Komunitateak egina",
 	"Make sure to enclose them with": "Ziurtatu hauek gehitzen dituzula",
 	"Make sure to export a workflow.json file as API format from ComfyUI.": "Ziurtatu workflow.json fitxategia API formatu gisa esportatzen duzula ComfyUI-tik.",
 	"Manage": "Kudeatu",
-	"Manage Arena Models": "Kudeatu Arena Modeloak",
 	"Manage Direct Connections": "",
 	"Manage Models": "",
 	"Manage Ollama": "Kudeatu Ollama",
@@ -689,6 +697,7 @@
 	"No HTML, CSS, or JavaScript content found.": "Ez da HTML, CSS, edo JavaScript edukirik aurkitu.",
 	"No inference engine with management support found": "",
 	"No knowledge found": "Ez da ezagutzarik aurkitu",
+	"No memories to clear": "",
 	"No model IDs": "Ez dago modelo IDrik",
 	"No models found": "Ez da modelorik aurkitu",
 	"No models selected": "Ez da modelorik hautatu",
@@ -718,6 +727,7 @@
 	"Ollama API settings updated": "Ollama API ezarpenak eguneratu dira",
 	"Ollama Version": "Ollama bertsioa",
 	"On": "Piztuta",
+	"OneDrive": "",
 	"Only alphanumeric characters and hyphens are allowed": "Karaktere alfanumerikoak eta marratxoak soilik onartzen dira",
 	"Only alphanumeric characters and hyphens are allowed in the command string.": "Karaktere alfanumerikoak eta marratxoak soilik onartzen dira komando katean.",
 	"Only collections can be edited, create a new knowledge base to edit/add documents.": "Bildumak soilik edita daitezke, sortu ezagutza-base berri bat dokumentuak editatzeko/gehitzeko.",
@@ -788,11 +798,9 @@
 	"Prompt updated successfully": "Prompt-a ongi eguneratu da",
 	"Prompts": "Prompt-ak",
 	"Prompts Access": "Prompt sarbidea",
-	"Proxy URL": "",
 	"Pull \"{{searchValue}}\" from Ollama.com": "Ekarri \"{{searchValue}}\" Ollama.com-etik",
 	"Pull a model from Ollama.com": "Ekarri modelo bat Ollama.com-etik",
 	"Query Generation Prompt": "Kontsulta sortzeko prompt-a",
-	"Query Params": "Kontsulta parametroak",
 	"RAG Template": "RAG txantiloia",
 	"Rating": "Balorazioa",
 	"Re-rank models by topic similarity": "Berrantolatu modeloak gai antzekotasunaren arabera",
@@ -827,6 +835,7 @@
 	"Response notifications cannot be activated as the website permissions have been denied. Please visit your browser settings to grant the necessary access.": "Erantzunen jakinarazpenak ezin dira aktibatu webgunearen baimenak ukatu direlako. Mesedez, bisitatu zure nabigatzailearen ezarpenak beharrezko sarbidea emateko.",
 	"Response splitting": "Erantzun banaketa",
 	"Result": "Emaitza",
+	"Retrieval": "",
 	"Retrieval Query Generation": "",
 	"Rich Text Input for Chat": "Testu aberastuko sarrera txaterako",
 	"RK": "RK",
@@ -880,7 +889,6 @@
 	"Select an Ollama instance": "",
 	"Select Engine": "Hautatu motorra",
 	"Select Knowledge": "Hautatu ezagutza",
-	"Select model": "Hautatu modeloa",
 	"Select only one model to call": "Hautatu modelo bakarra deitzeko",
 	"Selected model(s) do not support image inputs": "Hautatutako modelo(e)k ez dute irudi sarrerarik onartzen",
 	"Semantic distance to query": "Kontsultarako distantzia semantikoa",
@@ -957,6 +965,7 @@
 	"Tags Generation": "",
 	"Tags Generation Prompt": "Etiketa sortzeko prompta",
 	"Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "Isats-libre laginketa erabiltzen da irteran probabilitate txikiagoko tokenen eragina murrizteko. Balio altuago batek (adib., 2.0) eragina gehiago murriztuko du, 1.0 balioak ezarpen hau desgaitzen duen bitartean. (lehenetsia: 1)",
+	"Talk to model": "",
 	"Tap to interrupt": "Ukitu eteteko",
 	"Tasks": "",
 	"Tavily API Key": "Tavily API gakoa",
@@ -1041,6 +1050,7 @@
 	"Top P": "Goiko P",
 	"Transformers": "Transformatzaileak",
 	"Trouble accessing Ollama?": "Arazoak Ollama atzitzeko?",
+	"Trust Proxy Environment": "",
 	"TTS Model": "TTS modeloa",
 	"TTS Settings": "TTS ezarpenak",
 	"TTS Voice": "TTS ahotsa",
@@ -1104,7 +1114,6 @@
 	"Warning: Jupyter execution enables arbitrary code execution, posing severe security risks—proceed with extreme caution.": "",
 	"Web": "Weba",
 	"Web API": "Web APIa",
-	"Web Loader Settings": "Web kargatzailearen ezarpenak",
 	"Web Search": "Web bilaketa",
 	"Web Search Engine": "Web bilaketa motorra",
 	"Web Search in Chat": "",
@@ -1146,5 +1155,6 @@
 	"Your account status is currently pending activation.": "Zure kontuaren egoera aktibazio zain dago.",
 	"Your entire contribution will go directly to the plugin developer; Open WebUI does not take any percentage. However, the chosen funding platform might have its own fees.": "Zure ekarpen osoa zuzenean plugin garatzaileari joango zaio; Open WebUI-k ez du ehunekorik hartzen. Hala ere, aukeratutako finantzaketa plataformak bere komisioak izan ditzake.",
 	"Youtube": "Youtube",
-	"Youtube Loader Settings": "Youtube kargatzailearen ezarpenak"
+	"Youtube Language": "",
+	"Youtube Proxy URL": ""
 }

+ 23 - 13
src/lib/i18n/locales/fa-IR/translation.json

@@ -13,6 +13,7 @@
 	"A task model is used when performing tasks such as generating titles for chats and web search queries": "یک مدل وظیفه هنگام انجام وظایف مانند تولید عناوین برای چت ها و نمایش های جستجوی وب استفاده می شود.",
 	"a user": "یک کاربر",
 	"About": "درباره",
+	"Accept autocomplete generation / Jump to prompt variable": "",
 	"Access": "",
 	"Access Control": "",
 	"Accessible to all users": "",
@@ -85,6 +86,7 @@
 	"Archive All Chats": "بایگانی همه گفتگوها",
 	"Archived Chats": "گفتگوهای بایگانی\u200cشده",
 	"archived-chat-export": "",
+	"Are you sure you want to clear all memories? This action cannot be undone.": "",
 	"Are you sure you want to delete this channel?": "",
 	"Are you sure you want to delete this message?": "",
 	"Are you sure you want to unarchive all archived chats?": "",
@@ -93,7 +95,7 @@
 	"Artifacts": "",
 	"Ask a question": "سوالی بپرسید",
 	"Assistant": "دستیار",
-	"Attach file": "پیوست پرونده",
+	"Attach file from knowledge": "",
 	"Attention to detail": "دقیق",
 	"Attribute for Mail": "",
 	"Attribute for Username": "",
@@ -127,6 +129,7 @@
 	"Bocha Search API Key": "",
 	"Brave Search API Key": "کلید API جستجوی شجاع",
 	"By {{name}}": "",
+	"Bypass Embedding and Retrieval": "",
 	"Bypass SSL verification for Websites": "عبور از تأیید SSL برای وب سایت ها",
 	"Calendar": "",
 	"Call": "",
@@ -156,11 +159,11 @@
 	"Checking for updates...": "در حال بررسی برای به\u200cروزرسانی..",
 	"Choose a model before saving...": "قبل از ذخیره یک مدل را انتخاب کنید...",
 	"Chunk Overlap": "همپوشانی تکه",
-	"Chunk Params": "پارامترهای تکه",
 	"Chunk Size": "اندازه تکه",
 	"Ciphers": "",
 	"Citation": "استناد",
 	"Clear memory": "پاک کردن حافظه",
+	"Clear Memory": "",
 	"click here": "",
 	"Click here for filter guides.": "",
 	"Click here for help.": "برای کمک اینجا را کلیک کنید.",
@@ -208,7 +211,7 @@
 	"Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort. (Default: medium)": "",
 	"Contact Admin for WebUI Access": "برای دسترسی به WebUI با مدیر تماس بگیرید",
 	"Content": "محتوا",
-	"Content Extraction": "استخراج محتوا",
+	"Content Extraction Engine": "",
 	"Context Length": "طول زمینه",
 	"Continue Response": "ادامه پاسخ",
 	"Continue with {{provider}}": "با {{provider}} ادامه دهید",
@@ -245,6 +248,7 @@
 	"Current Model": "مدل فعلی",
 	"Current Password": "رمز عبور فعلی",
 	"Custom": "دلخواه",
+	"Danger Zone": "",
 	"Dark": "تیره",
 	"Database": "پایگاه داده",
 	"December": "دسامبر",
@@ -305,6 +309,8 @@
 	"Do not install functions from sources you do not fully trust.": "",
 	"Do not install tools from sources you do not fully trust.": "",
 	"Document": "سند",
+	"Document Intelligence": "",
+	"Document Intelligence endpoint and key required.": "",
 	"Documentation": "",
 	"Documents": "اسناد",
 	"does not make any external connections, and your data stays securely on your locally hosted server.": "هیچ اتصال خارجی ایجاد نمی کند و داده های شما به طور ایمن در سرور میزبان محلی شما باقی می ماند.",
@@ -340,6 +346,7 @@
 	"ElevenLabs": "",
 	"Email": "ایمیل",
 	"Embark on adventures": "",
+	"Embedding": "",
 	"Embedding Batch Size": "",
 	"Embedding Model": "مدل پیدائش",
 	"Embedding Model Engine": "محرک مدل پیدائش",
@@ -348,15 +355,12 @@
 	"Enable autocomplete generation for chat messages": "",
 	"Enable Code Interpreter": "",
 	"Enable Community Sharing": "فعالسازی اشتراک انجمن",
-	"Enable Google Drive": "",
 	"Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "",
 	"Enable Memory Mapping (mmap) to load model data. This option allows the system to use disk storage as an extension of RAM by treating disk files as if they were in RAM. This can improve model performance by allowing for faster data access. However, it may not work correctly with all systems and can consume a significant amount of disk space.": "",
 	"Enable Message Rating": "",
 	"Enable Mirostat sampling for controlling perplexity. (Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "",
 	"Enable New Sign Ups": "فعال کردن ثبت نام\u200cهای جدید",
-	"Enable Web Search": "فعالسازی جستجوی وب",
 	"Enabled": "",
-	"Engine": "",
 	"Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "اطمینان حاصل کنید که فایل CSV شما شامل چهار ستون در این ترتیب است: نام، ایمیل، رمز عبور، نقش.",
 	"Enter {{role}} message here": "پیام {{role}} را اینجا وارد کنید",
 	"Enter a detail about yourself for your LLMs to recall": "برای ذخیره سازی اطلاعات خود، یک توضیح کوتاه درباره خود را وارد کنید",
@@ -372,6 +376,8 @@
 	"Enter Chunk Overlap": "مقدار Chunk Overlap را وارد کنید",
 	"Enter Chunk Size": "مقدار Chunk Size را وارد کنید",
 	"Enter description": "",
+	"Enter Document Intelligence Endpoint": "",
+	"Enter Document Intelligence Key": "",
 	"Enter domains separated by commas (e.g., example.com,site.org)": "",
 	"Enter Exa API Key": "",
 	"Enter Github Raw URL": "ادرس Github Raw را وارد کنید",
@@ -507,9 +513,9 @@
 	"Gemini API Config": "",
 	"Gemini API Key is required.": "",
 	"General": "عمومی",
-	"General Settings": "تنظیمات عمومی",
 	"Generate an image": "",
 	"Generate Image": "",
+	"Generate prompt pair": "",
 	"Generating search query": "در حال تولید پرسوجوی جستجو",
 	"Get started": "",
 	"Get started with {{WEBUI_NAME}}": "",
@@ -565,6 +571,7 @@
 	"Input commands": "ورودی دستورات",
 	"Install from Github URL": "نصب از ادرس Github",
 	"Instant Auto-Send After Voice Transcription": "",
+	"Integration": "",
 	"Interface": "رابط",
 	"Invalid file format.": "",
 	"Invalid Tag": "تگ نامعتبر",
@@ -612,16 +619,17 @@
 	"Listening...": "",
 	"Llama.cpp": "",
 	"LLMs can make mistakes. Verify important information.": "مدل\u200cهای زبانی بزرگ می\u200cتوانند اشتباه کنند. اطلاعات مهم را راستی\u200cآزمایی کنید.",
+	"Loader": "",
 	"Loading Kokoro.js...": "",
 	"Local": "",
 	"Local Models": "",
+	"Location access not allowed": "",
 	"Lost": "",
 	"LTR": "LTR",
 	"Made by Open WebUI Community": "ساخته شده توسط OpenWebUI Community",
 	"Make sure to enclose them with": "مطمئن شوید که آنها را با این محصور کنید:",
 	"Make sure to export a workflow.json file as API format from ComfyUI.": "",
 	"Manage": "",
-	"Manage Arena Models": "",
 	"Manage Direct Connections": "",
 	"Manage Models": "",
 	"Manage Ollama": "",
@@ -689,6 +697,7 @@
 	"No HTML, CSS, or JavaScript content found.": "",
 	"No inference engine with management support found": "",
 	"No knowledge found": "",
+	"No memories to clear": "",
 	"No model IDs": "",
 	"No models found": "",
 	"No models selected": "",
@@ -718,6 +727,7 @@
 	"Ollama API settings updated": "",
 	"Ollama Version": "نسخه اولاما",
 	"On": "روشن",
+	"OneDrive": "",
 	"Only alphanumeric characters and hyphens are allowed": "",
 	"Only alphanumeric characters and hyphens are allowed in the command string.": "فقط کاراکترهای الفبایی و خط فاصله در رشته فرمان مجاز هستند.",
 	"Only collections can be edited, create a new knowledge base to edit/add documents.": "",
@@ -788,11 +798,9 @@
 	"Prompt updated successfully": "",
 	"Prompts": "پرامپت\u200cها",
 	"Prompts Access": "",
-	"Proxy URL": "",
 	"Pull \"{{searchValue}}\" from Ollama.com": "بازگرداندن \"{{searchValue}}\" از Ollama.com",
 	"Pull a model from Ollama.com": "دریافت یک مدل از Ollama.com",
 	"Query Generation Prompt": "",
-	"Query Params": "پارامترهای پرس و جو",
 	"RAG Template": "RAG الگوی",
 	"Rating": "",
 	"Re-rank models by topic similarity": "",
@@ -827,6 +835,7 @@
 	"Response notifications cannot be activated as the website permissions have been denied. Please visit your browser settings to grant the necessary access.": "",
 	"Response splitting": "",
 	"Result": "نتیجه",
+	"Retrieval": "",
 	"Retrieval Query Generation": "",
 	"Rich Text Input for Chat": "",
 	"RK": "",
@@ -880,7 +889,6 @@
 	"Select an Ollama instance": "",
 	"Select Engine": "انتخاب موتور",
 	"Select Knowledge": "انتخاب دانش",
-	"Select model": "انتخاب یک مدل",
 	"Select only one model to call": "تنها یک مدل را برای صدا زدن انتخاب کنید",
 	"Selected model(s) do not support image inputs": "مدل) های (انتخاب شده ورودیهای تصویر را پشتیبانی نمیکند",
 	"Semantic distance to query": "",
@@ -957,6 +965,7 @@
 	"Tags Generation": "",
 	"Tags Generation Prompt": "",
 	"Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "",
+	"Talk to model": "",
 	"Tap to interrupt": "",
 	"Tasks": "",
 	"Tavily API Key": "",
@@ -1041,6 +1050,7 @@
 	"Top P": "Top P",
 	"Transformers": "",
 	"Trouble accessing Ollama?": "در دسترسی به اولاما مشکل دارید؟",
+	"Trust Proxy Environment": "",
 	"TTS Model": "",
 	"TTS Settings": "تنظیمات TTS",
 	"TTS Voice": "",
@@ -1104,7 +1114,6 @@
 	"Warning: Jupyter execution enables arbitrary code execution, posing severe security risks—proceed with extreme caution.": "",
 	"Web": "وب",
 	"Web API": "",
-	"Web Loader Settings": "تنظیمات لودر وب",
 	"Web Search": "جستجوی وب",
 	"Web Search Engine": "موتور جستجوی وب",
 	"Web Search in Chat": "",
@@ -1146,5 +1155,6 @@
 	"Your account status is currently pending activation.": "",
 	"Your entire contribution will go directly to the plugin developer; Open WebUI does not take any percentage. However, the chosen funding platform might have its own fees.": "",
 	"Youtube": "یوتیوب",
-	"Youtube Loader Settings": "تنظیمات لودر یوتیوب"
+	"Youtube Language": "",
+	"Youtube Proxy URL": ""
 }

+ 89 - 79
src/lib/i18n/locales/fi-FI/translation.json

@@ -13,6 +13,7 @@
 	"A task model is used when performing tasks such as generating titles for chats and web search queries": "Tehtävämallia käytetään tehtävien suorittamiseen, kuten otsikoiden luomiseen keskusteluille ja verkkohakukyselyille",
 	"a user": "käyttäjä",
 	"About": "Tietoja",
+	"Accept autocomplete generation / Jump to prompt variable": "",
 	"Access": "Pääsy",
 	"Access Control": "Käyttöoikeuksien hallinta",
 	"Accessible to all users": "Käytettävissä kaikille käyttäjille",
@@ -20,7 +21,7 @@
 	"Account Activation Pending": "Tilin aktivointi odottaa",
 	"Accurate information": "Tarkkaa tietoa",
 	"Actions": "Toiminnot",
-	"Activate": "",
+	"Activate": "Aktivoi",
 	"Activate this command by typing \"/{{COMMAND}}\" to chat input.": "Aktivoi tämä komento kirjoittamalla \"/{{COMMAND}}\" chat-syötteeseen.",
 	"Active Users": "Aktiiviset käyttäjät",
 	"Add": "Lisää",
@@ -34,7 +35,7 @@
 	"Add custom prompt": "Lisää mukautettu kehote",
 	"Add Files": "Lisää tiedostoja",
 	"Add Group": "Lisää ryhmä",
-	"Add Memory": "Lisää muistia",
+	"Add Memory": "Lisää muistiin",
 	"Add Model": "Lisää malli",
 	"Add Reaction": "Lisää reaktio",
 	"Add Tag": "Lisää tagi",
@@ -85,6 +86,7 @@
 	"Archive All Chats": "Arkistoi kaikki keskustelut",
 	"Archived Chats": "Arkistoidut keskustelut",
 	"archived-chat-export": "arkistoitu-keskustelu-vienti",
+	"Are you sure you want to clear all memories? This action cannot be undone.": "",
 	"Are you sure you want to delete this channel?": "Haluatko varmasti poistaa tämän kanavan?",
 	"Are you sure you want to delete this message?": "Haluatko varmasti poistaa tämän viestin?",
 	"Are you sure you want to unarchive all archived chats?": "Haluatko varmasti purkaa kaikkien arkistoitujen keskustelujen arkistoinnin?",
@@ -93,14 +95,14 @@
 	"Artifacts": "Artefaktit",
 	"Ask a question": "Kysyä kysymys",
 	"Assistant": "Avustaja",
-	"Attach file": "Liitä tiedosto",
+	"Attach file from knowledge": "",
 	"Attention to detail": "Huomio yksityiskohtiin",
 	"Attribute for Mail": "",
 	"Attribute for Username": "Käyttäjänimi-määritämä",
 	"Audio": "Ääni",
 	"August": "elokuu",
 	"Authenticate": "Todentaa",
-	"Authentication": "",
+	"Authentication": "Todennus",
 	"Auto-Copy Response to Clipboard": "Kopioi vastaus automaattisesti leikepöydälle",
 	"Auto-playback response": "Soita vastaus automaattisesti",
 	"Autocomplete Generation": "Automaattisen täydennyksen luonti",
@@ -121,16 +123,17 @@
 	"Batch Size (num_batch)": "Erän koko (num_batch)",
 	"before": "ennen",
 	"Being lazy": "Oli laiska",
-	"Beta": "",
+	"Beta": "Beta",
 	"Bing Search V7 Endpoint": "Bing Search V7 -päätepisteen osoite",
 	"Bing Search V7 Subscription Key": "Bing Search V7 -tilauskäyttäjäavain",
-	"Bocha Search API Key": "",
+	"Bocha Search API Key": "Bocha Search API -avain",
 	"Brave Search API Key": "Brave Search API -avain",
 	"By {{name}}": "Tekijä {{name}}",
+	"Bypass Embedding and Retrieval": "",
 	"Bypass SSL verification for Websites": "Ohita SSL-varmennus verkkosivustoille",
-	"Calendar": "",
-	"Call": "Soitto",
-	"Call feature is not supported when using Web STT engine": "Soittotoimintoa ei tueta käytettäessä web-puheentunnistusmoottoria",
+	"Calendar": "Kalenteri",
+	"Call": "Puhelu",
+	"Call feature is not supported when using Web STT engine": "Puhelutoimintoa ei tueta käytettäessä web-puheentunnistusmoottoria",
 	"Camera": "Kamera",
 	"Cancel": "Peruuta",
 	"Capabilities": "Ominaisuuksia",
@@ -156,11 +159,11 @@
 	"Checking for updates...": "Tarkistetaan päivityksiä...",
 	"Choose a model before saving...": "Valitse malli ennen tallentamista...",
 	"Chunk Overlap": "Päällekkäisten osien määrä",
-	"Chunk Params": "Osien parametrit",
 	"Chunk Size": "Osien koko",
 	"Ciphers": "Salausalgoritmi",
 	"Citation": "Lähdeviite",
 	"Clear memory": "Tyhjennä muisti",
+	"Clear Memory": "",
 	"click here": "klikkaa tästä",
 	"Click here for filter guides.": "Katso suodatinohjeita klikkaamalla tästä.",
 	"Click here for help.": "Klikkaa tästä saadaksesi apua.",
@@ -180,9 +183,9 @@
 	"Clone of {{TITLE}}": "{{TITLE}} klooni",
 	"Close": "Sulje",
 	"Code execution": "Koodin suorittaminen",
-	"Code Execution": "",
-	"Code Execution Engine": "",
-	"Code Execution Timeout": "",
+	"Code Execution": "Koodin suorittaminen",
+	"Code Execution Engine": "Koodin suoritusmoottori",
+	"Code Execution Timeout": "Koodin suorittamisen aikakatkaisu",
 	"Code formatted successfully": "Koodin muotoilu onnistui",
 	"Code Interpreter": "Ohjelmatulkki",
 	"Code Interpreter Engine": "Ohjelmatulkin moottori",
@@ -190,7 +193,7 @@
 	"Collection": "Kokoelma",
 	"Color": "Väri",
 	"ComfyUI": "ComfyUI",
-	"ComfyUI API Key": "",
+	"ComfyUI API Key": "ComfyUI API -avain",
 	"ComfyUI Base URL": "ComfyUI-perus-URL",
 	"ComfyUI Base URL is required.": "ComfyUI-perus-URL vaaditaan.",
 	"ComfyUI Workflow": "ComfyUI-työnkulku",
@@ -203,12 +206,12 @@
 	"Confirm Password": "Vahvista salasana",
 	"Confirm your action": "Vahvista toimintasi",
 	"Confirm your new password": "Vahvista uusi salasanasi",
-	"Connect to your own OpenAI compatible API endpoints.": "",
+	"Connect to your own OpenAI compatible API endpoints.": "Yhdistä oma OpenAI yhteensopiva API päätepiste.",
 	"Connections": "Yhteydet",
 	"Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort. (Default: medium)": "",
 	"Contact Admin for WebUI Access": "Ota yhteyttä ylläpitäjään WebUI-käyttöä varten",
 	"Content": "Sisältö",
-	"Content Extraction": "Sisällön erottelu",
+	"Content Extraction Engine": "",
 	"Context Length": "Kontekstin pituus",
 	"Continue Response": "Jatka vastausta",
 	"Continue with {{provider}}": "Jatka palvelulla {{provider}}",
@@ -227,7 +230,7 @@
 	"Copy Link": "Kopioi linkki",
 	"Copy to clipboard": "Kopioi leikepöydälle",
 	"Copying to clipboard was successful!": "Kopioiminen leikepöydälle onnistui!",
-	"CORS must be properly configured by the provider to allow requests from Open WebUI.": "",
+	"CORS must be properly configured by the provider to allow requests from Open WebUI.": "CORS täytyy olla konfiguroitu palveluntarjoajan toimesta pyyntöjen hyväksymiseksi Open WebUI:sta.",
 	"Create": "Luo",
 	"Create a knowledge base": "Luo tietokanta",
 	"Create a model": "Luo malli",
@@ -245,6 +248,7 @@
 	"Current Model": "Nykyinen malli",
 	"Current Password": "Nykyinen salasana",
 	"Custom": "Mukautettu",
+	"Danger Zone": "",
 	"Dark": "Tumma",
 	"Database": "Tietokanta",
 	"December": "joulukuu",
@@ -271,7 +275,7 @@
 	"Delete folder?": "Haluatko varmasti poistaa tämän kansion?",
 	"Delete function?": "Haluatko varmasti poistaa tämän toiminnon?",
 	"Delete Message": "Poista viesti",
-	"Delete message?": "",
+	"Delete message?": "Poista viesti?",
 	"Delete prompt?": "Haluatko varmasti poistaa tämän kehotteen?",
 	"delete this link": "poista tämä linkki",
 	"Delete tool?": "Haluatko varmasti poistaa tämän työkalun?",
@@ -290,7 +294,7 @@
 	"Discover a model": "Tutustu malliin",
 	"Discover a prompt": "Löydä kehote",
 	"Discover a tool": "Löydä työkalu",
-	"Discover how to use Open WebUI and seek support from the community.": "",
+	"Discover how to use Open WebUI and seek support from the community.": "Tutustu Open WebUI:n käyttöön ja pyydä tukea yhteisöltä.",
 	"Discover wonders": "Löydä ihmeellisiä asioita",
 	"Discover, download, and explore custom functions": "Etsi, lataa ja tutki mukautettuja toimintoja",
 	"Discover, download, and explore custom prompts": "Löydä ja lataa mukautettuja kehotteita",
@@ -305,6 +309,8 @@
 	"Do not install functions from sources you do not fully trust.": "Älä asenna toimintoja lähteistä, joihin et luota täysin.",
 	"Do not install tools from sources you do not fully trust.": "Älä asenna työkaluja lähteistä, joihin et luota täysin.",
 	"Document": "Asiakirja",
+	"Document Intelligence": "",
+	"Document Intelligence endpoint and key required.": "",
 	"Documentation": "Dokumentaatio",
 	"Documents": "Asiakirjat",
 	"does not make any external connections, and your data stays securely on your locally hosted server.": "ei tee ulkoisia yhteyksiä, ja tietosi pysyvät turvallisesti paikallisesti isännöidyllä palvelimellasi.",
@@ -315,14 +321,14 @@
 	"Don't like the style": "En pidä tyylistä",
 	"Done": "Valmis",
 	"Download": "Lataa",
-	"Download as SVG": "",
+	"Download as SVG": "Lataa SVG:nä",
 	"Download canceled": "Lataus peruutettu",
 	"Download Database": "Lataa tietokanta",
 	"Drag and drop a file to upload or select a file to view": "Raahaa ja pudota tiedosto ladattavaksi tai valitse tiedosto katseltavaksi",
 	"Draw": "Piirros",
 	"Drop any files here to add to the conversation": "Pudota tiedostoja tähän lisätäksesi ne keskusteluun",
 	"e.g. '30s','10m'. Valid time units are 's', 'm', 'h'.": "esim. '30s', '10m'. Kelpoiset aikayksiköt ovat 's', 'm', 'h'.",
-	"e.g. 60": "",
+	"e.g. 60": "esim. 60",
 	"e.g. A filter to remove profanity from text": "esim. suodatin, joka poistaa kirosanoja tekstistä",
 	"e.g. My Filter": "esim. Oma suodatin",
 	"e.g. My Tools": "esim. Omat työkalut",
@@ -340,23 +346,21 @@
 	"ElevenLabs": "ElevenLabs",
 	"Email": "Sähköposti",
 	"Embark on adventures": "Lähde seikkailuille",
+	"Embedding": "",
 	"Embedding Batch Size": "Upotuksen eräkoko",
 	"Embedding Model": "Upotusmalli",
 	"Embedding Model Engine": "Upotusmallin moottori",
 	"Embedding model set to \"{{embedding_model}}\"": "\"{{embedding_model}}\" valittu upotusmalliksi",
-	"Enable API Key": "",
+	"Enable API Key": "Ota API -avain käyttöön",
 	"Enable autocomplete generation for chat messages": "Ota automaattinen täydennys käyttöön keskusteluviesteissä",
 	"Enable Code Interpreter": "Ota ohjelmatulkki käyttöön",
 	"Enable Community Sharing": "Ota yhteisön jakaminen käyttöön",
-	"Enable Google Drive": "Salli Google Drive",
 	"Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "Ota Memory Locking (mlock) käyttöön estääksesi mallidatan vaihtamisen pois RAM-muistista. Tämä lukitsee mallin työsivut RAM-muistiin, varmistaen että niitä ei vaihdeta levylle. Tämä voi parantaa suorituskykyä välttämällä sivuvikoja ja varmistamalla nopean tietojen käytön.",
 	"Enable Memory Mapping (mmap) to load model data. This option allows the system to use disk storage as an extension of RAM by treating disk files as if they were in RAM. This can improve model performance by allowing for faster data access. However, it may not work correctly with all systems and can consume a significant amount of disk space.": "Ota Memory Mapping (mmap) käyttöön ladataksesi mallidataa. Tämä vaihtoehto sallii järjestelmän käyttää levytilaa RAM-laajennuksena käsittelemällä levytiedostoja kuin ne olisivat RAM-muistissa. Tämä voi parantaa mallin suorituskykyä sallimalla nopeamman tietojen käytön. Kuitenkin se ei välttämättä toimi oikein kaikissa järjestelmissä ja voi kuluttaa huomattavasti levytilaa.",
 	"Enable Message Rating": "Ota viestiarviointi käyttöön",
 	"Enable Mirostat sampling for controlling perplexity. (Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "Ota Mirostat-näytteenotto käyttöön hallinnan monimerkityksellisyydelle. (Oletus: 0, 0 = Ei käytössä, 1 = Mirostat, 2 = Mirostat 2.0)",
 	"Enable New Sign Ups": "Salli uudet rekisteröitymiset",
-	"Enable Web Search": "Ota verkkohaku käyttöön",
 	"Enabled": "Käytössä",
-	"Engine": "Moottori",
 	"Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "Varmista, että CSV-tiedostossasi on 4 saraketta tässä järjestyksessä: Nimi, Sähköposti, Salasana, Rooli.",
 	"Enter {{role}} message here": "Kirjoita {{role}}-viesti tähän",
 	"Enter a detail about yourself for your LLMs to recall": "Kirjoita yksityiskohta itsestäsi, jonka LLM-ohjelmat voivat muistaa",
@@ -365,40 +369,42 @@
 	"Enter Application DN Password": "Kirjoita sovelluksen DN-salasana",
 	"Enter Bing Search V7 Endpoint": "Kirjoita Bing Search V7 -päätepisteen osoite",
 	"Enter Bing Search V7 Subscription Key": "Kirjoita Bing Search V7 -tilauskäyttäjäavain",
-	"Enter Bocha Search API Key": "",
+	"Enter Bocha Search API Key": "Kirjoita Bocha Search API -avain",
 	"Enter Brave Search API Key": "Kirjoita Brave Search API -avain",
 	"Enter certificate path": "Kirjoita varmennepolku",
 	"Enter CFG Scale (e.g. 7.0)": "Kirjoita CFG-mitta (esim. 7.0)",
 	"Enter Chunk Overlap": "Syötä osien päällekkäisyys",
 	"Enter Chunk Size": "Syötä osien koko",
 	"Enter description": "Kirjoita kuvaus",
+	"Enter Document Intelligence Endpoint": "",
+	"Enter Document Intelligence Key": "",
 	"Enter domains separated by commas (e.g., example.com,site.org)": "Verkko-osoitteet erotetaan pilkulla (esim. esimerkki.com,sivu.org",
-	"Enter Exa API Key": "",
-	"Enter Github Raw URL": "Kirjoita Github Raw -URL-osoite",
+	"Enter Exa API Key": "Kirjoita Exa API -avain",
+	"Enter Github Raw URL": "Kirjoita Github Raw -verkko-osoite",
 	"Enter Google PSE API Key": "Kirjoita Google PSE API -avain",
 	"Enter Google PSE Engine Id": "Kirjoita Google PSE -moottorin tunnus",
 	"Enter Image Size (e.g. 512x512)": "Kirjoita kuvan koko (esim. 512x512)",
 	"Enter Jina API Key": "Kirjoita Jina API -avain",
-	"Enter Jupyter Password": "",
-	"Enter Jupyter Token": "",
-	"Enter Jupyter URL": "",
-	"Enter Kagi Search API Key": "",
+	"Enter Jupyter Password": "Kirjoita Jupyter salasana",
+	"Enter Jupyter Token": "Kirjoita Juypyter token",
+	"Enter Jupyter URL": "Kirjoita Jupyter verkko-osoite",
+	"Enter Kagi Search API Key": "Kirjoita Kagi Search API -avain",
 	"Enter language codes": "Kirjoita kielikoodit",
 	"Enter Model ID": "Kirjoita mallitunnus",
 	"Enter model tag (e.g. {{modelTag}})": "Kirjoita mallitagi (esim. {{modelTag}})",
 	"Enter Mojeek Search API Key": "Kirjoita Mojeek Search API -avain",
 	"Enter Number of Steps (e.g. 50)": "Kirjoita askelten määrä (esim. 50)",
-	"Enter proxy URL (e.g. https://user:password@host:port)": "Kirjoita välityspalvelimen URL-osoite (esim. https://käyttäjä:salasana@host:portti)",
+	"Enter proxy URL (e.g. https://user:password@host:port)": "Kirjoita välityspalvelimen verkko-osoite (esim. https://käyttäjä:salasana@host:portti)",
 	"Enter reasoning effort": "",
 	"Enter Sampler (e.g. Euler a)": "Kirjoita näytteistäjä (esim. Euler a)",
 	"Enter Scheduler (e.g. Karras)": "Kirjoita ajoitin (esim. Karras)",
 	"Enter Score": "Kirjoita pistemäärä",
 	"Enter SearchApi API Key": "Kirjoita SearchApi API -avain",
 	"Enter SearchApi Engine": "Kirjoita SearchApi-moottori",
-	"Enter Searxng Query URL": "Kirjoita Searxng-kyselyn URL-osoite",
+	"Enter Searxng Query URL": "Kirjoita Searxng-kyselyn verkko-osoite",
 	"Enter Seed": "Kirjoita siemenluku",
-	"Enter SerpApi API Key": "",
-	"Enter SerpApi Engine": "",
+	"Enter SerpApi API Key": "Kirjoita SerpApi API -avain",
+	"Enter SerpApi Engine": "Valitse SerpApi Moottori",
 	"Enter Serper API Key": "Kirjoita Serper API -avain",
 	"Enter Serply API Key": "Kirjoita Serply API -avain",
 	"Enter Serpstack API Key": "Kirjoita Serpstack API -avain",
@@ -410,28 +416,28 @@
 	"Enter Tavily API Key": "Kirjoita Tavily API -avain",
 	"Enter the public URL of your WebUI. This URL will be used to generate links in the notifications.": "Kirjoita julkinen WebUI verkko-osoitteesi. Verkko-osoitetta käytetään osoitteiden luontiin ilmoituksissa.",
 	"Enter Tika Server URL": "Kirjoita Tika Server URL",
-	"Enter timeout in seconds": "",
+	"Enter timeout in seconds": "Aseta aikakatkaisu sekunneissa",
 	"Enter Top K": "Kirjoita Top K",
-	"Enter URL (e.g. http://127.0.0.1:7860/)": "Kirjoita URL-osoite (esim. http://127.0.0.1:7860/)",
-	"Enter URL (e.g. http://localhost:11434)": "Kirjoita URL-osoite (esim. http://localhost:11434)",
+	"Enter URL (e.g. http://127.0.0.1:7860/)": "Kirjoita verkko-osoite (esim. http://127.0.0.1:7860/)",
+	"Enter URL (e.g. http://localhost:11434)": "Kirjoita verkko-osoite (esim. http://localhost:11434)",
 	"Enter your current password": "Kirjoita nykyinen salasanasi",
 	"Enter Your Email": "Kirjoita sähköpostiosoitteesi",
 	"Enter Your Full Name": "Kirjoita koko nimesi",
 	"Enter your message": "Kirjoita viestisi",
-	"Enter your new password": "",
+	"Enter your new password": "Kirjoita uusi salasanasi",
 	"Enter Your Password": "Kirjoita salasanasi",
 	"Enter Your Role": "Kirjoita roolisi",
 	"Enter Your Username": "Kirjoita käyttäjätunnuksesi",
 	"Enter your webhook URL": "Kirjoita webhook osoitteesi",
 	"Error": "Virhe",
 	"ERROR": "VIRHE",
-	"Error accessing Google Drive: {{error}}": "",
-	"Error uploading file: {{error}}": "",
+	"Error accessing Google Drive: {{error}}": "Virhe yhdistäessä Google Drive: {{error}}",
+	"Error uploading file: {{error}}": "Virhe ladattaessa tiedostoa: {{error}}",
 	"Evaluations": "Arvioinnit",
-	"Exa API Key": "",
+	"Exa API Key": "Exa API -avain",
 	"Example: (&(objectClass=inetOrgPerson)(uid=%s))": "Esimerkki: (&(objectClass=inetOrgPerson)(uid=%s))",
 	"Example: ALL": "Esimerkki: KAIKKI",
-	"Example: mail": "",
+	"Example: mail": "Esimerkki: posti",
 	"Example: ou=users,dc=foo,dc=example": "Esimerkki: ou=käyttäjät,dc=foo,dc=example",
 	"Example: sAMAccountName or uid or userPrincipalName": "Esimerkki: sAMAccountName tai uid tai userPrincipalName",
 	"Exclude": "Jätä pois",
@@ -458,7 +464,7 @@
 	"Failed to save models configuration": "Mallien määrityksen tallentaminen epäonnistui",
 	"Failed to update settings": "Asetusten päivittäminen epäonnistui",
 	"Failed to upload file.": "Tiedoston lataaminen epäonnistui.",
-	"Features": "",
+	"Features": "Ominaisuudet",
 	"Features Permissions": "Ominaisuuksien käyttöoikeudet",
 	"February": "helmikuu",
 	"Feedback History": "Palautehistoria",
@@ -488,9 +494,9 @@
 	"Form": "Lomake",
 	"Format your variables using brackets like this:": "Muotoile muuttujasi hakasulkeilla tällä tavalla:",
 	"Frequency Penalty": "Taajuussakko",
-	"Full Context Mode": "",
+	"Full Context Mode": "Koko kontekstitila",
 	"Function": "Toiminto",
-	"Function Calling": "",
+	"Function Calling": "Toiminto kutsu",
 	"Function created successfully": "Toiminto luotu onnistuneesti",
 	"Function deleted successfully": "Toiminto poistettu onnistuneesti",
 	"Function Description": "Toiminnon kuvaus",
@@ -503,19 +509,19 @@
 	"Functions allow arbitrary code execution": "Toiminnot sallivat mielivaltaisen koodin suorittamisen",
 	"Functions allow arbitrary code execution.": "Toiminnot sallivat mielivaltaisen koodin suorittamisen.",
 	"Functions imported successfully": "Toiminnot tuotu onnistuneesti",
-	"Gemini": "",
-	"Gemini API Config": "",
-	"Gemini API Key is required.": "",
+	"Gemini": "Gemini",
+	"Gemini API Config": "Gemini API konfiguraatio",
+	"Gemini API Key is required.": "Gemini API -avain on vaaditaan.",
 	"General": "Yleinen",
-	"General Settings": "Yleiset asetukset",
 	"Generate an image": "Luo kuva",
 	"Generate Image": "Luo kuva",
+	"Generate prompt pair": "",
 	"Generating search query": "Luodaan hakukyselyä",
 	"Get started": "Aloita",
 	"Get started with {{WEBUI_NAME}}": "Aloita käyttämään {{WEBUI_NAME}}:iä",
 	"Global": "Yleinen",
 	"Good Response": "Hyvä vastaus",
-	"Google Drive": "",
+	"Google Drive": "Google Drive",
 	"Google PSE API Key": "Google PSE API -avain",
 	"Google PSE Engine Id": "Google PSE -moottorin tunnus",
 	"Group created successfully": "Ryhmä luotu onnistuneesti",
@@ -532,7 +538,7 @@
 	"Hex Color": "Heksadesimaaliväri",
 	"Hex Color - Leave empty for default color": "Heksadesimaaliväri - Jätä tyhjäksi, jos haluat oletusvärin",
 	"Hide": "Piilota",
-	"Home": "",
+	"Home": "Koti",
 	"Host": "Palvelin",
 	"How can I help you today?": "Miten voin auttaa sinua tänään?",
 	"How would you rate this response?": "Kuinka arvioisit tätä vastausta?",
@@ -565,6 +571,7 @@
 	"Input commands": "Syötekäskyt",
 	"Install from Github URL": "Asenna Github-URL:stä",
 	"Instant Auto-Send After Voice Transcription": "Heti automaattinen lähetys äänitunnistuksen jälkeen",
+	"Integration": "",
 	"Interface": "Käyttöliittymä",
 	"Invalid file format.": "Virheellinen tiedostomuoto.",
 	"Invalid Tag": "Virheellinen tagi",
@@ -576,11 +583,11 @@
 	"JSON Preview": "JSON-esikatselu",
 	"July": "heinäkuu",
 	"June": "kesäkuu",
-	"Jupyter Auth": "",
-	"Jupyter URL": "",
+	"Jupyter Auth": "Jupyter todennus",
+	"Jupyter URL": "Jupyter verkko-osoite",
 	"JWT Expiration": "JWT-vanheneminen",
 	"JWT Token": "JWT-token",
-	"Kagi Search API Key": "",
+	"Kagi Search API Key": "Kagi Search API -avain",
 	"Keep Alive": "Pysy aktiivisena",
 	"Key": "Avain",
 	"Keyboard shortcuts": "Pikanäppäimet",
@@ -597,7 +604,7 @@
 	"Language": "Kieli",
 	"Last Active": "Viimeksi aktiivinen",
 	"Last Modified": "Viimeksi muokattu",
-	"Last reply": "",
+	"Last reply": "Viimeksi vastattu",
 	"LDAP": "LDAP",
 	"LDAP server updated": "LDAP-palvelin päivitetty",
 	"Leaderboard": "Tulosluettelo",
@@ -606,22 +613,23 @@
 	"Leave empty to include all models from \"{{URL}}/models\" endpoint": "Jätä tyhjäksi, jos haluat sisällyttää kaikki mallit \"{{URL}}/models\" -päätepistestä",
 	"Leave empty to include all models or select specific models": "Jätä tyhjäksi, jos haluat sisällyttää kaikki mallit tai valitse tietyt mallit",
 	"Leave empty to use the default prompt, or enter a custom prompt": "Jätä tyhjäksi käyttääksesi oletuskehotetta tai kirjoita mukautettu kehote",
-	"Leave model field empty to use the default model.": "",
-	"License": "",
+	"Leave model field empty to use the default model.": "Jätä malli kenttä tyhjäksi käyttääksesi oletus mallia.",
+	"License": "Lisenssi",
 	"Light": "Vaalea",
 	"Listening...": "Kuuntelee...",
-	"Llama.cpp": "",
+	"Llama.cpp": "Llama.cpp",
 	"LLMs can make mistakes. Verify important information.": "Kielimallit voivat tehdä virheitä. Tarkista tärkeät tiedot.",
+	"Loader": "",
 	"Loading Kokoro.js...": "Ladataan Kokoro.js...",
 	"Local": "Paikallinen",
 	"Local Models": "Paikalliset mallit",
+	"Location access not allowed": "",
 	"Lost": "Mennyt",
 	"LTR": "LTR",
 	"Made by Open WebUI Community": "Tehnyt OpenWebUI-yhteisö",
 	"Make sure to enclose them with": "Varmista, että suljet ne",
 	"Make sure to export a workflow.json file as API format from ComfyUI.": "Muista viedä workflow.json-tiedosto API-muodossa ComfyUI:sta.",
 	"Manage": "Hallitse",
-	"Manage Arena Models": "Hallitse Arena-malleja",
 	"Manage Direct Connections": "Hallitse suoria yhteyksiä",
 	"Manage Models": "Hallitse malleja",
 	"Manage Ollama": "Hallitse Ollamaa",
@@ -642,7 +650,7 @@
 	"Memory updated successfully": "Muisti päivitetty onnistuneesti",
 	"Merge Responses": "Yhdistä vastaukset",
 	"Message rating should be enabled to use this feature": "Tämän toiminnon käyttämiseksi viestiarviointi on otettava käyttöön",
-	"Messages you send after creating your link won't be shared. Users with the URL will be able to view the shared chat.": "Linkin luomisen jälkeen lähettämäsi viestit eivät ole jaettuja. Käyttäjät, joilla on URL-osoite, voivat tarkastella jaettua keskustelua.",
+	"Messages you send after creating your link won't be shared. Users with the URL will be able to view the shared chat.": "Linkin luomisen jälkeen lähettämäsi viestit eivät ole jaettuja. Käyttäjät, joilla on verkko-osoite, voivat tarkastella jaettua keskustelua.",
 	"Min P": "Min P",
 	"Minimum Score": "Vähimmäispisteet",
 	"Mirostat": "Mirostat",
@@ -689,6 +697,7 @@
 	"No HTML, CSS, or JavaScript content found.": "HTML-, CSS- tai JavaScript-sisältöä ei löytynyt.",
 	"No inference engine with management support found": "",
 	"No knowledge found": "Tietoa ei löytynyt",
+	"No memories to clear": "",
 	"No model IDs": "Ei mallitunnuksia",
 	"No models found": "Malleja ei löytynyt",
 	"No models selected": "Malleja ei ole valittu",
@@ -718,11 +727,12 @@
 	"Ollama API settings updated": "Ollama API -asetukset päivitetty",
 	"Ollama Version": "Ollama-versio",
 	"On": "Päällä",
+	"OneDrive": "",
 	"Only alphanumeric characters and hyphens are allowed": "Vain kirjaimet, numerot ja väliviivat ovat sallittuja",
 	"Only alphanumeric characters and hyphens are allowed in the command string.": "Vain kirjaimet, numerot ja väliviivat ovat sallittuja komentosarjassa.",
 	"Only collections can be edited, create a new knowledge base to edit/add documents.": "Vain kokoelmia voi muokata, luo uusi tietokanta muokataksesi/lisätäksesi asiakirjoja.",
 	"Only select users and groups with permission can access": "Vain valitut käyttäjät ja ryhmät, joilla on käyttöoikeus, pääsevät käyttämään",
-	"Oops! Looks like the URL is invalid. Please double-check and try again.": "Hups! Näyttää siltä, että URL-osoite on virheellinen. Tarkista se ja yritä uudelleen.",
+	"Oops! Looks like the URL is invalid. Please double-check and try again.": "Hups! Näyttää siltä, että verkko-osoite on virheellinen. Tarkista se ja yritä uudelleen.",
 	"Oops! There are files still uploading. Please wait for the upload to complete.": "Hups! Tiedostoja on vielä ladattavana. Odota, että lataus on valmis.",
 	"Oops! There was an error in the previous response.": "Hups! Edellisessä vastauksessa oli virhe.",
 	"Oops! You're using an unsupported method (frontend only). Please serve the WebUI from the backend.": "Hups! Käytät ei-tuettua menetelmää (vain frontend). Palvele WebUI:ta backendistä.",
@@ -788,11 +798,9 @@
 	"Prompt updated successfully": "Kehote päivitetty onnistuneesti",
 	"Prompts": "Kehotteet",
 	"Prompts Access": "Kehoitteiden käyttöoikeudet",
-	"Proxy URL": "Välityspalvelimen URL-osoite",
 	"Pull \"{{searchValue}}\" from Ollama.com": "Lataa \"{{searchValue}}\" Ollama.comista",
 	"Pull a model from Ollama.com": "Lataa malli Ollama.comista",
 	"Query Generation Prompt": "Kyselytulosten luontikehote",
-	"Query Params": "Kyselyparametrit",
 	"RAG Template": "RAG-malline",
 	"Rating": "Arviointi",
 	"Re-rank models by topic similarity": "Uudelleenjärjestä mallit aiheyhteyden mukaan",
@@ -827,6 +835,7 @@
 	"Response notifications cannot be activated as the website permissions have been denied. Please visit your browser settings to grant the necessary access.": "Vastausilmoituksia ei voida ottaa käyttöön, koska verkkosivuston käyttöoikeudet on evätty. Myönnä tarvittavat käyttöoikeudet selaimesi asetuksista.",
 	"Response splitting": "Vastauksen jakaminen",
 	"Result": "Tulos",
+	"Retrieval": "",
 	"Retrieval Query Generation": "Hakukyselyn luominen",
 	"Rich Text Input for Chat": "Rikasteksti-syöte chattiin",
 	"RK": "RK",
@@ -864,7 +873,7 @@
 	"Searched {{count}} sites": "Etsitty {{count}} sivulta",
 	"Searching \"{{searchQuery}}\"": "Haetaan \"{{searchQuery}}\"",
 	"Searching Knowledge for \"{{searchQuery}}\"": "Haetaan tietämystä \"{{searchQuery}}\"",
-	"Searxng Query URL": "Searxng-kyselyn URL-osoite",
+	"Searxng Query URL": "Searxng-kyselyn verkko-osoite",
 	"See readme.md for instructions": "Katso ohjeet readme.md-tiedostosta",
 	"See what's new": "Katso, mitä uutta",
 	"Seed": "Siemenluku",
@@ -874,13 +883,12 @@
 	"Select a group": "Valitse ryhmä",
 	"Select a model": "Valitse malli",
 	"Select a pipeline": "Valitse putki",
-	"Select a pipeline url": "Valitse putken URL-osoite",
+	"Select a pipeline url": "Valitse putken verkko-osoite",
 	"Select a tool": "Valitse työkalu",
 	"Select an auth method": "Valitse kirjautumistapa",
 	"Select an Ollama instance": "Valitse Ollama instanssi",
 	"Select Engine": "Valitse moottori",
 	"Select Knowledge": "Valitse tietämys",
-	"Select model": "Valitse malli",
 	"Select only one model to call": "Valitse vain yksi malli kutsuttavaksi",
 	"Selected model(s) do not support image inputs": "Valitut mallit eivät tue kuvasöytteitä",
 	"Semantic distance to query": "Semanttinen etäisyys kyselyyn",
@@ -889,8 +897,8 @@
 	"Send message": "Lähetä viesti",
 	"Sends `stream_options: { include_usage: true }` in the request.\nSupported providers will return token usage information in the response when set.": "Lähettää `stream_options: { include_usage: true }` pyynnössä.\nTuetut tarjoajat palauttavat tokenkäyttötiedot vastauksessa, kun se on asetettu.",
 	"September": "syyskuu",
-	"SerpApi API Key": "",
-	"SerpApi Engine": "",
+	"SerpApi API Key": "SerpApi API -avain",
+	"SerpApi Engine": "SerpApi moottori",
 	"Serper API Key": "Serper API -avain",
 	"Serply API Key": "Serply API -avain",
 	"Serpstack API Key": "Serpstack API -avain",
@@ -906,7 +914,7 @@
 	"Set Scheduler": "Aseta ajoitin",
 	"Set Steps": "Aseta askeleet",
 	"Set Task Model": "Aseta tehtävämalli",
-	"Set the number of layers, which will be off-loaded to GPU. Increasing this value can significantly improve performance for models that are optimized for GPU acceleration but may also consume more power and GPU resources.": "",
+	"Set the number of layers, which will be off-loaded to GPU. Increasing this value can significantly improve performance for models that are optimized for GPU acceleration but may also consume more power and GPU resources.": "Aseta näytönohjaimelle ladattavien tasojen määrä. Tämän arvon kasvattaminen voi parantaa merkittävästi näytönohjaimelle optimoitujen mallien suorituskykyä, mutta se voi myös lisätä näytönohjaimen virrankulutusta ja resurssien käyttöä.",
 	"Set the number of worker threads used for computation. This option controls how many threads are used to process incoming requests concurrently. Increasing this value can improve performance under high concurrency workloads but may also consume more CPU resources.": "Aseta työntekijäsäikeiden määrä laskentaa varten. Tämä asetus kontrolloi, kuinka monta säiettä käytetään saapuvien pyyntöjen rinnakkaiseen käsittelyyn. Arvon kasvattaminen voi parantaa suorituskykyä suurissa samanaikaisissa työkuormissa, mutta voi myös kuluttaa enemmän keskussuorittimen resursseja.",
 	"Set Voice": "Aseta puheääni",
 	"Set whisper model": "Aseta whisper-malli",
@@ -957,8 +965,9 @@
 	"Tags Generation": "Tagien luonti",
 	"Tags Generation Prompt": "Tagien luontikehote",
 	"Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "Tail-free-otanta käytetään vähentämään vähemmän todennäköisten tokenien vaikutusta tulokseen. Korkeampi arvo (esim. 2,0) vähentää vaikutusta enemmän, kun taas arvo 1,0 poistaa tämän asetuksen käytöstä. (oletus: 1)",
+	"Talk to model": "",
 	"Tap to interrupt": "Napauta keskeyttääksesi",
-	"Tasks": "",
+	"Tasks": "Tehtävät",
 	"Tavily API Key": "Tavily API -avain",
 	"Tell us more:": "Kerro lisää:",
 	"Temperature": "Lämpötila",
@@ -1005,14 +1014,14 @@
 	"Title (e.g. Tell me a fun fact)": "Otsikko (esim. Kerro hauska fakta)",
 	"Title Auto-Generation": "Otsikon automaattinen luonti",
 	"Title cannot be an empty string.": "Otsikko ei voi olla tyhjä merkkijono.",
-	"Title Generation": "",
+	"Title Generation": "Otsikon luonti",
 	"Title Generation Prompt": "Otsikon luontikehote",
 	"TLS": "TLS",
 	"To access the available model names for downloading,": "Päästäksesi käsiksi ladattavissa oleviin mallinimiin,",
 	"To access the GGUF models available for downloading,": "Päästäksesi käsiksi ladattavissa oleviin GGUF-malleihin,",
 	"To access the WebUI, please reach out to the administrator. Admins can manage user statuses from the Admin Panel.": "Päästäksesi käyttämään WebUI:ta, ota yhteyttä ylläpitäjään. Ylläpitäjät voivat hallita käyttäjien tiloja Ylläpitopaneelista.",
 	"To attach knowledge base here, add them to the \"Knowledge\" workspace first.": "Liittääksesi tietokantasi tähän, lisää ne ensin \"Tietämys\"-työtilaan.",
-	"To learn more about available endpoints, visit our documentation.": "",
+	"To learn more about available endpoints, visit our documentation.": "Jos haluat lisätietoja käytettävissä olevista päätepisteistä, tutustu dokumentaatioomme.",
 	"To protect your privacy, only ratings, model IDs, tags, and metadata are shared from your feedback—your chat logs remain private and are not included.": "Yksityisyydensuojasi vuoksi palautteestasi jaetaan vain arvostelut, mallitunnukset, tagit ja metadata - keskustelulokisi pysyvät yksityisinä eikä niitä sisällytetä.",
 	"To select actions here, add them to the \"Functions\" workspace first.": "Valitaksesi toimintoja tässä, lisää ne ensin \"Toiminnot\"-työtilaan.",
 	"To select filters here, add them to the \"Functions\" workspace first.": "Valitaksesi suodattimia tässä, lisää ne ensin \"Toiminnot\"-työtilaan.",
@@ -1041,12 +1050,13 @@
 	"Top P": "Top P",
 	"Transformers": "Muunnokset",
 	"Trouble accessing Ollama?": "Ongelmia Ollama-yhteydessä?",
+	"Trust Proxy Environment": "",
 	"TTS Model": "Puhesynteesimalli",
 	"TTS Settings": "Puhesynteesiasetukset",
 	"TTS Voice": "Puhesynteesiääni",
 	"Type": "Tyyppi",
 	"Type Hugging Face Resolve (Download) URL": "Kirjoita Hugging Face -resolve-latausosoite",
-	"Uh-oh! There was an issue with the response.": "Voi ei! Vastauksessa oli ongelma.",
+	"Uh-oh! There was an issue with the response.": "Voi ei! Vastauksessa ilmeni ongelma.",
 	"UI": "Käyttöliittymä",
 	"Unarchive All": "Pura kaikkien arkistointi",
 	"Unarchive All Archived Chats": "Pura kaikkien arkistoitujen keskustelujen arkistointi",
@@ -1101,15 +1111,14 @@
 	"Warning:": "Varoitus:",
 	"Warning: Enabling this will allow users to upload arbitrary code on the server.": "Varoitus: Tämän käyttöönotto sallii käyttäjien ladata mielivaltaista koodia palvelimelle.",
 	"Warning: If you update or change your embedding model, you will need to re-import all documents.": "Varoitus: Jos päivität tai vaihdat upotusmallia, sinun on tuotava kaikki asiakirjat uudelleen.",
-	"Warning: Jupyter execution enables arbitrary code execution, posing severe security risks—proceed with extreme caution.": "",
+	"Warning: Jupyter execution enables arbitrary code execution, posing severe security risks—proceed with extreme caution.": "Varoitus: Jupyter käyttö voi mahdollistaa mielivaltaiseen koodin suorittamiseen, mikä voi aiheuttaa tietoturvariskejä - käytä äärimmäisen varoen.",
 	"Web": "Web",
 	"Web API": "Web-API",
-	"Web Loader Settings": "Web Loader -asetukset",
 	"Web Search": "Verkkohaku",
 	"Web Search Engine": "Hakukoneet",
 	"Web Search in Chat": "Verkkohaku keskustelussa",
 	"Web Search Query Generation": "Verkkohakukyselyn luonti",
-	"Webhook URL": "Webhook-URL",
+	"Webhook URL": "Webhook verkko-osoite",
 	"WebUI Settings": "WebUI-asetukset",
 	"WebUI URL": "WebUI-osoite",
 	"WebUI will make requests to \"{{url}}/api/chat\"": "WebUI lähettää pyyntöjä osoitteeseen \"{{url}}/api/chat\"",
@@ -1146,5 +1155,6 @@
 	"Your account status is currently pending activation.": "Tilisi tila on tällä hetkellä odottaa aktivointia.",
 	"Your entire contribution will go directly to the plugin developer; Open WebUI does not take any percentage. However, the chosen funding platform might have its own fees.": "Koko panoksesi menee suoraan lisäosan kehittäjälle; Open WebUI ei pidätä prosenttiosuutta. Valittu rahoitusalusta voi kuitenkin periä omia maksujaan.",
 	"Youtube": "YouTube",
-	"Youtube Loader Settings": "YouTube Loader -asetukset"
+	"Youtube Language": "",
+	"Youtube Proxy URL": ""
 }

+ 23 - 13
src/lib/i18n/locales/fr-CA/translation.json

@@ -13,6 +13,7 @@
 	"A task model is used when performing tasks such as generating titles for chats and web search queries": "Un modèle de tâche est utilisé lors de l’exécution de tâches telles que la génération de titres pour les conversations et les requêtes de recherche sur le web.",
 	"a user": "un utilisateur",
 	"About": "À propos",
+	"Accept autocomplete generation / Jump to prompt variable": "",
 	"Access": "",
 	"Access Control": "",
 	"Accessible to all users": "",
@@ -85,6 +86,7 @@
 	"Archive All Chats": "Archiver toutes les conversations",
 	"Archived Chats": "Conversations archivées",
 	"archived-chat-export": "",
+	"Are you sure you want to clear all memories? This action cannot be undone.": "",
 	"Are you sure you want to delete this channel?": "",
 	"Are you sure you want to delete this message?": "",
 	"Are you sure you want to unarchive all archived chats?": "",
@@ -93,7 +95,7 @@
 	"Artifacts": "",
 	"Ask a question": "",
 	"Assistant": "",
-	"Attach file": "Joindre un document",
+	"Attach file from knowledge": "",
 	"Attention to detail": "Attention aux détails",
 	"Attribute for Mail": "",
 	"Attribute for Username": "",
@@ -127,6 +129,7 @@
 	"Bocha Search API Key": "",
 	"Brave Search API Key": "Clé API Brave Search",
 	"By {{name}}": "",
+	"Bypass Embedding and Retrieval": "",
 	"Bypass SSL verification for Websites": "Bypasser la vérification SSL pour les sites web",
 	"Calendar": "",
 	"Call": "Appeler",
@@ -156,11 +159,11 @@
 	"Checking for updates...": "Recherche de mises à jour...",
 	"Choose a model before saving...": "Choisissez un modèle avant de sauvegarder...",
 	"Chunk Overlap": "Chevauchement de blocs",
-	"Chunk Params": "Paramètres d'encombrement",
 	"Chunk Size": "Taille de bloc",
 	"Ciphers": "",
 	"Citation": "Citation",
 	"Clear memory": "Libérer la mémoire",
+	"Clear Memory": "",
 	"click here": "",
 	"Click here for filter guides.": "",
 	"Click here for help.": "Cliquez ici pour obtenir de l'aide.",
@@ -208,7 +211,7 @@
 	"Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort. (Default: medium)": "",
 	"Contact Admin for WebUI Access": "Contacter l'administrateur pour l'accès à l'interface Web",
 	"Content": "Contenu",
-	"Content Extraction": "",
+	"Content Extraction Engine": "",
 	"Context Length": "Longueur du contexte",
 	"Continue Response": "Continuer la réponse",
 	"Continue with {{provider}}": "Continuer avec {{provider}}",
@@ -245,6 +248,7 @@
 	"Current Model": "Modèle actuel amélioré",
 	"Current Password": "Mot de passe actuel",
 	"Custom": "Sur mesure",
+	"Danger Zone": "",
 	"Dark": "Obscur",
 	"Database": "Base de données",
 	"December": "Décembre",
@@ -305,6 +309,8 @@
 	"Do not install functions from sources you do not fully trust.": "",
 	"Do not install tools from sources you do not fully trust.": "",
 	"Document": "Document",
+	"Document Intelligence": "",
+	"Document Intelligence endpoint and key required.": "",
 	"Documentation": "Documentation",
 	"Documents": "Documents",
 	"does not make any external connections, and your data stays securely on your locally hosted server.": "ne fait aucune connexion externe et garde vos données en sécurité sur votre serveur local.",
@@ -340,6 +346,7 @@
 	"ElevenLabs": "",
 	"Email": "E-mail",
 	"Embark on adventures": "",
+	"Embedding": "",
 	"Embedding Batch Size": "Taille du lot d'encodage",
 	"Embedding Model": "Modèle d'embedding",
 	"Embedding Model Engine": "Moteur de modèle d'encodage",
@@ -348,15 +355,12 @@
 	"Enable autocomplete generation for chat messages": "",
 	"Enable Code Interpreter": "",
 	"Enable Community Sharing": "Activer le partage communautaire",
-	"Enable Google Drive": "",
 	"Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "",
 	"Enable Memory Mapping (mmap) to load model data. This option allows the system to use disk storage as an extension of RAM by treating disk files as if they were in RAM. This can improve model performance by allowing for faster data access. However, it may not work correctly with all systems and can consume a significant amount of disk space.": "",
 	"Enable Message Rating": "",
 	"Enable Mirostat sampling for controlling perplexity. (Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "",
 	"Enable New Sign Ups": "Activer les nouvelles inscriptions",
-	"Enable Web Search": "Activer la recherche sur le Web",
 	"Enabled": "",
-	"Engine": "Moteur",
 	"Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "Vérifiez que votre fichier CSV comprenne les 4 colonnes dans cet ordre : Name, Email, Password, Role.",
 	"Enter {{role}} message here": "Entrez le message {{role}} ici",
 	"Enter a detail about yourself for your LLMs to recall": "Saisissez un détail sur vous-même que vos LLMs pourront se rappeler",
@@ -372,6 +376,8 @@
 	"Enter Chunk Overlap": "Entrez le chevauchement de chunk",
 	"Enter Chunk Size": "Entrez la taille de bloc",
 	"Enter description": "",
+	"Enter Document Intelligence Endpoint": "",
+	"Enter Document Intelligence Key": "",
 	"Enter domains separated by commas (e.g., example.com,site.org)": "",
 	"Enter Exa API Key": "",
 	"Enter Github Raw URL": "Entrez l'URL brute de GitHub",
@@ -507,9 +513,9 @@
 	"Gemini API Config": "",
 	"Gemini API Key is required.": "",
 	"General": "Général",
-	"General Settings": "Paramètres Généraux",
 	"Generate an image": "",
 	"Generate Image": "Générer une image",
+	"Generate prompt pair": "",
 	"Generating search query": "Génération d'une requête de recherche",
 	"Get started": "",
 	"Get started with {{WEBUI_NAME}}": "",
@@ -565,6 +571,7 @@
 	"Input commands": "Entrez les commandes",
 	"Install from Github URL": "Installer depuis l'URL GitHub",
 	"Instant Auto-Send After Voice Transcription": "Envoi automatique instantané après transcription vocale",
+	"Integration": "",
 	"Interface": "Interface utilisateur",
 	"Invalid file format.": "",
 	"Invalid Tag": "Étiquette non valide",
@@ -612,16 +619,17 @@
 	"Listening...": "En train d'écouter...",
 	"Llama.cpp": "",
 	"LLMs can make mistakes. Verify important information.": "Les LLM peuvent faire des erreurs. Vérifiez les informations importantes.",
+	"Loader": "",
 	"Loading Kokoro.js...": "",
 	"Local": "",
 	"Local Models": "Modèles locaux",
+	"Location access not allowed": "",
 	"Lost": "",
 	"LTR": "LTR",
 	"Made by Open WebUI Community": "Réalisé par la communauté OpenWebUI",
 	"Make sure to enclose them with": "Assurez-vous de les inclure dans",
 	"Make sure to export a workflow.json file as API format from ComfyUI.": "",
 	"Manage": "Gérer",
-	"Manage Arena Models": "",
 	"Manage Direct Connections": "",
 	"Manage Models": "",
 	"Manage Ollama": "",
@@ -689,6 +697,7 @@
 	"No HTML, CSS, or JavaScript content found.": "",
 	"No inference engine with management support found": "",
 	"No knowledge found": "",
+	"No memories to clear": "",
 	"No model IDs": "",
 	"No models found": "",
 	"No models selected": "",
@@ -718,6 +727,7 @@
 	"Ollama API settings updated": "",
 	"Ollama Version": "Version Ollama améliorée",
 	"On": "Activé",
+	"OneDrive": "",
 	"Only alphanumeric characters and hyphens are allowed": "",
 	"Only alphanumeric characters and hyphens are allowed in the command string.": "Seuls les caractères alphanumériques et les tirets sont autorisés dans la chaîne de commande.",
 	"Only collections can be edited, create a new knowledge base to edit/add documents.": "",
@@ -788,11 +798,9 @@
 	"Prompt updated successfully": "",
 	"Prompts": "Prompts",
 	"Prompts Access": "",
-	"Proxy URL": "",
 	"Pull \"{{searchValue}}\" from Ollama.com": "Récupérer « {{searchValue}} » depuis Ollama.com",
 	"Pull a model from Ollama.com": "Télécharger un modèle depuis Ollama.com",
 	"Query Generation Prompt": "",
-	"Query Params": "Paramètres de requête",
 	"RAG Template": "Modèle RAG",
 	"Rating": "",
 	"Re-rank models by topic similarity": "",
@@ -827,6 +835,7 @@
 	"Response notifications cannot be activated as the website permissions have been denied. Please visit your browser settings to grant the necessary access.": "Les notifications de réponse ne peuvent pas être activées car les autorisations du site web ont été refusées. Veuillez visiter les paramètres de votre navigateur pour accorder l'accès nécessaire.",
 	"Response splitting": "Fractionnement de la réponse",
 	"Result": "",
+	"Retrieval": "",
 	"Retrieval Query Generation": "",
 	"Rich Text Input for Chat": "",
 	"RK": "",
@@ -880,7 +889,6 @@
 	"Select an Ollama instance": "",
 	"Select Engine": "",
 	"Select Knowledge": "",
-	"Select model": "Sélectionnez un modèle",
 	"Select only one model to call": "Sélectionnez seulement un modèle pour appeler",
 	"Selected model(s) do not support image inputs": "Les modèle(s) sélectionné(s) ne prennent pas en charge les entrées d'images",
 	"Semantic distance to query": "",
@@ -957,6 +965,7 @@
 	"Tags Generation": "",
 	"Tags Generation Prompt": "",
 	"Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "",
+	"Talk to model": "",
 	"Tap to interrupt": "Appuyez pour interrompre",
 	"Tasks": "",
 	"Tavily API Key": "Clé API Tavily",
@@ -1041,6 +1050,7 @@
 	"Top P": "Top P",
 	"Transformers": "",
 	"Trouble accessing Ollama?": "Rencontrez-vous des difficultés pour accéder à Ollama ?",
+	"Trust Proxy Environment": "",
 	"TTS Model": "Modèle de synthèse vocale",
 	"TTS Settings": "Paramètres de synthèse vocale",
 	"TTS Voice": "Voix TTS",
@@ -1104,7 +1114,6 @@
 	"Warning: Jupyter execution enables arbitrary code execution, posing severe security risks—proceed with extreme caution.": "",
 	"Web": "Web",
 	"Web API": "API Web",
-	"Web Loader Settings": "Paramètres du chargeur web",
 	"Web Search": "Recherche Web",
 	"Web Search Engine": "Moteur de recherche Web",
 	"Web Search in Chat": "",
@@ -1146,5 +1155,6 @@
 	"Your account status is currently pending activation.": "Votre statut de compte est actuellement en attente d'activation.",
 	"Your entire contribution will go directly to the plugin developer; Open WebUI does not take any percentage. However, the chosen funding platform might have its own fees.": "",
 	"Youtube": "YouTube",
-	"Youtube Loader Settings": "Paramètres de l'outil de téléchargement YouTube"
+	"Youtube Language": "",
+	"Youtube Proxy URL": ""
 }

+ 23 - 13
src/lib/i18n/locales/fr-FR/translation.json

@@ -13,6 +13,7 @@
 	"A task model is used when performing tasks such as generating titles for chats and web search queries": "Un modèle de tâche est utilisé lors de l’exécution de tâches telles que la génération de titres pour les conversations et les requêtes de recherche sur le web.",
 	"a user": "un utilisateur",
 	"About": "À propos",
+	"Accept autocomplete generation / Jump to prompt variable": "",
 	"Access": "Accès",
 	"Access Control": "Contrôle d'accès",
 	"Accessible to all users": "Accessible à tous les utilisateurs",
@@ -85,6 +86,7 @@
 	"Archive All Chats": "Archiver toutes les conversations",
 	"Archived Chats": "Conversations archivées",
 	"archived-chat-export": "exportation de conversation archivée",
+	"Are you sure you want to clear all memories? This action cannot be undone.": "",
 	"Are you sure you want to delete this channel?": "Êtes-vous sûr de vouloir supprimer ce canal ?",
 	"Are you sure you want to delete this message?": "Êtes-vous sûr de vouloir supprimer ce message ?",
 	"Are you sure you want to unarchive all archived chats?": "Êtes-vous sûr de vouloir désarchiver toutes les conversations archivées?",
@@ -93,7 +95,7 @@
 	"Artifacts": "Artéfacts",
 	"Ask a question": "Posez votre question",
 	"Assistant": "Assistant",
-	"Attach file": "Joindre un document",
+	"Attach file from knowledge": "",
 	"Attention to detail": "Attention aux détails",
 	"Attribute for Mail": "Attribut pour l'e-mail",
 	"Attribute for Username": "Attribut pour le nom d'utilisateur",
@@ -127,6 +129,7 @@
 	"Bocha Search API Key": "",
 	"Brave Search API Key": "Clé API Brave Search",
 	"By {{name}}": "Par {{name}}",
+	"Bypass Embedding and Retrieval": "",
 	"Bypass SSL verification for Websites": "Bypasser la vérification SSL pour les sites web",
 	"Calendar": "",
 	"Call": "Appeler",
@@ -156,11 +159,11 @@
 	"Checking for updates...": "Recherche de mises à jour...",
 	"Choose a model before saving...": "Choisissez un modèle avant de sauvegarder...",
 	"Chunk Overlap": "Chevauchement des chunks",
-	"Chunk Params": "Paramètres des chunks",
 	"Chunk Size": "Taille des chunks",
 	"Ciphers": "Chiffres",
 	"Citation": "Citation",
 	"Clear memory": "Effacer la mémoire",
+	"Clear Memory": "",
 	"click here": "cliquez ici",
 	"Click here for filter guides.": "Cliquez ici pour les guides de filtrage.",
 	"Click here for help.": "Cliquez ici pour obtenir de l'aide.",
@@ -208,7 +211,7 @@
 	"Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort. (Default: medium)": "Contraint l'effort de raisonnement pour les modèles de raisonnement. Applicable uniquement aux modèles de raisonnement de fournisseurs spécifiques qui prennent en charge l'effort de raisonnement. (Par défaut : medium)",
 	"Contact Admin for WebUI Access": "Contacter l'administrateur pour obtenir l'accès à WebUI",
 	"Content": "Contenu",
-	"Content Extraction": "Extraction du contenu",
+	"Content Extraction Engine": "",
 	"Context Length": "Longueur du contexte",
 	"Continue Response": "Continuer la réponse",
 	"Continue with {{provider}}": "Continuer avec {{provider}}",
@@ -245,6 +248,7 @@
 	"Current Model": "Modèle actuel",
 	"Current Password": "Mot de passe actuel",
 	"Custom": "Sur mesure",
+	"Danger Zone": "",
 	"Dark": "Sombre",
 	"Database": "Base de données",
 	"December": "Décembre",
@@ -305,6 +309,8 @@
 	"Do not install functions from sources you do not fully trust.": "N'installez pas de fonctions provenant de sources auxquelles vous ne faites pas entièrement confiance.",
 	"Do not install tools from sources you do not fully trust.": "N'installez pas d'outils provenant de sources auxquelles vous ne faites pas entièrement confiance.",
 	"Document": "Document",
+	"Document Intelligence": "",
+	"Document Intelligence endpoint and key required.": "",
 	"Documentation": "Documentation",
 	"Documents": "Documents",
 	"does not make any external connections, and your data stays securely on your locally hosted server.": "n'établit aucune connexion externe et garde vos données en sécurité sur votre serveur local.",
@@ -340,6 +346,7 @@
 	"ElevenLabs": "ElevenLabs",
 	"Email": "E-mail",
 	"Embark on adventures": "Embarquez pour des aventures",
+	"Embedding": "",
 	"Embedding Batch Size": "Taille du lot d'embedding",
 	"Embedding Model": "Modèle d'embedding",
 	"Embedding Model Engine": "Moteur de modèle d'embedding",
@@ -348,15 +355,12 @@
 	"Enable autocomplete generation for chat messages": "Activer la génération des suggestions pour les messages",
 	"Enable Code Interpreter": "",
 	"Enable Community Sharing": "Activer le partage communautaire",
-	"Enable Google Drive": "Activer Google Drive",
 	"Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "Activer le verrouillage de la mémoire (mlock) pour empêcher les données du modèle d'être échangées de la RAM. Cette option verrouille l'ensemble de pages de travail du modèle en RAM, garantissant qu'elles ne seront pas échangées vers le disque. Cela peut aider à maintenir les performances en évitant les défauts de page et en assurant un accès rapide aux données.",
 	"Enable Memory Mapping (mmap) to load model data. This option allows the system to use disk storage as an extension of RAM by treating disk files as if they were in RAM. This can improve model performance by allowing for faster data access. However, it may not work correctly with all systems and can consume a significant amount of disk space.": "Activer le mappage de la mémoire (mmap) pour charger les données du modèle. Cette option permet au système d'utiliser le stockage disque comme une extension de la RAM en traitant les fichiers disque comme s'ils étaient en RAM. Cela peut améliorer les performances du modèle en permettant un accès plus rapide aux données. Cependant, cela peut ne pas fonctionner correctement avec tous les systèmes et peut consommer une quantité significative d'espace disque.",
 	"Enable Message Rating": "Activer l'évaluation des messages",
 	"Enable Mirostat sampling for controlling perplexity. (Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "Activer l'échantillonnage Mirostat pour contrôler la perplexité. (Par défaut : 0, 0 = Désactivé, 1 = Mirostat, 2 = Mirostat 2.0)",
 	"Enable New Sign Ups": "Activer les nouvelles inscriptions",
-	"Enable Web Search": "Activer la recherche Web",
 	"Enabled": "Activé",
-	"Engine": "Moteur",
 	"Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "Vérifiez que votre fichier CSV comprenne les 4 colonnes dans cet ordre : Name, Email, Password, Role.",
 	"Enter {{role}} message here": "Entrez le message {{role}} ici",
 	"Enter a detail about yourself for your LLMs to recall": "Saisissez un détail sur vous-même que vos LLMs pourront se rappeler",
@@ -372,6 +376,8 @@
 	"Enter Chunk Overlap": "Entrez le chevauchement des chunks",
 	"Enter Chunk Size": "Entrez la taille des chunks",
 	"Enter description": "Entrez la description",
+	"Enter Document Intelligence Endpoint": "",
+	"Enter Document Intelligence Key": "",
 	"Enter domains separated by commas (e.g., example.com,site.org)": "",
 	"Enter Exa API Key": "",
 	"Enter Github Raw URL": "Entrez l'URL brute de GitHub",
@@ -507,9 +513,9 @@
 	"Gemini API Config": "",
 	"Gemini API Key is required.": "",
 	"General": "Général",
-	"General Settings": "Paramètres généraux",
 	"Generate an image": "",
 	"Generate Image": "Générer une image",
+	"Generate prompt pair": "",
 	"Generating search query": "Génération d'une requête de recherche",
 	"Get started": "Commencer",
 	"Get started with {{WEBUI_NAME}}": "Commencez avec {{WEBUI_NAME}}",
@@ -565,6 +571,7 @@
 	"Input commands": "Commandes d'entrée",
 	"Install from Github URL": "Installer depuis une URL GitHub",
 	"Instant Auto-Send After Voice Transcription": "Envoi automatique après la transcription",
+	"Integration": "",
 	"Interface": "Interface utilisateur",
 	"Invalid file format.": "Format de fichier non valide.",
 	"Invalid Tag": "Tag non valide",
@@ -612,16 +619,17 @@
 	"Listening...": "Écoute en cours...",
 	"Llama.cpp": "Llama.cpp",
 	"LLMs can make mistakes. Verify important information.": "Les LLM peuvent faire des erreurs. Vérifiez les informations importantes.",
+	"Loader": "",
 	"Loading Kokoro.js...": "",
 	"Local": "Local",
 	"Local Models": "Modèles locaux",
+	"Location access not allowed": "",
 	"Lost": "Perdu",
 	"LTR": "LTR",
 	"Made by Open WebUI Community": "Réalisé par la communauté OpenWebUI",
 	"Make sure to enclose them with": "Assurez-vous de les inclure dans",
 	"Make sure to export a workflow.json file as API format from ComfyUI.": "Veillez à exporter un fichier workflow.json au format API depuis ComfyUI.",
 	"Manage": "Gérer",
-	"Manage Arena Models": "Gérer les modèles d'arène",
 	"Manage Direct Connections": "",
 	"Manage Models": "Gérer les modèles",
 	"Manage Ollama": "Gérer Ollama",
@@ -689,6 +697,7 @@
 	"No HTML, CSS, or JavaScript content found.": "Aucun contenu HTML, CSS ou JavaScript trouvé.",
 	"No inference engine with management support found": "Aucun moteur d'inférence avec support trouvé",
 	"No knowledge found": "Aucune connaissance trouvée",
+	"No memories to clear": "",
 	"No model IDs": "Aucun ID de modèle",
 	"No models found": "Aucun modèle trouvé",
 	"No models selected": "Aucun modèle sélectionné",
@@ -718,6 +727,7 @@
 	"Ollama API settings updated": "Paramètres de l'API Ollama mis à jour",
 	"Ollama Version": "Version Ollama",
 	"On": "Activé",
+	"OneDrive": "",
 	"Only alphanumeric characters and hyphens are allowed": "Seuls les caractères alphanumériques et les tirets sont autorisés",
 	"Only alphanumeric characters and hyphens are allowed in the command string.": "Seuls les caractères alphanumériques et les tirets sont autorisés dans la chaîne de commande.",
 	"Only collections can be edited, create a new knowledge base to edit/add documents.": "Seules les collections peuvent être modifiées, créez une nouvelle base de connaissance pour modifier/ajouter des documents.",
@@ -788,11 +798,9 @@
 	"Prompt updated successfully": "Prompt mis à jour avec succès",
 	"Prompts": "Prompts",
 	"Prompts Access": "Accès aux prompts",
-	"Proxy URL": "URL du proxy",
 	"Pull \"{{searchValue}}\" from Ollama.com": "Récupérer « {{searchValue}} » depuis Ollama.com",
 	"Pull a model from Ollama.com": "Télécharger un modèle depuis Ollama.com",
 	"Query Generation Prompt": "Prompt de génération de requête",
-	"Query Params": "Paramètres de requête",
 	"RAG Template": "Modèle RAG",
 	"Rating": "Note",
 	"Re-rank models by topic similarity": "Reclasser les modèles par similarité de sujet",
@@ -827,6 +835,7 @@
 	"Response notifications cannot be activated as the website permissions have been denied. Please visit your browser settings to grant the necessary access.": "Les notifications de réponse ne peuvent pas être activées car les autorisations du site web ont été refusées. Veuillez vérifier les paramètres de votre navigateur pour accorder l'accès nécessaire.",
 	"Response splitting": "Fractionnement de la réponse",
 	"Result": "Résultat",
+	"Retrieval": "",
 	"Retrieval Query Generation": "Génération de requête de RAG",
 	"Rich Text Input for Chat": "Saisie de texte enrichi pour le chat",
 	"RK": "Rang",
@@ -880,7 +889,6 @@
 	"Select an Ollama instance": "Sélectionnez une instance Ollama",
 	"Select Engine": "Sélectionnez le moteur",
 	"Select Knowledge": "Sélectionnez une connaissance",
-	"Select model": "Sélectionner un modèle",
 	"Select only one model to call": "Sélectionnez seulement un modèle pour appeler",
 	"Selected model(s) do not support image inputs": "Les modèle(s) sélectionné(s) ne prennent pas en charge les entrées d'images",
 	"Semantic distance to query": "Distance sémantique à la requête",
@@ -957,6 +965,7 @@
 	"Tags Generation": "Génération de tags",
 	"Tags Generation Prompt": "Prompt de génération de tags",
 	"Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "L'échantillonnage sans queue est utilisé pour réduire l'impact des tokens moins probables dans la sortie. Une valeur plus élevée (par exemple 2.0) réduira davantage l'impact, tandis qu'une valeur de 1.0 désactive ce paramètre. (par défaut : 1)",
+	"Talk to model": "",
 	"Tap to interrupt": "Appuyez pour interrompre",
 	"Tasks": "",
 	"Tavily API Key": "Clé API Tavily",
@@ -1041,6 +1050,7 @@
 	"Top P": "Top P",
 	"Transformers": "Transformers",
 	"Trouble accessing Ollama?": "Problèmes d'accès à Ollama ?",
+	"Trust Proxy Environment": "",
 	"TTS Model": "Modèle de Text-to-Speech",
 	"TTS Settings": "Paramètres de Text-to-Speech",
 	"TTS Voice": "Voix de Text-to-Speech",
@@ -1104,7 +1114,6 @@
 	"Warning: Jupyter execution enables arbitrary code execution, posing severe security risks—proceed with extreme caution.": "",
 	"Web": "Web",
 	"Web API": "API Web",
-	"Web Loader Settings": "Paramètres du Web Loader",
 	"Web Search": "Recherche Web",
 	"Web Search Engine": "Moteur de recherche Web",
 	"Web Search in Chat": "",
@@ -1146,5 +1155,6 @@
 	"Your account status is currently pending activation.": "Votre statut de compte est actuellement en attente d'activation.",
 	"Your entire contribution will go directly to the plugin developer; Open WebUI does not take any percentage. However, the chosen funding platform might have its own fees.": "L'intégralité de votre contribution ira directement au développeur du plugin ; Open WebUI ne prend aucun pourcentage. Cependant, la plateforme de financement choisie peut avoir ses propres frais.",
 	"Youtube": "YouTube",
-	"Youtube Loader Settings": "Paramètres de l'outil de téléchargement YouTube"
+	"Youtube Language": "",
+	"Youtube Proxy URL": ""
 }

+ 23 - 13
src/lib/i18n/locales/he-IL/translation.json

@@ -13,6 +13,7 @@
 	"A task model is used when performing tasks such as generating titles for chats and web search queries": "מודל משימה משמש בעת ביצוע משימות כגון יצירת כותרות עבור צ'אטים ושאילתות חיפוש באינטרנט",
 	"a user": "משתמש",
 	"About": "אודות",
+	"Accept autocomplete generation / Jump to prompt variable": "",
 	"Access": "",
 	"Access Control": "",
 	"Accessible to all users": "",
@@ -85,6 +86,7 @@
 	"Archive All Chats": "אחסן בארכיון את כל הצ'אטים",
 	"Archived Chats": "צ'אטים מאורכבים",
 	"archived-chat-export": "",
+	"Are you sure you want to clear all memories? This action cannot be undone.": "",
 	"Are you sure you want to delete this channel?": "",
 	"Are you sure you want to delete this message?": "",
 	"Are you sure you want to unarchive all archived chats?": "",
@@ -93,7 +95,7 @@
 	"Artifacts": "",
 	"Ask a question": "",
 	"Assistant": "",
-	"Attach file": "צרף קובץ",
+	"Attach file from knowledge": "",
 	"Attention to detail": "תשומת לב לפרטים",
 	"Attribute for Mail": "",
 	"Attribute for Username": "",
@@ -127,6 +129,7 @@
 	"Bocha Search API Key": "",
 	"Brave Search API Key": "מפתח API של חיפוש אמיץ",
 	"By {{name}}": "",
+	"Bypass Embedding and Retrieval": "",
 	"Bypass SSL verification for Websites": "עקוף אימות SSL עבור אתרים",
 	"Calendar": "",
 	"Call": "",
@@ -156,11 +159,11 @@
 	"Checking for updates...": "בודק עדכונים...",
 	"Choose a model before saving...": "בחר מודל לפני השמירה...",
 	"Chunk Overlap": "חפיפת נתונים",
-	"Chunk Params": "פרמטרי נתונים",
 	"Chunk Size": "גודל נתונים",
 	"Ciphers": "",
 	"Citation": "ציטוט",
 	"Clear memory": "",
+	"Clear Memory": "",
 	"click here": "",
 	"Click here for filter guides.": "",
 	"Click here for help.": "לחץ כאן לעזרה.",
@@ -208,7 +211,7 @@
 	"Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort. (Default: medium)": "",
 	"Contact Admin for WebUI Access": "",
 	"Content": "תוכן",
-	"Content Extraction": "",
+	"Content Extraction Engine": "",
 	"Context Length": "אורך הקשר",
 	"Continue Response": "המשך תגובה",
 	"Continue with {{provider}}": "",
@@ -245,6 +248,7 @@
 	"Current Model": "המודל הנוכחי",
 	"Current Password": "הסיסמה הנוכחית",
 	"Custom": "מותאם אישית",
+	"Danger Zone": "",
 	"Dark": "כהה",
 	"Database": "מסד נתונים",
 	"December": "דצמבר",
@@ -305,6 +309,8 @@
 	"Do not install functions from sources you do not fully trust.": "",
 	"Do not install tools from sources you do not fully trust.": "",
 	"Document": "מסמך",
+	"Document Intelligence": "",
+	"Document Intelligence endpoint and key required.": "",
 	"Documentation": "",
 	"Documents": "מסמכים",
 	"does not make any external connections, and your data stays securely on your locally hosted server.": "לא מבצע חיבורים חיצוניים, והנתונים שלך נשמרים באופן מאובטח בשרת המקומי שלך.",
@@ -340,6 +346,7 @@
 	"ElevenLabs": "",
 	"Email": "דוא\"ל",
 	"Embark on adventures": "",
+	"Embedding": "",
 	"Embedding Batch Size": "",
 	"Embedding Model": "מודל הטמעה",
 	"Embedding Model Engine": "מנוע מודל הטמעה",
@@ -348,15 +355,12 @@
 	"Enable autocomplete generation for chat messages": "",
 	"Enable Code Interpreter": "",
 	"Enable Community Sharing": "הפיכת שיתוף קהילה לזמין",
-	"Enable Google Drive": "",
 	"Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "",
 	"Enable Memory Mapping (mmap) to load model data. This option allows the system to use disk storage as an extension of RAM by treating disk files as if they were in RAM. This can improve model performance by allowing for faster data access. However, it may not work correctly with all systems and can consume a significant amount of disk space.": "",
 	"Enable Message Rating": "",
 	"Enable Mirostat sampling for controlling perplexity. (Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "",
 	"Enable New Sign Ups": "אפשר הרשמות חדשות",
-	"Enable Web Search": "הפיכת חיפוש באינטרנט לזמין",
 	"Enabled": "",
-	"Engine": "",
 	"Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "ודא שקובץ ה-CSV שלך כולל 4 עמודות בסדר הבא: שם, דוא\"ל, סיסמה, תפקיד.",
 	"Enter {{role}} message here": "הזן הודעת {{role}} כאן",
 	"Enter a detail about yourself for your LLMs to recall": "הזן פרטים על עצמך כדי שLLMs יזכור",
@@ -372,6 +376,8 @@
 	"Enter Chunk Overlap": "הזן חפיפת נתונים",
 	"Enter Chunk Size": "הזן גודל נתונים",
 	"Enter description": "",
+	"Enter Document Intelligence Endpoint": "",
+	"Enter Document Intelligence Key": "",
 	"Enter domains separated by commas (e.g., example.com,site.org)": "",
 	"Enter Exa API Key": "",
 	"Enter Github Raw URL": "הזן כתובת URL של Github Raw",
@@ -507,9 +513,9 @@
 	"Gemini API Config": "",
 	"Gemini API Key is required.": "",
 	"General": "כללי",
-	"General Settings": "הגדרות כלליות",
 	"Generate an image": "",
 	"Generate Image": "",
+	"Generate prompt pair": "",
 	"Generating search query": "יצירת שאילתת חיפוש",
 	"Get started": "",
 	"Get started with {{WEBUI_NAME}}": "",
@@ -565,6 +571,7 @@
 	"Input commands": "פקודות קלט",
 	"Install from Github URL": "התקן מכתובת URL של Github",
 	"Instant Auto-Send After Voice Transcription": "",
+	"Integration": "",
 	"Interface": "ממשק",
 	"Invalid file format.": "",
 	"Invalid Tag": "תג לא חוקי",
@@ -612,16 +619,17 @@
 	"Listening...": "",
 	"Llama.cpp": "",
 	"LLMs can make mistakes. Verify important information.": "מודלים בשפה טבעית יכולים לטעות. אמת מידע חשוב.",
+	"Loader": "",
 	"Loading Kokoro.js...": "",
 	"Local": "",
 	"Local Models": "",
+	"Location access not allowed": "",
 	"Lost": "",
 	"LTR": "LTR",
 	"Made by Open WebUI Community": "נוצר על ידי קהילת OpenWebUI",
 	"Make sure to enclose them with": "ודא להקיף אותם עם",
 	"Make sure to export a workflow.json file as API format from ComfyUI.": "",
 	"Manage": "",
-	"Manage Arena Models": "",
 	"Manage Direct Connections": "",
 	"Manage Models": "",
 	"Manage Ollama": "",
@@ -689,6 +697,7 @@
 	"No HTML, CSS, or JavaScript content found.": "",
 	"No inference engine with management support found": "",
 	"No knowledge found": "",
+	"No memories to clear": "",
 	"No model IDs": "",
 	"No models found": "",
 	"No models selected": "",
@@ -718,6 +727,7 @@
 	"Ollama API settings updated": "",
 	"Ollama Version": "גרסת Ollama",
 	"On": "פועל",
+	"OneDrive": "",
 	"Only alphanumeric characters and hyphens are allowed": "",
 	"Only alphanumeric characters and hyphens are allowed in the command string.": "רק תווים אלפאנומריים ומקפים מותרים במחרוזת הפקודה.",
 	"Only collections can be edited, create a new knowledge base to edit/add documents.": "",
@@ -788,11 +798,9 @@
 	"Prompt updated successfully": "",
 	"Prompts": "פקודות",
 	"Prompts Access": "",
-	"Proxy URL": "",
 	"Pull \"{{searchValue}}\" from Ollama.com": "משוך \"{{searchValue}}\" מ-Ollama.com",
 	"Pull a model from Ollama.com": "משוך מודל מ-Ollama.com",
 	"Query Generation Prompt": "",
-	"Query Params": "פרמטרי שאילתה",
 	"RAG Template": "תבנית RAG",
 	"Rating": "",
 	"Re-rank models by topic similarity": "",
@@ -827,6 +835,7 @@
 	"Response notifications cannot be activated as the website permissions have been denied. Please visit your browser settings to grant the necessary access.": "",
 	"Response splitting": "",
 	"Result": "",
+	"Retrieval": "",
 	"Retrieval Query Generation": "",
 	"Rich Text Input for Chat": "",
 	"RK": "",
@@ -880,7 +889,6 @@
 	"Select an Ollama instance": "",
 	"Select Engine": "",
 	"Select Knowledge": "",
-	"Select model": "בחר מודל",
 	"Select only one model to call": "",
 	"Selected model(s) do not support image inputs": "דגמים נבחרים אינם תומכים בקלט תמונה",
 	"Semantic distance to query": "",
@@ -957,6 +965,7 @@
 	"Tags Generation": "",
 	"Tags Generation Prompt": "",
 	"Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "",
+	"Talk to model": "",
 	"Tap to interrupt": "",
 	"Tasks": "",
 	"Tavily API Key": "",
@@ -1041,6 +1050,7 @@
 	"Top P": "Top P",
 	"Transformers": "",
 	"Trouble accessing Ollama?": "קשה לגשת לOllama?",
+	"Trust Proxy Environment": "",
 	"TTS Model": "",
 	"TTS Settings": "הגדרות TTS",
 	"TTS Voice": "",
@@ -1104,7 +1114,6 @@
 	"Warning: Jupyter execution enables arbitrary code execution, posing severe security risks—proceed with extreme caution.": "",
 	"Web": "רשת",
 	"Web API": "",
-	"Web Loader Settings": "הגדרות טעינת אתר",
 	"Web Search": "חיפוש באינטרנט",
 	"Web Search Engine": "מנוע חיפוש באינטרנט",
 	"Web Search in Chat": "",
@@ -1146,5 +1155,6 @@
 	"Your account status is currently pending activation.": "",
 	"Your entire contribution will go directly to the plugin developer; Open WebUI does not take any percentage. However, the chosen funding platform might have its own fees.": "",
 	"Youtube": "Youtube",
-	"Youtube Loader Settings": "הגדרות Youtube Loader"
+	"Youtube Language": "",
+	"Youtube Proxy URL": ""
 }

+ 23 - 13
src/lib/i18n/locales/hi-IN/translation.json

@@ -13,6 +13,7 @@
 	"A task model is used when performing tasks such as generating titles for chats and web search queries": "चैट और वेब खोज क्वेरी के लिए शीर्षक उत्पन्न करने जैसे कार्य करते समय कार्य मॉडल का उपयोग किया जाता है",
 	"a user": "एक उपयोगकर्ता",
 	"About": "हमारे बारे में",
+	"Accept autocomplete generation / Jump to prompt variable": "",
 	"Access": "",
 	"Access Control": "",
 	"Accessible to all users": "",
@@ -85,6 +86,7 @@
 	"Archive All Chats": "सभी चैट संग्रहीत करें",
 	"Archived Chats": "संग्रहीत चैट",
 	"archived-chat-export": "",
+	"Are you sure you want to clear all memories? This action cannot be undone.": "",
 	"Are you sure you want to delete this channel?": "",
 	"Are you sure you want to delete this message?": "",
 	"Are you sure you want to unarchive all archived chats?": "",
@@ -93,7 +95,7 @@
 	"Artifacts": "",
 	"Ask a question": "",
 	"Assistant": "",
-	"Attach file": "फ़ाइल atta",
+	"Attach file from knowledge": "",
 	"Attention to detail": "विस्तार पर ध्यान",
 	"Attribute for Mail": "",
 	"Attribute for Username": "",
@@ -127,6 +129,7 @@
 	"Bocha Search API Key": "",
 	"Brave Search API Key": "Brave सर्च एपीआई कुंजी",
 	"By {{name}}": "",
+	"Bypass Embedding and Retrieval": "",
 	"Bypass SSL verification for Websites": "वेबसाइटों के लिए SSL सुनिश्चिती को छोड़ें",
 	"Calendar": "",
 	"Call": "",
@@ -156,11 +159,11 @@
 	"Checking for updates...": "अपडेट के लिए जांच कर रहा है...",
 	"Choose a model before saving...": "सहेजने से पहले एक मॉडल चुनें...",
 	"Chunk Overlap": "चंक ओवरलैप",
-	"Chunk Params": "चंक पैरामीटर्स",
 	"Chunk Size": "चंक आकार",
 	"Ciphers": "",
 	"Citation": "उद्धरण",
 	"Clear memory": "",
+	"Clear Memory": "",
 	"click here": "",
 	"Click here for filter guides.": "",
 	"Click here for help.": "सहायता के लिए यहां क्लिक करें।",
@@ -208,7 +211,7 @@
 	"Constrains effort on reasoning for reasoning models. Only applicable to reasoning models from specific providers that support reasoning effort. (Default: medium)": "",
 	"Contact Admin for WebUI Access": "",
 	"Content": "सामग्री",
-	"Content Extraction": "",
+	"Content Extraction Engine": "",
 	"Context Length": "प्रसंग की लंबाई",
 	"Continue Response": "प्रतिक्रिया जारी रखें",
 	"Continue with {{provider}}": "",
@@ -245,6 +248,7 @@
 	"Current Model": "वर्तमान मॉडल",
 	"Current Password": "वर्तमान पासवर्ड",
 	"Custom": "कस्टम संस्करण",
+	"Danger Zone": "",
 	"Dark": "डार्क",
 	"Database": "डेटाबेस",
 	"December": "डिसेंबर",
@@ -305,6 +309,8 @@
 	"Do not install functions from sources you do not fully trust.": "",
 	"Do not install tools from sources you do not fully trust.": "",
 	"Document": "दस्तावेज़",
+	"Document Intelligence": "",
+	"Document Intelligence endpoint and key required.": "",
 	"Documentation": "",
 	"Documents": "दस्तावेज़",
 	"does not make any external connections, and your data stays securely on your locally hosted server.": "कोई बाहरी कनेक्शन नहीं बनाता है, और आपका डेटा आपके स्थानीय रूप से होस्ट किए गए सर्वर पर सुरक्षित रूप से रहता है।",
@@ -340,6 +346,7 @@
 	"ElevenLabs": "",
 	"Email": "ईमेल",
 	"Embark on adventures": "",
+	"Embedding": "",
 	"Embedding Batch Size": "",
 	"Embedding Model": "मॉडेल अनुकूलन",
 	"Embedding Model Engine": "एंबेडिंग मॉडल इंजन",
@@ -348,15 +355,12 @@
 	"Enable autocomplete generation for chat messages": "",
 	"Enable Code Interpreter": "",
 	"Enable Community Sharing": "समुदाय साझाकरण सक्षम करें",
-	"Enable Google Drive": "",
 	"Enable Memory Locking (mlock) to prevent model data from being swapped out of RAM. This option locks the model's working set of pages into RAM, ensuring that they will not be swapped out to disk. This can help maintain performance by avoiding page faults and ensuring fast data access.": "",
 	"Enable Memory Mapping (mmap) to load model data. This option allows the system to use disk storage as an extension of RAM by treating disk files as if they were in RAM. This can improve model performance by allowing for faster data access. However, it may not work correctly with all systems and can consume a significant amount of disk space.": "",
 	"Enable Message Rating": "",
 	"Enable Mirostat sampling for controlling perplexity. (Default: 0, 0 = Disabled, 1 = Mirostat, 2 = Mirostat 2.0)": "",
 	"Enable New Sign Ups": "नए साइन अप सक्रिय करें",
-	"Enable Web Search": "वेब खोज सक्षम करें",
 	"Enabled": "",
-	"Engine": "",
 	"Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "सुनिश्चित करें कि आपकी CSV फ़ाइल में इस क्रम में 4 कॉलम शामिल हैं: नाम, ईमेल, पासवर्ड, भूमिका।",
 	"Enter {{role}} message here": "यहां {{role}} संदेश दर्ज करें",
 	"Enter a detail about yourself for your LLMs to recall": "अपने एलएलएम को याद करने के लिए अपने बारे में एक विवरण दर्ज करें",
@@ -372,6 +376,8 @@
 	"Enter Chunk Overlap": "चंक ओवरलैप दर्ज करें",
 	"Enter Chunk Size": "खंड आकार दर्ज करें",
 	"Enter description": "",
+	"Enter Document Intelligence Endpoint": "",
+	"Enter Document Intelligence Key": "",
 	"Enter domains separated by commas (e.g., example.com,site.org)": "",
 	"Enter Exa API Key": "",
 	"Enter Github Raw URL": "Github Raw URL दर्ज करें",
@@ -507,9 +513,9 @@
 	"Gemini API Config": "",
 	"Gemini API Key is required.": "",
 	"General": "सामान्य",
-	"General Settings": "सामान्य सेटिंग्स",
 	"Generate an image": "",
 	"Generate Image": "",
+	"Generate prompt pair": "",
 	"Generating search query": "खोज क्वेरी जनरेट करना",
 	"Get started": "",
 	"Get started with {{WEBUI_NAME}}": "",
@@ -565,6 +571,7 @@
 	"Input commands": "इनपुट क命",
 	"Install from Github URL": "Github URL से इंस्टॉल करें",
 	"Instant Auto-Send After Voice Transcription": "",
+	"Integration": "",
 	"Interface": "इंटरफेस",
 	"Invalid file format.": "",
 	"Invalid Tag": "अवैध टैग",
@@ -612,16 +619,17 @@
 	"Listening...": "",
 	"Llama.cpp": "",
 	"LLMs can make mistakes. Verify important information.": "एलएलएम गलतियाँ कर सकते हैं। महत्वपूर्ण जानकारी सत्यापित करें.",
+	"Loader": "",
 	"Loading Kokoro.js...": "",
 	"Local": "",
 	"Local Models": "",
+	"Location access not allowed": "",
 	"Lost": "",
 	"LTR": "LTR",
 	"Made by Open WebUI Community": "OpenWebUI समुदाय द्वारा निर्मित",
 	"Make sure to enclose them with": "उन्हें संलग्न करना सुनिश्चित करें",
 	"Make sure to export a workflow.json file as API format from ComfyUI.": "",
 	"Manage": "",
-	"Manage Arena Models": "",
 	"Manage Direct Connections": "",
 	"Manage Models": "",
 	"Manage Ollama": "",
@@ -689,6 +697,7 @@
 	"No HTML, CSS, or JavaScript content found.": "",
 	"No inference engine with management support found": "",
 	"No knowledge found": "",
+	"No memories to clear": "",
 	"No model IDs": "",
 	"No models found": "",
 	"No models selected": "",
@@ -718,6 +727,7 @@
 	"Ollama API settings updated": "",
 	"Ollama Version": "Ollama Version",
 	"On": "चालू",
+	"OneDrive": "",
 	"Only alphanumeric characters and hyphens are allowed": "",
 	"Only alphanumeric characters and hyphens are allowed in the command string.": "कमांड स्ट्रिंग में केवल अल्फ़ान्यूमेरिक वर्ण और हाइफ़न की अनुमति है।",
 	"Only collections can be edited, create a new knowledge base to edit/add documents.": "",
@@ -788,11 +798,9 @@
 	"Prompt updated successfully": "",
 	"Prompts": "प्रॉम्प्ट",
 	"Prompts Access": "",
-	"Proxy URL": "",
 	"Pull \"{{searchValue}}\" from Ollama.com": "\"{{searchValue}}\" को Ollama.com से खींचें",
 	"Pull a model from Ollama.com": "Ollama.com से एक मॉडल खींचें",
 	"Query Generation Prompt": "",
-	"Query Params": "क्वेरी पैरामीटर",
 	"RAG Template": "RAG टेम्पलेट",
 	"Rating": "",
 	"Re-rank models by topic similarity": "",
@@ -827,6 +835,7 @@
 	"Response notifications cannot be activated as the website permissions have been denied. Please visit your browser settings to grant the necessary access.": "",
 	"Response splitting": "",
 	"Result": "",
+	"Retrieval": "",
 	"Retrieval Query Generation": "",
 	"Rich Text Input for Chat": "",
 	"RK": "",
@@ -880,7 +889,6 @@
 	"Select an Ollama instance": "",
 	"Select Engine": "",
 	"Select Knowledge": "",
-	"Select model": "मॉडल चुनें",
 	"Select only one model to call": "",
 	"Selected model(s) do not support image inputs": "चयनित मॉडल छवि इनपुट का समर्थन नहीं करते हैं",
 	"Semantic distance to query": "",
@@ -957,6 +965,7 @@
 	"Tags Generation": "",
 	"Tags Generation Prompt": "",
 	"Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)": "",
+	"Talk to model": "",
 	"Tap to interrupt": "",
 	"Tasks": "",
 	"Tavily API Key": "",
@@ -1041,6 +1050,7 @@
 	"Top P": "शीर्ष  P",
 	"Transformers": "",
 	"Trouble accessing Ollama?": "Ollama तक पहुँचने में परेशानी हो रही है?",
+	"Trust Proxy Environment": "",
 	"TTS Model": "",
 	"TTS Settings": "TTS सेटिंग्स",
 	"TTS Voice": "",
@@ -1104,7 +1114,6 @@
 	"Warning: Jupyter execution enables arbitrary code execution, posing severe security risks—proceed with extreme caution.": "",
 	"Web": "वेब",
 	"Web API": "",
-	"Web Loader Settings": "वेब लोडर सेटिंग्स",
 	"Web Search": "वेब खोज",
 	"Web Search Engine": "वेब खोज इंजन",
 	"Web Search in Chat": "",
@@ -1146,5 +1155,6 @@
 	"Your account status is currently pending activation.": "",
 	"Your entire contribution will go directly to the plugin developer; Open WebUI does not take any percentage. However, the chosen funding platform might have its own fees.": "",
 	"Youtube": "Youtube",
-	"Youtube Loader Settings": "यूट्यूब लोडर सेटिंग्स"
+	"Youtube Language": "",
+	"Youtube Proxy URL": ""
 }

Some files are not shown because too many files changed in this diff.