Explorar el Código

Merge pull request #5744 from open-webui/dev

0.3.31
Timothy Jaeryang Baek hace 7 meses
padre
commit
c8c41e07e9
Se han modificado 100 ficheros con 4827 adiciones y 1856 borrados
  1. 4 0
      .github/ISSUE_TEMPLATE/bug_report.md
  2. 6 0
      .github/ISSUE_TEMPLATE/feature_request.md
  3. 1 1
      .github/workflows/format-backend.yaml
  4. 2 2
      .github/workflows/format-build-frontend.yaml
  5. 4 1
      .github/workflows/integration-test.yml
  6. 1 1
      .github/workflows/lint-backend.disabled
  7. 36 0
      CHANGELOG.md
  8. 4 4
      Dockerfile
  9. 3 3
      README.md
  10. 137 96
      backend/open_webui/apps/audio/main.py
  11. 5 2
      backend/open_webui/apps/ollama/main.py
  12. 10 6
      backend/open_webui/apps/openai/main.py
  13. 190 0
      backend/open_webui/apps/retrieval/loaders/main.py
  14. 411 658
      backend/open_webui/apps/retrieval/main.py
  15. 81 0
      backend/open_webui/apps/retrieval/models/colbert.py
  16. 59 47
      backend/open_webui/apps/retrieval/utils.py
  17. 4 4
      backend/open_webui/apps/retrieval/vector/connector.py
  18. 54 19
      backend/open_webui/apps/retrieval/vector/dbs/chroma.py
  19. 94 13
      backend/open_webui/apps/retrieval/vector/dbs/milvus.py
  20. 0 0
      backend/open_webui/apps/retrieval/vector/main.py
  21. 1 1
      backend/open_webui/apps/retrieval/web/brave.py
  22. 1 1
      backend/open_webui/apps/retrieval/web/duckduckgo.py
  23. 1 1
      backend/open_webui/apps/retrieval/web/google_pse.py
  24. 1 1
      backend/open_webui/apps/retrieval/web/jina_search.py
  25. 0 0
      backend/open_webui/apps/retrieval/web/main.py
  26. 1 1
      backend/open_webui/apps/retrieval/web/searchapi.py
  27. 1 1
      backend/open_webui/apps/retrieval/web/searxng.py
  28. 1 1
      backend/open_webui/apps/retrieval/web/serper.py
  29. 1 1
      backend/open_webui/apps/retrieval/web/serply.py
  30. 1 1
      backend/open_webui/apps/retrieval/web/serpstack.py
  31. 1 1
      backend/open_webui/apps/retrieval/web/tavily.py
  32. 0 0
      backend/open_webui/apps/retrieval/web/testdata/brave.json
  33. 0 0
      backend/open_webui/apps/retrieval/web/testdata/google_pse.json
  34. 0 0
      backend/open_webui/apps/retrieval/web/testdata/searchapi.json
  35. 0 0
      backend/open_webui/apps/retrieval/web/testdata/searxng.json
  36. 0 0
      backend/open_webui/apps/retrieval/web/testdata/serper.json
  37. 0 0
      backend/open_webui/apps/retrieval/web/testdata/serply.json
  38. 0 0
      backend/open_webui/apps/retrieval/web/testdata/serpstack.json
  39. 97 0
      backend/open_webui/apps/retrieval/web/utils.py
  40. 24 2
      backend/open_webui/apps/webui/internal/db.py
  41. 6 3
      backend/open_webui/apps/webui/main.py
  42. 63 3
      backend/open_webui/apps/webui/models/files.py
  43. 152 0
      backend/open_webui/apps/webui/models/knowledge.py
  44. 6 5
      backend/open_webui/apps/webui/routers/chats.py
  45. 92 11
      backend/open_webui/apps/webui/routers/files.py
  46. 348 0
      backend/open_webui/apps/webui/routers/knowledge.py
  47. 1 1
      backend/open_webui/apps/webui/routers/memories.py
  48. 1 20
      backend/open_webui/config.py
  49. 10 1
      backend/open_webui/constants.py
  50. 50 0
      backend/open_webui/env.py
  51. 97 69
      backend/open_webui/main.py
  52. 0 19
      backend/open_webui/migrations/scripts/revision.py
  53. 6 0
      backend/open_webui/migrations/util.py
  54. 80 0
      backend/open_webui/migrations/versions/6a39f3d8e55c_add_knowledge_table.py
  55. 32 0
      backend/open_webui/migrations/versions/c0fbf31ca0db_update_file_table.py
  56. 1 1
      backend/open_webui/utils/misc.py
  57. 4 0
      backend/open_webui/utils/schemas.py
  58. 9 7
      backend/requirements.txt
  59. BIN
      bun.lockb
  60. 460 49
      package-lock.json
  61. 5 3
      package.json
  62. 4 2
      pyproject.toml
  63. 6 0
      src/app.css
  64. 1 0
      src/app.html
  65. 60 0
      src/lib/apis/files/index.ts
  66. 308 0
      src/lib/apis/knowledge/index.ts
  67. 121 174
      src/lib/apis/retrieval/index.ts
  68. 2 0
      src/lib/apis/streaming/index.ts
  69. 9 1
      src/lib/components/AddFilesPlaceholder.svelte
  70. 4 1
      src/lib/components/admin/Settings/Audio.svelte
  71. 8 70
      src/lib/components/admin/Settings/Documents.svelte
  72. 2 2
      src/lib/components/admin/Settings/WebSearch.svelte
  73. 305 0
      src/lib/components/chat/Artifacts.svelte
  74. 166 41
      src/lib/components/chat/Chat.svelte
  75. 67 25
      src/lib/components/chat/ChatControls.svelte
  76. 5 2
      src/lib/components/chat/ChatPlaceholder.svelte
  77. 6 1
      src/lib/components/chat/Controls/Controls.svelte
  78. 32 81
      src/lib/components/chat/MessageInput.svelte
  79. 36 30
      src/lib/components/chat/MessageInput/CallOverlay.svelte
  80. 24 15
      src/lib/components/chat/MessageInput/Commands.svelte
  81. 92 77
      src/lib/components/chat/MessageInput/Commands/Knowledge.svelte
  82. 31 11
      src/lib/components/chat/MessageInput/Commands/Models.svelte
  83. 2 2
      src/lib/components/chat/MessageInput/Commands/Prompts.svelte
  84. 0 118
      src/lib/components/chat/MessageInput/Suggestions.svelte
  85. 10 5
      src/lib/components/chat/Messages.svelte
  86. 113 65
      src/lib/components/chat/Messages/CodeBlock.svelte
  87. 208 0
      src/lib/components/chat/Messages/ContentRenderer.svelte
  88. 8 19
      src/lib/components/chat/Messages/Error.svelte
  89. 20 3
      src/lib/components/chat/Messages/Markdown.svelte
  90. 35 8
      src/lib/components/chat/Messages/Markdown/MarkdownTokens.svelte
  91. 6 0
      src/lib/components/chat/Messages/Message.svelte
  92. 8 4
      src/lib/components/chat/Messages/MultiResponseMessages.svelte
  93. 78 27
      src/lib/components/chat/Messages/ResponseMessage.svelte
  94. 2 1
      src/lib/components/chat/Messages/UserMessage.svelte
  95. 4 1
      src/lib/components/chat/Overview.svelte
  96. 227 0
      src/lib/components/chat/Placeholder.svelte
  97. 27 0
      src/lib/components/chat/Settings/Interface.svelte
  98. 53 0
      src/lib/components/chat/Suggestions.svelte
  99. 18 0
      src/lib/components/common/Badge.svelte
  100. 59 9
      src/lib/components/common/CodeEditor.svelte

+ 4 - 0
.github/ISSUE_TEMPLATE/bug_report.md

@@ -8,6 +8,10 @@ assignees: ''
 
 # Bug Report
 
+**Important: Before submitting a bug report, please check whether a similar issue or feature request has already been posted in the Issues or Discussions section. It's likely we're already tracking it. In case of uncertainty, initiate a discussion post first. This helps us all to efficiently focus on improving the project.**
+
+**Let's collaborate respectfully. If you bring negativity, please understand our capacity to engage may be limited. If you're open to learning and communicating constructively, we're more than happy to assist you. Remember, Open WebUI is a volunteer-driven project maintained by a single maintainer, supported by our amazing contributors who also manage full-time jobs. We respect your time; please respect ours. If you have an issue, We highly encourage you to submit a pull request or to fork the project. We actively work to prevent contributor burnout to preserve the quality and continuity of Open WebUI.**
+
 ## Installation Method
 
 [Describe the method you used to install the project, e.g., git clone, Docker, pip, etc.]

+ 6 - 0
.github/ISSUE_TEMPLATE/feature_request.md

@@ -6,6 +6,12 @@ labels: ''
 assignees: ''
 ---
 
+# Feature Request
+
+**Important: Before submitting a feature request, please check whether a similar issue or feature request has already been posted in the Issues or Discussions section. It's likely we're already tracking it. In case of uncertainty, initiate a discussion post first. This helps us all to efficiently focus on improving the project.**
+
+**Let's collaborate respectfully. If you bring negativity, please understand our capacity to engage may be limited. If you're open to learning and communicating constructively, we're more than happy to assist you. Remember, Open WebUI is a volunteer-driven project maintained by a single maintainer, supported by our amazing contributors who also manage full-time jobs. We respect your time; please respect ours. If you have an issue, We highly encourage you to submit a pull request or to fork the project. We actively work to prevent contributor burnout to preserve the quality and continuity of Open WebUI.**
+
 **Is your feature request related to a problem? Please describe.**
 A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
 

+ 1 - 1
.github/workflows/format-backend.yaml

@@ -23,7 +23,7 @@ jobs:
       - uses: actions/checkout@v4
 
       - name: Set up Python
-        uses: actions/setup-python@v4
+        uses: actions/setup-python@v5
         with:
           python-version: ${{ matrix.python-version }}
 

+ 2 - 2
.github/workflows/format-build-frontend.yaml

@@ -21,7 +21,7 @@ jobs:
       - name: Setup Node.js
         uses: actions/setup-node@v4
         with:
-          node-version: '20' # Or specify any other version you want to use
+          node-version: '22' # Or specify any other version you want to use
 
       - name: Install Dependencies
         run: npm install
@@ -48,7 +48,7 @@ jobs:
       - name: Setup Node.js
         uses: actions/setup-node@v4
         with:
-          node-version: '20'
+          node-version: '22'
 
       - name: Install Dependencies
         run: npm ci

+ 4 - 1
.github/workflows/integration-test.yml

@@ -85,7 +85,7 @@ jobs:
   #     - uses: actions/checkout@v4
 
   #     - name: Set up Python
-  #       uses: actions/setup-python@v4
+  #       uses: actions/setup-python@v5
   #       with:
   #         python-version: ${{ matrix.python-version }}
 
@@ -182,6 +182,9 @@ jobs:
           WEBUI_SECRET_KEY: secret-key
           GLOBAL_LOG_LEVEL: debug
           DATABASE_URL: postgresql://postgres:postgres@localhost:5432/postgres
+          DATABASE_POOL_SIZE: 10
+          DATABASE_POOL_MAX_OVERFLOW: 10
+          DATABASE_POOL_TIMEOUT: 30
         run: |
           cd backend
           uvicorn open_webui.main:app --port "8081" --forwarded-allow-ips '*' &

+ 1 - 1
.github/workflows/lint-backend.disabled

@@ -16,7 +16,7 @@ jobs:
     steps:
       - uses: actions/checkout@v4
       - name: Use Python
-        uses: actions/setup-python@v4
+        uses: actions/setup-python@v5
       - name: Use Bun
         uses: oven-sh/setup-bun@v1
       - name: Install dependencies

+ 36 - 0
CHANGELOG.md

@@ -5,6 +5,42 @@ All notable changes to this project will be documented in this file.
 The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/),
 and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
 
+## [0.3.31] - 2024-10-06
+
+### Added
+
+- **📚 Knowledge Feature**: Reimagined documents feature, now more performant with a better UI for enhanced organization; includes streamlined API integration for Retrieval-Augmented Generation (RAG). Detailed documentation forthcoming: https://docs.openwebui.com/
+- **🌐 New Landing Page**: Freshly designed landing page; toggle between the new UI and the classic chat UI from Settings > Interface for a personalized experience.
+- **📁 Full Document Retrieval Mode**: Toggle between full document retrieval or traditional snippets by clicking on the file item. This mode enhances document capabilities and supports comprehensive tasks like summarization by utilizing the entire content instead of RAG.
+- **📄 Extracted File Content Display**: View extracted content directly by clicking on the file item, simplifying file analysis.
+- **🎨 Artifacts Feature**: Render web content and SVGs directly in the interface, supporting quick iterations and live changes.
+- **🖊️ Editable Code Blocks**: Supercharged code blocks now allow live editing directly in the LLM response, with live reloads supported by artifacts.
+- **🔧 Code Block Enhancements**: Introduced a floating copy button in code blocks to facilitate easier code copying without scrolling.
+- **🔍 SVG Pan/Zoom**: Enhanced interaction with SVG images, including Mermaid diagrams, via new pan and zoom capabilities.
+- **🔍 Text Select Quick Actions**: New floating buttons appear when text is highlighted in LLM responses, offering deeper interactions like "Ask a Question" or "Explain".
+- **🗃️ Database Pool Configuration**: Enhanced database handling to support scalable user growth.
+- **🔊 Experimental Audio Compression**: Compress audio files to navigate around the 25MB limit for OpenAI's speech-to-text processing.
+- **🔍 Query Embedding**: Adjusted embedding behavior to enhance system performance by not repeating query embedding.
+- **💾 Lazy Load Optimizations**: Implemented lazy loading of large dependencies to minimize initial memory usage, boosting performance.
+- **🍏 Apple Touch Icon Support**: Optimizes the display of icons for web bookmarks on Apple mobile devices.
+- **🔽 Expandable Content Markdown Support**: Introducing 'details', 'summary' tag support for creating expandable content sections in markdown, facilitating cleaner, organized documentation and interactive content display.
+
+### Fixed
+
+- **🔘 Action Button Issue**: Resolved a bug where action buttons were not functioning, enhancing UI reliability.
+- **🔄 Multi-Model Chat Loop**: Fixed an infinite loop issue in multi-model chat environments, ensuring smoother chat operations.
+- **📄 Chat PDF/TXT Export Issue**: Resolved problems with exporting chat logs to PDF and TXT formats.
+- **🔊 Call to Text-to-Speech Issues**: Rectified problems with text-to-speech functions to improve audio interactions.
+
+### Changed
+
+- **⚙️ Endpoint Renaming**: Renamed 'rag' endpoints to 'retrieval' for clearer function description.
+- **🎨 Styling and Interface Updates**: Multiple refinements across the platform to enhance visual appeal and user interaction.
+
+### Removed
+
+- **🗑️ Deprecated 'DOCS_DIR'**: Removed the outdated 'docs_dir' variable in favor of more direct file management solutions, with direct file directory syncing and API uploads for a more integrated experience.
+
 ## [0.3.30] - 2024-09-26
 
 ### Fixed

+ 4 - 4
Dockerfile

@@ -17,7 +17,7 @@ ARG UID=0
 ARG GID=0
 
 ######## WebUI frontend ########
-FROM --platform=$BUILDPLATFORM node:21-alpine3.19 as build
+FROM --platform=$BUILDPLATFORM node:22-alpine3.20 AS build
 ARG BUILD_HASH
 
 WORKDIR /app
@@ -30,7 +30,7 @@ ENV APP_BUILD_HASH=${BUILD_HASH}
 RUN npm run build
 
 ######## WebUI backend ########
-FROM python:3.11-slim-bookworm as base
+FROM python:3.11-slim-bookworm AS base
 
 # Use args
 ARG USE_CUDA
@@ -82,7 +82,7 @@ ENV HF_HOME="/app/backend/data/cache/embedding/models"
 
 WORKDIR /app/backend
 
-ENV HOME /root
+ENV HOME=/root
 # Create user and group if not root
 RUN if [ $UID -ne 0 ]; then \
     if [ $GID -ne 0 ]; then \
@@ -161,6 +161,6 @@ USER $UID:$GID
 
 ARG BUILD_HASH
 ENV WEBUI_BUILD_VERSION=${BUILD_HASH}
-ENV DOCKER true
+ENV DOCKER=true
 
 CMD [ "bash", "start.sh"]

+ 3 - 3
README.md

@@ -1,4 +1,4 @@
-# Open WebUI (Formerly Ollama WebUI) 👋
+# Open WebUI 👋
 
 ![GitHub stars](https://img.shields.io/github/stars/open-webui/open-webui?style=social)
 ![GitHub forks](https://img.shields.io/github/forks/open-webui/open-webui?style=social)
@@ -170,7 +170,7 @@ docker run --rm --volume /var/run/docker.sock:/var/run/docker.sock containrrr/wa
 
 In the last part of the command, replace `open-webui` with your container name if it is different.
 
-Check our Migration Guide available in our [Open WebUI Documentation](https://docs.openwebui.com/migration/).
+Check our Migration Guide available in our [Open WebUI Documentation](https://docs.openwebui.com/tutorials/migration/).
 
 ### Using the Dev Branch 🌙
 
@@ -220,4 +220,4 @@ If you have any questions, suggestions, or need assistance, please open an issue
 
 ---
 
-Created by [Timothy J. Baek](https://github.com/tjbck) - Let's make Open WebUI even more amazing together! 💪
+Created by [Timothy Jaeryang Baek](https://github.com/tjbck) - Let's make Open WebUI even more amazing together! 💪

+ 137 - 96
backend/open_webui/apps/audio/main.py

@@ -5,6 +5,8 @@ import os
 import uuid
 from functools import lru_cache
 from pathlib import Path
+from pydub import AudioSegment
+from pydub.silence import split_on_silence
 
 import requests
 from open_webui.config import (
@@ -35,7 +37,12 @@ from fastapi import Depends, FastAPI, File, HTTPException, Request, UploadFile,
 from fastapi.middleware.cors import CORSMiddleware
 from fastapi.responses import FileResponse
 from pydantic import BaseModel
-from open_webui.utils.utils import get_admin_user, get_current_user, get_verified_user
+from open_webui.utils.utils import get_admin_user, get_verified_user
+
+# Constants
+MAX_FILE_SIZE_MB = 25
+MAX_FILE_SIZE = MAX_FILE_SIZE_MB * 1024 * 1024  # Convert MB to bytes
+
 
 log = logging.getLogger(__name__)
 log.setLevel(SRC_LOG_LEVELS["AUDIO"])
@@ -353,67 +360,77 @@ async def speech(request: Request, user=Depends(get_verified_user)):
             )
 
 
-@app.post("/transcriptions")
-def transcribe(
-    file: UploadFile = File(...),
-    user=Depends(get_current_user),
-):
-    log.info(f"file.content_type: {file.content_type}")
+def transcribe(file_path):
+    print("transcribe", file_path)
+    filename = os.path.basename(file_path)
+    file_dir = os.path.dirname(file_path)
+    id = filename.split(".")[0]
 
-    if file.content_type not in ["audio/mpeg", "audio/wav", "audio/ogg", "audio/x-m4a"]:
-        raise HTTPException(
-            status_code=status.HTTP_400_BAD_REQUEST,
-            detail=ERROR_MESSAGES.FILE_NOT_SUPPORTED,
+    if app.state.config.STT_ENGINE == "":
+        from faster_whisper import WhisperModel
+
+        whisper_kwargs = {
+            "model_size_or_path": WHISPER_MODEL,
+            "device": whisper_device_type,
+            "compute_type": "int8",
+            "download_root": WHISPER_MODEL_DIR,
+            "local_files_only": not WHISPER_MODEL_AUTO_UPDATE,
+        }
+
+        log.debug(f"whisper_kwargs: {whisper_kwargs}")
+
+        try:
+            model = WhisperModel(**whisper_kwargs)
+        except Exception:
+            log.warning(
+                "WhisperModel initialization failed, attempting download with local_files_only=False"
+            )
+            whisper_kwargs["local_files_only"] = False
+            model = WhisperModel(**whisper_kwargs)
+
+        segments, info = model.transcribe(file_path, beam_size=5)
+        log.info(
+            "Detected language '%s' with probability %f"
+            % (info.language, info.language_probability)
         )
 
-    try:
-        ext = file.filename.split(".")[-1]
+        transcript = "".join([segment.text for segment in list(segments)])
 
-        id = uuid.uuid4()
-        filename = f"{id}.{ext}"
+        data = {"text": transcript.strip()}
 
-        file_dir = f"{CACHE_DIR}/audio/transcriptions"
-        os.makedirs(file_dir, exist_ok=True)
-        file_path = f"{file_dir}/{filename}"
+        # save the transcript to a json file
+        transcript_file = f"{file_dir}/{id}.json"
+        with open(transcript_file, "w") as f:
+            json.dump(data, f)
 
-        print(filename)
+        print(data)
+        return data
+    elif app.state.config.STT_ENGINE == "openai":
+        if is_mp4_audio(file_path):
+            print("is_mp4_audio")
+            os.rename(file_path, file_path.replace(".wav", ".mp4"))
+            # Convert MP4 audio file to WAV format
+            convert_mp4_to_wav(file_path.replace(".wav", ".mp4"), file_path)
 
-        contents = file.file.read()
-        with open(file_path, "wb") as f:
-            f.write(contents)
-            f.close()
-
-        if app.state.config.STT_ENGINE == "":
-            from faster_whisper import WhisperModel
-
-            whisper_kwargs = {
-                "model_size_or_path": WHISPER_MODEL,
-                "device": whisper_device_type,
-                "compute_type": "int8",
-                "download_root": WHISPER_MODEL_DIR,
-                "local_files_only": not WHISPER_MODEL_AUTO_UPDATE,
-            }
-
-            log.debug(f"whisper_kwargs: {whisper_kwargs}")
-
-            try:
-                model = WhisperModel(**whisper_kwargs)
-            except Exception:
-                log.warning(
-                    "WhisperModel initialization failed, attempting download with local_files_only=False"
-                )
-                whisper_kwargs["local_files_only"] = False
-                model = WhisperModel(**whisper_kwargs)
+        headers = {"Authorization": f"Bearer {app.state.config.STT_OPENAI_API_KEY}"}
 
-            segments, info = model.transcribe(file_path, beam_size=5)
-            log.info(
-                "Detected language '%s' with probability %f"
-                % (info.language, info.language_probability)
+        files = {"file": (filename, open(file_path, "rb"))}
+        data = {"model": app.state.config.STT_MODEL}
+
+        print(files, data)
+
+        r = None
+        try:
+            r = requests.post(
+                url=f"{app.state.config.STT_OPENAI_API_BASE_URL}/audio/transcriptions",
+                headers=headers,
+                files=files,
+                data=data,
             )
 
-            transcript = "".join([segment.text for segment in list(segments)])
+            r.raise_for_status()
 
-            data = {"text": transcript.strip()}
+            data = r.json()
 
             # save the transcript to a json file
             transcript_file = f"{file_dir}/{id}.json"
@@ -421,58 +438,82 @@ def transcribe(
                 json.dump(data, f)
 
             print(data)
-
             return data
+        except Exception as e:
+            log.exception(e)
+            error_detail = "Open WebUI: Server Connection Error"
+            if r is not None:
+                try:
+                    res = r.json()
+                    if "error" in res:
+                        error_detail = f"External: {res['error']['message']}"
+                except Exception:
+                    error_detail = f"External: {e}"
 
-        elif app.state.config.STT_ENGINE == "openai":
-            if is_mp4_audio(file_path):
-                print("is_mp4_audio")
-                os.rename(file_path, file_path.replace(".wav", ".mp4"))
-                # Convert MP4 audio file to WAV format
-                convert_mp4_to_wav(file_path.replace(".wav", ".mp4"), file_path)
+            raise error_detail
 
-            headers = {"Authorization": f"Bearer {app.state.config.STT_OPENAI_API_KEY}"}
 
-            files = {"file": (filename, open(file_path, "rb"))}
-            data = {"model": app.state.config.STT_MODEL}
+@app.post("/transcriptions")
+def transcription(
+    file: UploadFile = File(...),
+    user=Depends(get_verified_user),
+):
+    log.info(f"file.content_type: {file.content_type}")
 
-            print(files, data)
+    if file.content_type not in ["audio/mpeg", "audio/wav", "audio/ogg", "audio/x-m4a"]:
+        raise HTTPException(
+            status_code=status.HTTP_400_BAD_REQUEST,
+            detail=ERROR_MESSAGES.FILE_NOT_SUPPORTED,
+        )
 
-            r = None
-            try:
-                r = requests.post(
-                    url=f"{app.state.config.STT_OPENAI_API_BASE_URL}/audio/transcriptions",
-                    headers=headers,
-                    files=files,
-                    data=data,
-                )
+    try:
+        ext = file.filename.split(".")[-1]
+        id = uuid.uuid4()
 
-                r.raise_for_status()
-
-                data = r.json()
-
-                # save the transcript to a json file
-                transcript_file = f"{file_dir}/{id}.json"
-                with open(transcript_file, "w") as f:
-                    json.dump(data, f)
-
-                print(data)
-                return data
-            except Exception as e:
-                log.exception(e)
-                error_detail = "Open WebUI: Server Connection Error"
-                if r is not None:
-                    try:
-                        res = r.json()
-                        if "error" in res:
-                            error_detail = f"External: {res['error']['message']}"
-                    except Exception:
-                        error_detail = f"External: {e}"
-
-                raise HTTPException(
-                    status_code=r.status_code if r != None else 500,
-                    detail=error_detail,
-                )
+        filename = f"{id}.{ext}"
+        contents = file.file.read()
+
+        file_dir = f"{CACHE_DIR}/audio/transcriptions"
+        os.makedirs(file_dir, exist_ok=True)
+        file_path = f"{file_dir}/{filename}"
+
+        with open(file_path, "wb") as f:
+            f.write(contents)
+
+        try:
+            if os.path.getsize(file_path) > MAX_FILE_SIZE:  # file is bigger than 25MB
+                log.debug(f"File size is larger than {MAX_FILE_SIZE_MB}MB")
+                audio = AudioSegment.from_file(file_path)
+                audio = audio.set_frame_rate(16000).set_channels(1)  # Compress audio
+                compressed_path = f"{file_dir}/{id}_compressed.opus"
+                audio.export(compressed_path, format="opus", bitrate="32k")
+                log.debug(f"Compressed audio to {compressed_path}")
+                file_path = compressed_path
+
+                if (
+                    os.path.getsize(file_path) > MAX_FILE_SIZE
+                ):  # Still larger than 25MB after compression
+                    log.debug(
+                        f"Compressed file size is still larger than {MAX_FILE_SIZE_MB}MB: {os.path.getsize(file_path)}"
+                    )
+                    raise HTTPException(
+                        status_code=status.HTTP_400_BAD_REQUEST,
+                        detail=ERROR_MESSAGES.FILE_TOO_LARGE(
+                            size=f"{MAX_FILE_SIZE_MB}MB"
+                        ),
+                    )
+
+                data = transcribe(file_path)
+            else:
+                data = transcribe(file_path)
+
+            return data
+        except Exception as e:
+            log.exception(e)
+            raise HTTPException(
+                status_code=status.HTTP_400_BAD_REQUEST,
+                detail=ERROR_MESSAGES.DEFAULT(e),
+            )
 
     except Exception as e:
         log.exception(e)

+ 5 - 2
backend/open_webui/apps/ollama/main.py

@@ -12,7 +12,6 @@ import aiohttp
 import requests
 from open_webui.apps.webui.models.models import Models
 from open_webui.config import (
-    AIOHTTP_CLIENT_TIMEOUT,
     CORS_ALLOW_ORIGIN,
     ENABLE_MODEL_FILTER,
     ENABLE_OLLAMA_API,
@@ -21,6 +20,9 @@ from open_webui.config import (
     UPLOAD_DIR,
     AppConfig,
 )
+from open_webui.env import AIOHTTP_CLIENT_TIMEOUT
+
+
 from open_webui.constants import ERROR_MESSAGES
 from open_webui.env import SRC_LOG_LEVELS
 from fastapi import Depends, FastAPI, File, HTTPException, Request, UploadFile
@@ -117,7 +119,7 @@ async def update_ollama_api_url(form_data: UrlUpdateForm, user=Depends(get_admin
 
 
 async def fetch_url(url):
-    timeout = aiohttp.ClientTimeout(total=5)
+    timeout = aiohttp.ClientTimeout(total=3)
     try:
         async with aiohttp.ClientSession(timeout=timeout, trust_env=True) as session:
             async with session.get(url) as response:
@@ -787,6 +789,7 @@ async def generate_chat_completion(
 ):
     payload = {**form_data.model_dump(exclude_none=True)}
     log.debug(f"{payload = }")
+
     if "metadata" in payload:
         del payload["metadata"]
 

+ 10 - 6
backend/open_webui/apps/openai/main.py

@@ -9,7 +9,6 @@ import aiohttp
 import requests
 from open_webui.apps.webui.models.models import Models
 from open_webui.config import (
-    AIOHTTP_CLIENT_TIMEOUT,
     CACHE_DIR,
     CORS_ALLOW_ORIGIN,
     ENABLE_MODEL_FILTER,
@@ -19,6 +18,8 @@ from open_webui.config import (
     OPENAI_API_KEYS,
     AppConfig,
 )
+from open_webui.env import AIOHTTP_CLIENT_TIMEOUT
+
 from open_webui.constants import ERROR_MESSAGES
 from open_webui.env import SRC_LOG_LEVELS
 from fastapi import Depends, FastAPI, HTTPException, Request
@@ -27,7 +28,6 @@ from fastapi.responses import FileResponse, StreamingResponse
 from pydantic import BaseModel
 from starlette.background import BackgroundTask
 
-
 from open_webui.utils.payload import (
     apply_model_params_to_body_openai,
     apply_model_system_prompt_to_body,
@@ -47,7 +47,6 @@ app.add_middleware(
     allow_headers=["*"],
 )
 
-
 app.state.config = AppConfig()
 
 app.state.config.ENABLE_MODEL_FILTER = ENABLE_MODEL_FILTER
@@ -180,7 +179,7 @@ async def speech(request: Request, user=Depends(get_verified_user)):
 
 
 async def fetch_url(url, key):
-    timeout = aiohttp.ClientTimeout(total=5)
+    timeout = aiohttp.ClientTimeout(total=3)
     try:
         headers = {"Authorization": f"Bearer {key}"}
         async with aiohttp.ClientSession(timeout=timeout, trust_env=True) as session:
@@ -407,20 +406,25 @@ async def generate_chat_completion(
 
     url = app.state.config.OPENAI_API_BASE_URLS[idx]
     key = app.state.config.OPENAI_API_KEYS[idx]
+    is_o1 = payload["model"].lower().startswith("o1-")
 
     # Change max_completion_tokens to max_tokens (Backward compatible)
-    if "api.openai.com" not in url and not payload["model"].lower().startswith("o1-"):
+    if "api.openai.com" not in url and not is_o1:
         if "max_completion_tokens" in payload:
             # Remove "max_completion_tokens" from the payload
             payload["max_tokens"] = payload["max_completion_tokens"]
             del payload["max_completion_tokens"]
     else:
-        if payload["model"].lower().startswith("o1-") and "max_tokens" in payload:
+        if is_o1 and "max_tokens" in payload:
             payload["max_completion_tokens"] = payload["max_tokens"]
             del payload["max_tokens"]
         if "max_tokens" in payload and "max_completion_tokens" in payload:
             del payload["max_tokens"]
 
+    # Fix: O1 does not support the "system" parameter, Modify "system" to "user"
+    if is_o1 and payload["messages"][0]["role"] == "system":
+        payload["messages"][0]["role"] = "user"
+
     # Convert the modified body back to JSON
     payload = json.dumps(payload)
 

+ 190 - 0
backend/open_webui/apps/retrieval/loaders/main.py

@@ -0,0 +1,190 @@
+import requests
+import logging
+import ftfy
+
+from langchain_community.document_loaders import (
+    BSHTMLLoader,
+    CSVLoader,
+    Docx2txtLoader,
+    OutlookMessageLoader,
+    PyPDFLoader,
+    TextLoader,
+    UnstructuredEPubLoader,
+    UnstructuredExcelLoader,
+    UnstructuredMarkdownLoader,
+    UnstructuredPowerPointLoader,
+    UnstructuredRSTLoader,
+    UnstructuredXMLLoader,
+    YoutubeLoader,
+)
+from langchain_core.documents import Document
+from open_webui.env import SRC_LOG_LEVELS
+
+log = logging.getLogger(__name__)
+log.setLevel(SRC_LOG_LEVELS["RAG"])
+
+known_source_ext = [
+    "go",
+    "py",
+    "java",
+    "sh",
+    "bat",
+    "ps1",
+    "cmd",
+    "js",
+    "ts",
+    "css",
+    "cpp",
+    "hpp",
+    "h",
+    "c",
+    "cs",
+    "sql",
+    "log",
+    "ini",
+    "pl",
+    "pm",
+    "r",
+    "dart",
+    "dockerfile",
+    "env",
+    "php",
+    "hs",
+    "hsc",
+    "lua",
+    "nginxconf",
+    "conf",
+    "m",
+    "mm",
+    "plsql",
+    "perl",
+    "rb",
+    "rs",
+    "db2",
+    "scala",
+    "bash",
+    "swift",
+    "vue",
+    "svelte",
+    "msg",
+    "ex",
+    "exs",
+    "erl",
+    "tsx",
+    "jsx",
+    "hs",
+    "lhs",
+]
+
+
+class TikaLoader:
+    # Extracts text from arbitrary file formats by delegating to an
+    # Apache Tika server's PUT /tika/text endpoint.
+    def __init__(self, url, file_path, mime_type=None):
+        # url: base URL of the Tika server; file_path: local file to send;
+        # mime_type: optional Content-Type hint forwarded to Tika.
+        self.url = url
+        self.file_path = file_path
+        self.mime_type = mime_type
+
+    def load(self) -> list[Document]:
+        # Read the raw file bytes and submit them to Tika for extraction.
+        with open(self.file_path, "rb") as f:
+            data = f.read()
+
+        if self.mime_type is not None:
+            headers = {"Content-Type": self.mime_type}
+        else:
+            headers = {}
+
+        # Normalize the endpoint so it always ends with ".../tika/text".
+        endpoint = self.url
+        if not endpoint.endswith("/"):
+            endpoint += "/"
+        endpoint += "tika/text"
+
+        r = requests.put(endpoint, data=data, headers=headers)
+
+        if r.ok:
+            raw_metadata = r.json()
+            text = raw_metadata.get("X-TIKA:content", "<No text content found>")
+
+            # NOTE(review): the request-header dict is reused as document
+            # metadata here, so only Content-Type is carried over from
+            # Tika's response — confirm whether more metadata should be kept.
+            if "Content-Type" in raw_metadata:
+                headers["Content-Type"] = raw_metadata["Content-Type"]
+
+            log.info("Tika extracted text: %s", text)
+
+            return [Document(page_content=text, metadata=headers)]
+        else:
+            # Surface HTTP-level failures from Tika to the caller.
+            raise Exception(f"Error calling Tika: {r.reason}")
+
+
+class Loader:
+    # Facade that selects the appropriate langchain document loader for a
+    # file based on the configured engine ("tika" or default), the file
+    # extension, and the MIME content type.  All loaded text is passed
+    # through ftfy.fix_text to repair encoding artifacts (mojibake).
+    def __init__(self, engine: str = "", **kwargs):
+        # kwargs may carry engine settings, e.g. TIKA_SERVER_URL and
+        # PDF_EXTRACT_IMAGES (see _get_loader).
+        self.engine = engine
+        self.kwargs = kwargs
+
+    def load(
+        self, filename: str, file_content_type: str, file_path: str
+    ) -> list[Document]:
+        # Resolve a loader, run it, and normalize each document's text.
+        loader = self._get_loader(filename, file_content_type, file_path)
+        docs = loader.load()
+
+        return [
+            Document(
+                page_content=ftfy.fix_text(doc.page_content), metadata=doc.metadata
+            )
+            for doc in docs
+        ]
+
+    def _get_loader(self, filename: str, file_content_type: str, file_path: str):
+        # Extension comparison is case-insensitive.
+        file_ext = filename.split(".")[-1].lower()
+
+        if self.engine == "tika" and self.kwargs.get("TIKA_SERVER_URL"):
+            # Plain source/text files bypass Tika and are read directly.
+            if file_ext in known_source_ext or (
+                file_content_type and file_content_type.find("text/") >= 0
+            ):
+                loader = TextLoader(file_path, autodetect_encoding=True)
+            else:
+                loader = TikaLoader(
+                    url=self.kwargs.get("TIKA_SERVER_URL"),
+                    file_path=file_path,
+                    mime_type=file_content_type,
+                )
+        else:
+            # Default engine: dispatch per format.
+            if file_ext == "pdf":
+                loader = PyPDFLoader(
+                    file_path, extract_images=self.kwargs.get("PDF_EXTRACT_IMAGES")
+                )
+            elif file_ext == "csv":
+                loader = CSVLoader(file_path)
+            elif file_ext == "rst":
+                loader = UnstructuredRSTLoader(file_path, mode="elements")
+            elif file_ext == "xml":
+                loader = UnstructuredXMLLoader(file_path)
+            elif file_ext in ["htm", "html"]:
+                loader = BSHTMLLoader(file_path, open_encoding="unicode_escape")
+            elif file_ext == "md":
+                loader = UnstructuredMarkdownLoader(file_path)
+            elif file_content_type == "application/epub+zip":
+                loader = UnstructuredEPubLoader(file_path)
+            elif (
+                file_content_type
+                == "application/vnd.openxmlformats-officedocument.wordprocessingml.document"
+                or file_ext == "docx"
+            ):
+                loader = Docx2txtLoader(file_path)
+            elif file_content_type in [
+                "application/vnd.ms-excel",
+                "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
+            ] or file_ext in ["xls", "xlsx"]:
+                loader = UnstructuredExcelLoader(file_path)
+            elif file_content_type in [
+                "application/vnd.ms-powerpoint",
+                "application/vnd.openxmlformats-officedocument.presentationml.presentation",
+            ] or file_ext in ["ppt", "pptx"]:
+                loader = UnstructuredPowerPointLoader(file_path)
+            elif file_ext == "msg":
+                loader = OutlookMessageLoader(file_path)
+            elif file_ext in known_source_ext or (
+                file_content_type and file_content_type.find("text/") >= 0
+            ):
+                loader = TextLoader(file_path, autodetect_encoding=True)
+            else:
+                # Fallback is identical to the text branch above; any
+                # unrecognized format is read as plain text.
+                loader = TextLoader(file_path, autodetect_encoding=True)
+
+        return loader

La diferencia del archivo ha sido suprimida porque es demasiado grande
+ 411 - 658
backend/open_webui/apps/retrieval/main.py


+ 81 - 0
backend/open_webui/apps/retrieval/models/colbert.py

@@ -0,0 +1,81 @@
+import os
+import torch
+import numpy as np
+from colbert.infra import ColBERTConfig
+from colbert.modeling.checkpoint import Checkpoint
+
+
+class ColBERT:
+    # Late-interaction reranker wrapping a ColBERT checkpoint, exposing a
+    # CrossEncoder-style predict() over (query, document) sentence pairs.
+    def __init__(self, name, **kwargs) -> None:
+        # name: HF model name / checkpoint path; kwargs may include
+        # env="docker" to trigger the torch-extension lock workaround below.
+        print("ColBERT: Loading model", name)
+        self.device = "cuda" if torch.cuda.is_available() else "cpu"
+
+        DOCKER = kwargs.get("env") == "docker"
+        if DOCKER:
+            # This is a workaround for the issue with the docker container
+            # where the torch extension is not loaded properly
+            # and the following error is thrown:
+            # /root/.cache/torch_extensions/py311_cpu/segmented_maxsim_cpp/segmented_maxsim_cpp.so: cannot open shared object file: No such file or directory
+
+            lock_file = (
+                "/root/.cache/torch_extensions/py311_cpu/segmented_maxsim_cpp/lock"
+            )
+            if os.path.exists(lock_file):
+                os.remove(lock_file)
+
+        self.ckpt = Checkpoint(
+            name,
+            colbert_config=ColBERTConfig(model_name=name),
+        ).to(self.device)
+        pass  # no-op; leftover statement
+
+    def calculate_similarity_scores(self, query_embeddings, document_embeddings):
+        # MaxSim scoring: both tensors must be 3-D (batch, seq_len, dim),
+        # with either one query or one query per document.
+
+        query_embeddings = query_embeddings.to(self.device)
+        document_embeddings = document_embeddings.to(self.device)
+
+        # Validate dimensions to ensure compatibility
+        if query_embeddings.dim() != 3:
+            raise ValueError(
+                f"Expected query embeddings to have 3 dimensions, but got {query_embeddings.dim()}."
+            )
+        if document_embeddings.dim() != 3:
+            raise ValueError(
+                f"Expected document embeddings to have 3 dimensions, but got {document_embeddings.dim()}."
+            )
+        if query_embeddings.size(0) not in [1, document_embeddings.size(0)]:
+            raise ValueError(
+                "There should be either one query or queries equal to the number of documents."
+            )
+
+        # Transpose the query embeddings to align for matrix multiplication
+        transposed_query_embeddings = query_embeddings.permute(0, 2, 1)
+        # Compute similarity scores using batch matrix multiplication
+        computed_scores = torch.matmul(document_embeddings, transposed_query_embeddings)
+        # Apply max pooling to extract the highest semantic similarity across each document's sequence
+        maximum_scores = torch.max(computed_scores, dim=1).values
+
+        # Sum up the maximum scores across features to get the overall document relevance scores
+        final_scores = maximum_scores.sum(dim=1)
+
+        # NOTE(review): softmax makes scores relative across this batch of
+        # documents (they sum to 1), not absolute similarities — confirm
+        # downstream relevance thresholds account for this.
+        normalized_scores = torch.softmax(final_scores, dim=0)
+
+        return normalized_scores.detach().cpu().numpy().astype(np.float32)
+
+    def predict(self, sentences):
+        # sentences: list of (query, document) pairs; assumes every pair
+        # shares the same query — only sentences[0][0] is embedded.
+
+        query = sentences[0][0]
+        docs = [i[1] for i in sentences]
+
+        # Embedding the documents
+        embedded_docs = self.ckpt.docFromText(docs, bsize=32)[0]
+        # Embedding the queries
+        embedded_queries = self.ckpt.queryFromText([query], bsize=32)
+        embedded_query = embedded_queries[0]
+
+        # Calculate retrieval scores for the query against all documents
+        scores = self.calculate_similarity_scores(
+            embedded_query.unsqueeze(0), embedded_docs
+        )
+
+        return scores

+ 59 - 47
backend/open_webui/apps/rag/utils.py → backend/open_webui/apps/retrieval/utils.py

@@ -15,7 +15,7 @@ from open_webui.apps.ollama.main import (
     GenerateEmbeddingsForm,
     generate_ollama_embeddings,
 )
-from open_webui.apps.rag.vector.connector import VECTOR_DB_CLIENT
+from open_webui.apps.retrieval.vector.connector import VECTOR_DB_CLIENT
 from open_webui.utils.misc import get_last_user_message
 
 from open_webui.env import SRC_LOG_LEVELS
@@ -65,19 +65,16 @@ class VectorSearchRetriever(BaseRetriever):
 
 def query_doc(
     collection_name: str,
-    query: str,
-    embedding_function,
+    query_embedding: list[float],
     k: int,
 ):
     try:
         result = VECTOR_DB_CLIENT.search(
             collection_name=collection_name,
-            vectors=[embedding_function(query)],
+            vectors=[query_embedding],
             limit=k,
         )
 
-        print("result", result)
-
         log.info(f"query_doc:result {result}")
         return result
     except Exception as e:
@@ -184,15 +181,17 @@ def query_collection(
     embedding_function,
     k: int,
 ) -> dict:
+
     results = []
+    query_embedding = embedding_function(query)
+
     for collection_name in collection_names:
         if collection_name:
             try:
                 result = query_doc(
                     collection_name=collection_name,
-                    query=query,
                     k=k,
-                    embedding_function=embedding_function,
+                    query_embedding=query_embedding,
                 )
                 results.append(result.model_dump())
             except Exception as e:
@@ -319,58 +318,71 @@ def get_rag_context(
     relevant_contexts = []
 
     for file in files:
-        context = None
+        if file.get("context") == "full":
+            context = {
+                "documents": [[file.get("file").get("data", {}).get("content")]],
+                "metadatas": [[{"file_id": file.get("id"), "name": file.get("name")}]],
+            }
+        else:
+            context = None
 
-        collection_names = (
-            file["collection_names"]
-            if file["type"] == "collection"
-            else [file["collection_name"]] if file["collection_name"] else []
-        )
+            collection_names = []
+            if file.get("type") == "collection":
+                if file.get("legacy"):
+                    collection_names = file.get("collection_names", [])
+                else:
+                    collection_names.append(file["id"])
+            elif file.get("collection_name"):
+                collection_names.append(file["collection_name"])
+            elif file.get("id"):
+                if file.get("legacy"):
+                    collection_names.append(f"{file['id']}")
+                else:
+                    collection_names.append(f"file-{file['id']}")
 
-        collection_names = set(collection_names).difference(extracted_collections)
-        if not collection_names:
-            log.debug(f"skipping {file} as it has already been extracted")
-            continue
+            collection_names = set(collection_names).difference(extracted_collections)
+            if not collection_names:
+                log.debug(f"skipping {file} as it has already been extracted")
+                continue
 
-        try:
-            context = None
-            if file["type"] == "text":
-                context = file["content"]
-            else:
-                if hybrid_search:
-                    try:
-                        context = query_collection_with_hybrid_search(
+            try:
+                context = None
+                if file.get("type") == "text":
+                    context = file["content"]
+                else:
+                    if hybrid_search:
+                        try:
+                            context = query_collection_with_hybrid_search(
+                                collection_names=collection_names,
+                                query=query,
+                                embedding_function=embedding_function,
+                                k=k,
+                                reranking_function=reranking_function,
+                                r=r,
+                            )
+                        except Exception as e:
+                            log.debug(
+                                "Error when using hybrid search, using"
+                                " non hybrid search as fallback."
+                            )
+
+                    if (not hybrid_search) or (context is None):
+                        context = query_collection(
                             collection_names=collection_names,
                             query=query,
                             embedding_function=embedding_function,
                             k=k,
-                            reranking_function=reranking_function,
-                            r=r,
-                        )
-                    except Exception as e:
-                        log.debug(
-                            "Error when using hybrid search, using"
-                            " non hybrid search as fallback."
                         )
+            except Exception as e:
+                log.exception(e)
 
-                if (not hybrid_search) or (context is None):
-                    context = query_collection(
-                        collection_names=collection_names,
-                        query=query,
-                        embedding_function=embedding_function,
-                        k=k,
-                    )
-        except Exception as e:
-            log.exception(e)
+            extracted_collections.extend(collection_names)
 
         if context:
-            relevant_contexts.append({**context, "source": file})
-
-        extracted_collections.extend(collection_names)
+            relevant_contexts.append({**context, "file": file})
 
     contexts = []
     citations = []
-
     for context in relevant_contexts:
         try:
             if "documents" in context:
@@ -383,7 +395,7 @@ def get_rag_context(
                 if "metadatas" in context:
                     citations.append(
                         {
-                            "source": context["source"],
+                            "source": context["file"],
                             "document": context["documents"][0],
                             "metadata": context["metadatas"][0],
                         }

+ 4 - 4
backend/open_webui/apps/rag/vector/connector.py → backend/open_webui/apps/retrieval/vector/connector.py

@@ -1,10 +1,10 @@
-from open_webui.apps.rag.vector.dbs.chroma import ChromaClient
-from open_webui.apps.rag.vector.dbs.milvus import MilvusClient
-
-
 from open_webui.config import VECTOR_DB
 
 if VECTOR_DB == "milvus":
+    from open_webui.apps.retrieval.vector.dbs.milvus import MilvusClient
+
     VECTOR_DB_CLIENT = MilvusClient()
 else:
+    from open_webui.apps.retrieval.vector.dbs.chroma import ChromaClient
+
     VECTOR_DB_CLIENT = ChromaClient()

+ 54 - 19
backend/open_webui/apps/rag/vector/dbs/chroma.py → backend/open_webui/apps/retrieval/vector/dbs/chroma.py

@@ -4,7 +4,7 @@ from chromadb.utils.batch_utils import create_batches
 
 from typing import Optional
 
-from open_webui.apps.rag.vector.main import VectorItem, SearchResult, GetResult
+from open_webui.apps.retrieval.vector.main import VectorItem, SearchResult, GetResult
 from open_webui.config import (
     CHROMA_DATA_PATH,
     CHROMA_HTTP_HOST,
@@ -49,22 +49,49 @@ class ChromaClient:
         self, collection_name: str, vectors: list[list[float | int]], limit: int
     ) -> Optional[SearchResult]:
         # Search for the nearest neighbor items based on the vectors and return 'limit' number of results.
-        collection = self.client.get_collection(name=collection_name)
-        if collection:
-            result = collection.query(
-                query_embeddings=vectors,
-                n_results=limit,
-            )
-
-            return SearchResult(
-                **{
-                    "ids": result["ids"],
-                    "distances": result["distances"],
-                    "documents": result["documents"],
-                    "metadatas": result["metadatas"],
-                }
-            )
-        return None
+        try:
+            collection = self.client.get_collection(name=collection_name)
+            if collection:
+                result = collection.query(
+                    query_embeddings=vectors,
+                    n_results=limit,
+                )
+
+                return SearchResult(
+                    **{
+                        "ids": result["ids"],
+                        "distances": result["distances"],
+                        "documents": result["documents"],
+                        "metadatas": result["metadatas"],
+                    }
+                )
+            return None
+        except Exception as e:
+            return None
+
+    def query(
+        self, collection_name: str, filter: dict, limit: Optional[int] = None
+    ) -> Optional[GetResult]:
+        # Query the items from the collection based on the filter.
+        try:
+            collection = self.client.get_collection(name=collection_name)
+            if collection:
+                result = collection.get(
+                    where=filter,
+                    limit=limit,
+                )
+
+                # Chroma's get() returns flat lists; wrap each in an outer
+                # list to match GetResult's list-of-lists shape.
+                return GetResult(
+                    **{
+                        "ids": [result["ids"]],
+                        "documents": [result["documents"]],
+                        "metadatas": [result["metadatas"]],
+                    }
+                )
+            return None
+        except Exception as e:
+            # NOTE(review): all errors (including a missing collection) are
+            # swallowed and reported as None — confirm callers expect this.
+            print(e)
+            return None
 
     def get(self, collection_name: str) -> Optional[GetResult]:
         # Get all the items in the collection.
@@ -111,11 +138,19 @@ class ChromaClient:
             ids=ids, documents=documents, embeddings=embeddings, metadatas=metadatas
         )
 
-    def delete(self, collection_name: str, ids: list[str]):
+    def delete(
+        self,
+        collection_name: str,
+        ids: Optional[list[str]] = None,
+        filter: Optional[dict] = None,
+    ):
         # Delete the items from the collection based on the ids.
         collection = self.client.get_collection(name=collection_name)
         if collection:
-            collection.delete(ids=ids)
+            if ids:
+                collection.delete(ids=ids)
+            elif filter:
+                collection.delete(where=filter)
 
     def reset(self):
         # Resets the database. This will delete all collections and item entries.

+ 94 - 13
backend/open_webui/apps/rag/vector/dbs/milvus.py → backend/open_webui/apps/retrieval/vector/dbs/milvus.py

@@ -4,7 +4,7 @@ import json
 
 from typing import Optional
 
-from open_webui.apps.rag.vector.main import VectorItem, SearchResult, GetResult
+from open_webui.apps.retrieval.vector.main import VectorItem, SearchResult, GetResult
 from open_webui.config import (
     MILVUS_URI,
 )
@@ -16,8 +16,6 @@ class MilvusClient:
         self.client = Client(uri=MILVUS_URI)
 
     def _result_to_get_result(self, result) -> GetResult:
-        print(result)
-
         ids = []
         documents = []
         metadatas = []
@@ -26,7 +24,6 @@ class MilvusClient:
             _ids = []
             _documents = []
             _metadatas = []
-
             for item in match:
                 _ids.append(item.get("id"))
                 _documents.append(item.get("data", {}).get("text"))
@@ -45,8 +42,6 @@ class MilvusClient:
         )
 
     def _result_to_search_result(self, result) -> SearchResult:
-        print(result)
-
         ids = []
         distances = []
         documents = []
@@ -102,7 +97,10 @@ class MilvusClient:
 
         index_params = self.client.prepare_index_params()
         index_params.add_index(
-            field_name="vector", index_type="HNSW", metric_type="COSINE", params={}
+            field_name="vector",
+            index_type="HNSW",
+            metric_type="COSINE",
+            params={"M": 16, "efConstruction": 100},
         )
 
         self.client.create_collection(
@@ -113,12 +111,14 @@ class MilvusClient:
 
     def has_collection(self, collection_name: str) -> bool:
         # Check if the collection exists based on the collection name.
+        collection_name = collection_name.replace("-", "_")
         return self.client.has_collection(
             collection_name=f"{self.collection_prefix}_{collection_name}"
         )
 
     def delete_collection(self, collection_name: str):
         # Delete the collection based on the collection name.
+        collection_name = collection_name.replace("-", "_")
         return self.client.drop_collection(
             collection_name=f"{self.collection_prefix}_{collection_name}"
         )
@@ -127,6 +127,7 @@ class MilvusClient:
         self, collection_name: str, vectors: list[list[float | int]], limit: int
     ) -> Optional[SearchResult]:
         # Search for the nearest neighbor items based on the vectors and return 'limit' number of results.
+        collection_name = collection_name.replace("-", "_")
         result = self.client.search(
             collection_name=f"{self.collection_prefix}_{collection_name}",
             data=vectors,
@@ -136,8 +137,68 @@ class MilvusClient:
 
         return self._result_to_search_result(result)
 
+    def query(self, collection_name: str, filter: dict, limit: Optional[int] = None):
+        # Construct the filter string for querying
+        collection_name = collection_name.replace("-", "_")
+        if not self.has_collection(collection_name):
+            return None
+
+        filter_string = " && ".join(
+            [
+                f'metadata["{key}"] == {json.dumps(value)}'
+                for key, value in filter.items()
+            ]
+        )
+
+        max_limit = 16383  # The maximum number of records per request
+        all_results = []
+
+        if limit is None:
+            limit = float("inf")  # Use infinity as a placeholder for no limit
+
+        # Initialize offset and remaining to handle pagination
+        offset = 0
+        remaining = limit
+
+        try:
+            # Loop until there are no more items to fetch or the desired limit is reached
+            while remaining > 0:
+                print("remaining", remaining)
+                current_fetch = min(
+                    max_limit, remaining
+                )  # Determine how many items to fetch in this iteration
+
+                results = self.client.query(
+                    collection_name=f"{self.collection_prefix}_{collection_name}",
+                    filter=filter_string,
+                    output_fields=["*"],
+                    limit=current_fetch,
+                    offset=offset,
+                )
+
+                if not results:
+                    break
+
+                all_results.extend(results)
+                results_count = len(results)
+                remaining -= (
+                    results_count  # Decrease remaining by the number of items fetched
+                )
+                offset += results_count
+
+                # Break the loop if the results returned are less than the requested fetch count
+                if results_count < current_fetch:
+                    break
+
+            print(all_results)
+            return self._result_to_get_result([all_results])
+        except Exception as e:
+            print(e)
+            return None
+
     def get(self, collection_name: str) -> Optional[GetResult]:
         # Get all the items in the collection.
+        collection_name = collection_name.replace("-", "_")
         result = self.client.query(
             collection_name=f"{self.collection_prefix}_{collection_name}",
             filter='id != ""',
@@ -146,6 +207,7 @@ class MilvusClient:
 
     def insert(self, collection_name: str, items: list[VectorItem]):
         # Insert the items into the collection, if the collection does not exist, it will be created.
+        collection_name = collection_name.replace("-", "_")
         if not self.client.has_collection(
             collection_name=f"{self.collection_prefix}_{collection_name}"
         ):
@@ -168,6 +230,7 @@ class MilvusClient:
 
     def upsert(self, collection_name: str, items: list[VectorItem]):
         # Update the items in the collection, if the items are not present, insert them. If the collection does not exist, it will be created.
+        collection_name = collection_name.replace("-", "_")
         if not self.client.has_collection(
             collection_name=f"{self.collection_prefix}_{collection_name}"
         ):
@@ -188,17 +251,35 @@ class MilvusClient:
             ],
         )
 
-    def delete(self, collection_name: str, ids: list[str]):
+    def delete(
+        self,
+        collection_name: str,
+        ids: Optional[list[str]] = None,
+        filter: Optional[dict] = None,
+    ):
         # Delete the items from the collection based on the ids.
+        collection_name = collection_name.replace("-", "_")
+        if ids:
+            return self.client.delete(
+                collection_name=f"{self.collection_prefix}_{collection_name}",
+                ids=ids,
+            )
+        elif filter:
+            # Convert the filter dictionary to a string using JSON_CONTAINS.
+            filter_string = " && ".join(
+                [
+                    f'metadata["{key}"] == {json.dumps(value)}'
+                    for key, value in filter.items()
+                ]
+            )
 
-        return self.client.delete(
-            collection_name=f"{self.collection_prefix}_{collection_name}",
-            ids=ids,
-        )
+            return self.client.delete(
+                collection_name=f"{self.collection_prefix}_{collection_name}",
+                filter=filter_string,
+            )
 
     def reset(self):
         # Resets the database. This will delete all collections and item entries.
-
         collection_names = self.client.list_collections()
         for collection_name in collection_names:
             if collection_name.startswith(self.collection_prefix):

+ 0 - 0
backend/open_webui/apps/rag/vector/main.py → backend/open_webui/apps/retrieval/vector/main.py


+ 1 - 1
backend/open_webui/apps/rag/search/brave.py → backend/open_webui/apps/retrieval/web/brave.py

@@ -2,7 +2,7 @@ import logging
 from typing import Optional
 
 import requests
-from open_webui.apps.rag.search.main import SearchResult, get_filtered_results
+from open_webui.apps.retrieval.web.main import SearchResult, get_filtered_results
 from open_webui.env import SRC_LOG_LEVELS
 
 log = logging.getLogger(__name__)

+ 1 - 1
backend/open_webui/apps/rag/search/duckduckgo.py → backend/open_webui/apps/retrieval/web/duckduckgo.py

@@ -1,7 +1,7 @@
 import logging
 from typing import Optional
 
-from open_webui.apps.rag.search.main import SearchResult, get_filtered_results
+from open_webui.apps.retrieval.web.main import SearchResult, get_filtered_results
 from duckduckgo_search import DDGS
 from open_webui.env import SRC_LOG_LEVELS
 

+ 1 - 1
backend/open_webui/apps/rag/search/google_pse.py → backend/open_webui/apps/retrieval/web/google_pse.py

@@ -2,7 +2,7 @@ import logging
 from typing import Optional
 
 import requests
-from open_webui.apps.rag.search.main import SearchResult, get_filtered_results
+from open_webui.apps.retrieval.web.main import SearchResult, get_filtered_results
 from open_webui.env import SRC_LOG_LEVELS
 
 log = logging.getLogger(__name__)

+ 1 - 1
backend/open_webui/apps/rag/search/jina_search.py → backend/open_webui/apps/retrieval/web/jina_search.py

@@ -1,7 +1,7 @@
 import logging
 
 import requests
-from open_webui.apps.rag.search.main import SearchResult
+from open_webui.apps.retrieval.web.main import SearchResult
 from open_webui.env import SRC_LOG_LEVELS
 from yarl import URL
 

+ 0 - 0
backend/open_webui/apps/rag/search/main.py → backend/open_webui/apps/retrieval/web/main.py


+ 1 - 1
backend/open_webui/apps/rag/search/searchapi.py → backend/open_webui/apps/retrieval/web/searchapi.py

@@ -3,7 +3,7 @@ from typing import Optional
 from urllib.parse import urlencode
 
 import requests
-from open_webui.apps.rag.search.main import SearchResult, get_filtered_results
+from open_webui.apps.retrieval.web.main import SearchResult, get_filtered_results
 from open_webui.env import SRC_LOG_LEVELS
 
 log = logging.getLogger(__name__)

+ 1 - 1
backend/open_webui/apps/rag/search/searxng.py → backend/open_webui/apps/retrieval/web/searxng.py

@@ -2,7 +2,7 @@ import logging
 from typing import Optional
 
 import requests
-from open_webui.apps.rag.search.main import SearchResult, get_filtered_results
+from open_webui.apps.retrieval.web.main import SearchResult, get_filtered_results
 from open_webui.env import SRC_LOG_LEVELS
 
 log = logging.getLogger(__name__)

+ 1 - 1
backend/open_webui/apps/rag/search/serper.py → backend/open_webui/apps/retrieval/web/serper.py

@@ -3,7 +3,7 @@ import logging
 from typing import Optional
 
 import requests
-from open_webui.apps.rag.search.main import SearchResult, get_filtered_results
+from open_webui.apps.retrieval.web.main import SearchResult, get_filtered_results
 from open_webui.env import SRC_LOG_LEVELS
 
 log = logging.getLogger(__name__)

+ 1 - 1
backend/open_webui/apps/rag/search/serply.py → backend/open_webui/apps/retrieval/web/serply.py

@@ -3,7 +3,7 @@ from typing import Optional
 from urllib.parse import urlencode
 
 import requests
-from open_webui.apps.rag.search.main import SearchResult, get_filtered_results
+from open_webui.apps.retrieval.web.main import SearchResult, get_filtered_results
 from open_webui.env import SRC_LOG_LEVELS
 
 log = logging.getLogger(__name__)

+ 1 - 1
backend/open_webui/apps/rag/search/serpstack.py → backend/open_webui/apps/retrieval/web/serpstack.py

@@ -2,7 +2,7 @@ import logging
 from typing import Optional
 
 import requests
-from open_webui.apps.rag.search.main import SearchResult, get_filtered_results
+from open_webui.apps.retrieval.web.main import SearchResult, get_filtered_results
 from open_webui.env import SRC_LOG_LEVELS
 
 log = logging.getLogger(__name__)

+ 1 - 1
backend/open_webui/apps/rag/search/tavily.py → backend/open_webui/apps/retrieval/web/tavily.py

@@ -1,7 +1,7 @@
 import logging
 
 import requests
-from open_webui.apps.rag.search.main import SearchResult
+from open_webui.apps.retrieval.web.main import SearchResult
 from open_webui.env import SRC_LOG_LEVELS
 
 log = logging.getLogger(__name__)

+ 0 - 0
backend/open_webui/apps/rag/search/testdata/brave.json → backend/open_webui/apps/retrieval/web/testdata/brave.json


+ 0 - 0
backend/open_webui/apps/rag/search/testdata/google_pse.json → backend/open_webui/apps/retrieval/web/testdata/google_pse.json


+ 0 - 0
backend/open_webui/apps/rag/search/testdata/searchapi.json → backend/open_webui/apps/retrieval/web/testdata/searchapi.json


+ 0 - 0
backend/open_webui/apps/rag/search/testdata/searxng.json → backend/open_webui/apps/retrieval/web/testdata/searxng.json


+ 0 - 0
backend/open_webui/apps/rag/search/testdata/serper.json → backend/open_webui/apps/retrieval/web/testdata/serper.json


+ 0 - 0
backend/open_webui/apps/rag/search/testdata/serply.json → backend/open_webui/apps/retrieval/web/testdata/serply.json


+ 0 - 0
backend/open_webui/apps/rag/search/testdata/serpstack.json → backend/open_webui/apps/retrieval/web/testdata/serpstack.json


+ 97 - 0
backend/open_webui/apps/retrieval/web/utils.py

@@ -0,0 +1,97 @@
+import socket
+import urllib.parse
+import validators
+from typing import Union, Sequence, Iterator
+
+from langchain_community.document_loaders import (
+    WebBaseLoader,
+)
+from langchain_core.documents import Document
+
+
+from open_webui.constants import ERROR_MESSAGES
+from open_webui.config import ENABLE_RAG_LOCAL_WEB_FETCH
+from open_webui.env import SRC_LOG_LEVELS
+
+import logging
+
+log = logging.getLogger(__name__)
+log.setLevel(SRC_LOG_LEVELS["RAG"])
+
+
+def validate_url(url: Union[str, Sequence[str]]):
+    if isinstance(url, str):
+        if isinstance(validators.url(url), validators.ValidationError):
+            raise ValueError(ERROR_MESSAGES.INVALID_URL)
+        if not ENABLE_RAG_LOCAL_WEB_FETCH:
+            # Local web fetch is disabled, filter out any URLs that resolve to private IP addresses
+            parsed_url = urllib.parse.urlparse(url)
+            # Get IPv4 and IPv6 addresses
+            ipv4_addresses, ipv6_addresses = resolve_hostname(parsed_url.hostname)
+            # Check if any of the resolved addresses are private
+            # This is technically still vulnerable to DNS rebinding attacks, as we don't control WebBaseLoader
+            for ip in ipv4_addresses:
+                if validators.ipv4(ip, private=True):
+                    raise ValueError(ERROR_MESSAGES.INVALID_URL)
+            for ip in ipv6_addresses:
+                if validators.ipv6(ip, private=True):
+                    raise ValueError(ERROR_MESSAGES.INVALID_URL)
+        return True
+    elif isinstance(url, Sequence):
+        return all(validate_url(u) for u in url)
+    else:
+        return False
+
+
+def resolve_hostname(hostname):
+    # Get address information
+    addr_info = socket.getaddrinfo(hostname, None)
+
+    # Extract IP addresses from address information
+    ipv4_addresses = [info[4][0] for info in addr_info if info[0] == socket.AF_INET]
+    ipv6_addresses = [info[4][0] for info in addr_info if info[0] == socket.AF_INET6]
+
+    return ipv4_addresses, ipv6_addresses
+
+
+class SafeWebBaseLoader(WebBaseLoader):
+    """WebBaseLoader with enhanced error handling for URLs."""
+
+    def lazy_load(self) -> Iterator[Document]:
+        """Lazy load text from the url(s) in web_path with error handling."""
+        for path in self.web_paths:
+            try:
+                soup = self._scrape(path, bs_kwargs=self.bs_kwargs)
+                text = soup.get_text(**self.bs_get_text_kwargs)
+
+                # Build metadata
+                metadata = {"source": path}
+                if title := soup.find("title"):
+                    metadata["title"] = title.get_text()
+                if description := soup.find("meta", attrs={"name": "description"}):
+                    metadata["description"] = description.get(
+                        "content", "No description found."
+                    )
+                if html := soup.find("html"):
+                    metadata["language"] = html.get("lang", "No language found.")
+
+                yield Document(page_content=text, metadata=metadata)
+            except Exception as e:
+                # Log the error and continue with the next URL
+                log.error(f"Error loading {path}: {e}")
+
+
+def get_web_loader(
+    url: Union[str, Sequence[str]],
+    verify_ssl: bool = True,
+    requests_per_second: int = 2,
+):
+    # Check if the URL is valid
+    if not validate_url(url):
+        raise ValueError(ERROR_MESSAGES.INVALID_URL)
+    return SafeWebBaseLoader(
+        url,
+        verify_ssl=verify_ssl,
+        requests_per_second=requests_per_second,
+        continue_on_failure=True,
+    )

+ 24 - 2
backend/open_webui/apps/webui/internal/db.py

@@ -4,11 +4,20 @@ from contextlib import contextmanager
 from typing import Any, Optional
 
 from open_webui.apps.webui.internal.wrappers import register_connection
-from open_webui.env import OPEN_WEBUI_DIR, DATABASE_URL, SRC_LOG_LEVELS
+from open_webui.env import (
+    OPEN_WEBUI_DIR,
+    DATABASE_URL,
+    SRC_LOG_LEVELS,
+    DATABASE_POOL_MAX_OVERFLOW,
+    DATABASE_POOL_RECYCLE,
+    DATABASE_POOL_SIZE,
+    DATABASE_POOL_TIMEOUT,
+)
 from peewee_migrate import Router
 from sqlalchemy import Dialect, create_engine, types
 from sqlalchemy.ext.declarative import declarative_base
 from sqlalchemy.orm import scoped_session, sessionmaker
+from sqlalchemy.pool import QueuePool, NullPool
 from sqlalchemy.sql.type_api import _T
 from typing_extensions import Self
 
@@ -71,7 +80,20 @@ if "sqlite" in SQLALCHEMY_DATABASE_URL:
         SQLALCHEMY_DATABASE_URL, connect_args={"check_same_thread": False}
     )
 else:
-    engine = create_engine(SQLALCHEMY_DATABASE_URL, pool_pre_ping=True)
+    if DATABASE_POOL_SIZE > 0:
+        engine = create_engine(
+            SQLALCHEMY_DATABASE_URL,
+            pool_size=DATABASE_POOL_SIZE,
+            max_overflow=DATABASE_POOL_MAX_OVERFLOW,
+            pool_timeout=DATABASE_POOL_TIMEOUT,
+            pool_recycle=DATABASE_POOL_RECYCLE,
+            pool_pre_ping=True,
+            poolclass=QueuePool,
+        )
+    else:
+        engine = create_engine(
+            SQLALCHEMY_DATABASE_URL, pool_pre_ping=True, poolclass=NullPool
+        )
 
 
 SessionLocal = sessionmaker(

+ 6 - 3
backend/open_webui/apps/webui/main.py

@@ -10,11 +10,11 @@ from open_webui.apps.webui.routers import (
     auths,
     chats,
     configs,
-    documents,
     files,
     functions,
     memories,
     models,
+    knowledge,
     prompts,
     tools,
     users,
@@ -111,15 +111,15 @@ app.include_router(auths.router, prefix="/auths", tags=["auths"])
 app.include_router(users.router, prefix="/users", tags=["users"])
 app.include_router(chats.router, prefix="/chats", tags=["chats"])
 
-app.include_router(documents.router, prefix="/documents", tags=["documents"])
 app.include_router(models.router, prefix="/models", tags=["models"])
+app.include_router(knowledge.router, prefix="/knowledge", tags=["knowledge"])
 app.include_router(prompts.router, prefix="/prompts", tags=["prompts"])
 
-app.include_router(memories.router, prefix="/memories", tags=["memories"])
 app.include_router(files.router, prefix="/files", tags=["files"])
 app.include_router(tools.router, prefix="/tools", tags=["tools"])
 app.include_router(functions.router, prefix="/functions", tags=["functions"])
 
+app.include_router(memories.router, prefix="/memories", tags=["memories"])
 app.include_router(utils.router, prefix="/utils", tags=["utils"])
 
 
@@ -287,17 +287,20 @@ async def generate_function_chat_completion(form_data, user):
     __event_emitter__ = None
     __event_call__ = None
     __task__ = None
+    __task_body__ = None
 
     if metadata:
         if all(k in metadata for k in ("session_id", "chat_id", "message_id")):
             __event_emitter__ = get_event_emitter(metadata)
             __event_call__ = get_event_call(metadata)
         __task__ = metadata.get("task", None)
+        __task_body__ = metadata.get("task_body", None)
 
     extra_params = {
         "__event_emitter__": __event_emitter__,
         "__event_call__": __event_call__,
         "__task__": __task__,
+        "__task_body__": __task_body__,
         "__files__": files,
         "__user__": {
             "id": user.id,

+ 63 - 3
backend/open_webui/apps/webui/models/files.py

@@ -5,7 +5,7 @@ from typing import Optional
 from open_webui.apps.webui.internal.db import Base, JSONField, get_db
 from open_webui.env import SRC_LOG_LEVELS
 from pydantic import BaseModel, ConfigDict
-from sqlalchemy import BigInteger, Column, String, Text
+from sqlalchemy import BigInteger, Column, String, Text, JSON
 
 log = logging.getLogger(__name__)
 log.setLevel(SRC_LOG_LEVELS["MODELS"])
@@ -20,19 +20,29 @@ class File(Base):
 
     id = Column(String, primary_key=True)
     user_id = Column(String)
+    hash = Column(Text, nullable=True)
+
     filename = Column(Text)
+    data = Column(JSON, nullable=True)
     meta = Column(JSONField)
+
     created_at = Column(BigInteger)
+    updated_at = Column(BigInteger)
 
 
 class FileModel(BaseModel):
+    model_config = ConfigDict(from_attributes=True)
+
     id: str
     user_id: str
+    hash: Optional[str] = None
+
     filename: str
+    data: Optional[dict] = None
     meta: dict
-    created_at: int  # timestamp in epoch
 
-    model_config = ConfigDict(from_attributes=True)
+    created_at: int  # timestamp in epoch
+    updated_at: int  # timestamp in epoch
 
 
 ####################
@@ -43,14 +53,21 @@ class FileModel(BaseModel):
 class FileModelResponse(BaseModel):
     id: str
     user_id: str
+    hash: Optional[str] = None
+
     filename: str
+    data: Optional[dict] = None
     meta: dict
+
     created_at: int  # timestamp in epoch
+    updated_at: int  # timestamp in epoch
 
 
 class FileForm(BaseModel):
     id: str
+    hash: Optional[str] = None
     filename: str
+    data: dict = {}
     meta: dict = {}
 
 
@@ -62,6 +79,7 @@ class FilesTable:
                     **form_data.model_dump(),
                     "user_id": user_id,
                     "created_at": int(time.time()),
+                    "updated_at": int(time.time()),
                 }
             )
 
@@ -90,6 +108,16 @@ class FilesTable:
         with get_db() as db:
             return [FileModel.model_validate(file) for file in db.query(File).all()]
 
+    def get_files_by_ids(self, ids: list[str]) -> list[FileModel]:
+        with get_db() as db:
+            return [
+                FileModel.model_validate(file)
+                for file in db.query(File)
+                .filter(File.id.in_(ids))
+                .order_by(File.updated_at.desc())
+                .all()
+            ]
+
     def get_files_by_user_id(self, user_id: str) -> list[FileModel]:
         with get_db() as db:
             return [
@@ -97,6 +125,38 @@ class FilesTable:
                 for file in db.query(File).filter_by(user_id=user_id).all()
             ]
 
+    def update_file_hash_by_id(self, id: str, hash: str) -> Optional[FileModel]:
+        with get_db() as db:
+            try:
+                file = db.query(File).filter_by(id=id).first()
+                file.hash = hash
+                db.commit()
+
+                return FileModel.model_validate(file)
+            except Exception:
+                return None
+
+    def update_file_data_by_id(self, id: str, data: dict) -> Optional[FileModel]:
+        with get_db() as db:
+            try:
+                file = db.query(File).filter_by(id=id).first()
+                file.data = {**(file.data if file.data else {}), **data}
+                db.commit()
+                return FileModel.model_validate(file)
+            except Exception as e:
+
+                return None
+
+    def update_file_metadata_by_id(self, id: str, meta: dict) -> Optional[FileModel]:
+        with get_db() as db:
+            try:
+                file = db.query(File).filter_by(id=id).first()
+                file.meta = {**(file.meta if file.meta else {}), **meta}
+                db.commit()
+                return FileModel.model_validate(file)
+            except Exception:
+                return None
+
     def delete_file_by_id(self, id: str) -> bool:
         with get_db() as db:
             try:

+ 152 - 0
backend/open_webui/apps/webui/models/knowledge.py

@@ -0,0 +1,152 @@
+import json
+import logging
+import time
+from typing import Optional
+import uuid
+
+from open_webui.apps.webui.internal.db import Base, get_db
+from open_webui.env import SRC_LOG_LEVELS
+from pydantic import BaseModel, ConfigDict
+from sqlalchemy import BigInteger, Column, String, Text, JSON
+
+
+log = logging.getLogger(__name__)
+log.setLevel(SRC_LOG_LEVELS["MODELS"])
+
+####################
+# Knowledge DB Schema
+####################
+
+
+class Knowledge(Base):
+    __tablename__ = "knowledge"
+
+    id = Column(Text, unique=True, primary_key=True)
+    user_id = Column(Text)
+
+    name = Column(Text)
+    description = Column(Text)
+
+    data = Column(JSON, nullable=True)
+    meta = Column(JSON, nullable=True)
+
+    created_at = Column(BigInteger)
+    updated_at = Column(BigInteger)
+
+
+class KnowledgeModel(BaseModel):
+    model_config = ConfigDict(from_attributes=True)
+
+    id: str
+    user_id: str
+
+    name: str
+    description: str
+
+    data: Optional[dict] = None
+    meta: Optional[dict] = None
+
+    created_at: int  # timestamp in epoch
+    updated_at: int  # timestamp in epoch
+
+
+####################
+# Forms
+####################
+
+
+class KnowledgeResponse(BaseModel):
+    id: str
+    name: str
+    description: str
+    data: Optional[dict] = None
+    meta: Optional[dict] = None
+    created_at: int  # timestamp in epoch
+    updated_at: int  # timestamp in epoch
+
+
+class KnowledgeForm(BaseModel):
+    name: str
+    description: str
+    data: Optional[dict] = None
+
+
+class KnowledgeUpdateForm(BaseModel):
+    name: Optional[str] = None
+    description: Optional[str] = None
+    data: Optional[dict] = None
+
+
+class KnowledgeTable:
+    def insert_new_knowledge(
+        self, user_id: str, form_data: KnowledgeForm
+    ) -> Optional[KnowledgeModel]:
+        with get_db() as db:
+            knowledge = KnowledgeModel(
+                **{
+                    **form_data.model_dump(),
+                    "id": str(uuid.uuid4()),
+                    "user_id": user_id,
+                    "created_at": int(time.time()),
+                    "updated_at": int(time.time()),
+                }
+            )
+
+            try:
+                result = Knowledge(**knowledge.model_dump())
+                db.add(result)
+                db.commit()
+                db.refresh(result)
+                if result:
+                    return KnowledgeModel.model_validate(result)
+                else:
+                    return None
+            except Exception:
+                return None
+
+    def get_knowledge_items(self) -> list[KnowledgeModel]:
+        with get_db() as db:
+            return [
+                KnowledgeModel.model_validate(knowledge)
+                for knowledge in db.query(Knowledge)
+                .order_by(Knowledge.updated_at.desc())
+                .all()
+            ]
+
+    def get_knowledge_by_id(self, id: str) -> Optional[KnowledgeModel]:
+        try:
+            with get_db() as db:
+                knowledge = db.query(Knowledge).filter_by(id=id).first()
+                return KnowledgeModel.model_validate(knowledge) if knowledge else None
+        except Exception:
+            return None
+
+    def update_knowledge_by_id(
+        self, id: str, form_data: KnowledgeUpdateForm, overwrite: bool = False
+    ) -> Optional[KnowledgeModel]:
+        try:
+            with get_db() as db:
+                knowledge = self.get_knowledge_by_id(id=id)
+                db.query(Knowledge).filter_by(id=id).update(
+                    {
+                        **form_data.model_dump(exclude_none=True),
+                        "updated_at": int(time.time()),
+                    }
+                )
+                db.commit()
+                return self.get_knowledge_by_id(id=id)
+        except Exception as e:
+            log.exception(e)
+            return None
+
+    def delete_knowledge_by_id(self, id: str) -> bool:
+        try:
+            with get_db() as db:
+                db.query(Knowledge).filter_by(id=id).delete()
+                db.commit()
+                return True
+        except Exception:
+            return False
+
+
+Knowledges = KnowledgeTable()

+ 6 - 5
backend/open_webui/apps/webui/routers/chats.py

@@ -52,10 +52,9 @@ async def get_session_user_chat_list(
 
 @router.delete("/", response_model=bool)
 async def delete_all_user_chats(request: Request, user=Depends(get_verified_user)):
-    if (
-        user.role == "user"
-        and not request.app.state.config.USER_PERMISSIONS["chat"]["deletion"]
-    ):
+    if user.role == "user" and not request.app.state.config.USER_PERMISSIONS.get(
+        "chat", {}
+    ).get("deletion", {}):
         raise HTTPException(
             status_code=status.HTTP_401_UNAUTHORIZED,
             detail=ERROR_MESSAGES.ACCESS_PROHIBITED,
@@ -292,7 +291,9 @@ async def delete_chat_by_id(request: Request, id: str, user=Depends(get_verified
         result = Chats.delete_chat_by_id(id)
         return result
     else:
-        if not request.app.state.config.USER_PERMISSIONS["chat"]["deletion"]:
+        if not request.app.state.config.USER_PERMISSIONS.get("chat", {}).get(
+            "deletion", {}
+        ):
             raise HTTPException(
                 status_code=status.HTTP_401_UNAUTHORIZED,
                 detail=ERROR_MESSAGES.ACCESS_PROHIBITED,

+ 92 - 11
backend/open_webui/apps/webui/routers/files.py

@@ -4,13 +4,22 @@ import shutil
 import uuid
 from pathlib import Path
 from typing import Optional
+from pydantic import BaseModel
+import mimetypes
+
 
 from open_webui.apps.webui.models.files import FileForm, FileModel, Files
+from open_webui.apps.retrieval.main import process_file, ProcessFileForm
+
 from open_webui.config import UPLOAD_DIR
-from open_webui.constants import ERROR_MESSAGES
 from open_webui.env import SRC_LOG_LEVELS
+from open_webui.constants import ERROR_MESSAGES
+
+
 from fastapi import APIRouter, Depends, File, HTTPException, UploadFile, status
-from fastapi.responses import FileResponse
+from fastapi.responses import FileResponse, StreamingResponse
+
+
 from open_webui.utils.utils import get_admin_user, get_verified_user
 
 log = logging.getLogger(__name__)
@@ -58,6 +67,13 @@ def upload_file(file: UploadFile = File(...), user=Depends(get_verified_user)):
             ),
         )
 
+        try:
+            process_file(ProcessFileForm(file_id=id))
+            file = Files.get_file_by_id(id=id)
+        except Exception as e:
+            log.exception(e)
+            log.error(f"Error processing file: {file.id}")
+
         if file:
             return file
         else:
@@ -143,6 +159,55 @@ async def get_file_by_id(id: str, user=Depends(get_verified_user)):
         )
 
 
+############################
+# Get File Data Content By Id
+############################
+
+
+@router.get("/{id}/data/content")
+async def get_file_data_content_by_id(id: str, user=Depends(get_verified_user)):
+    file = Files.get_file_by_id(id)
+
+    if file and (file.user_id == user.id or user.role == "admin"):
+        return {"content": file.data.get("content", "")}
+    else:
+        raise HTTPException(
+            status_code=status.HTTP_404_NOT_FOUND,
+            detail=ERROR_MESSAGES.NOT_FOUND,
+        )
+
+
+############################
+# Update File Data Content By Id
+############################
+
+
+class ContentForm(BaseModel):
+    content: str
+
+
+@router.post("/{id}/data/content/update")
+async def update_file_data_content_by_id(
+    id: str, form_data: ContentForm, user=Depends(get_verified_user)
+):
+    file = Files.get_file_by_id(id)
+
+    if file and (file.user_id == user.id or user.role == "admin"):
+        try:
+            process_file(ProcessFileForm(file_id=id, content=form_data.content))
+            file = Files.get_file_by_id(id=id)
+        except Exception as e:
+            log.exception(e)
+            log.error(f"Error processing file: {file.id}")
+
+        return {"content": file.data.get("content", "")}
+    else:
+        raise HTTPException(
+            status_code=status.HTTP_404_NOT_FOUND,
+            detail=ERROR_MESSAGES.NOT_FOUND,
+        )
+
+
 ############################
 # Get File Content By Id
 ############################
@@ -176,16 +241,32 @@ async def get_file_content_by_id(id: str, user=Depends(get_verified_user)):
     file = Files.get_file_by_id(id)
 
     if file and (file.user_id == user.id or user.role == "admin"):
-        file_path = Path(file.meta["path"])
-
-        # Check if the file already exists in the cache
-        if file_path.is_file():
-            print(f"file_path: {file_path}")
-            return FileResponse(file_path)
+        file_path = file.meta.get("path")
+        if file_path:
+            file_path = Path(file_path)
+
+            # Check if the file already exists in the cache
+            if file_path.is_file():
+                print(f"file_path: {file_path}")
+                return FileResponse(file_path)
+            else:
+                raise HTTPException(
+                    status_code=status.HTTP_404_NOT_FOUND,
+                    detail=ERROR_MESSAGES.NOT_FOUND,
+                )
         else:
-            raise HTTPException(
-                status_code=status.HTTP_404_NOT_FOUND,
-                detail=ERROR_MESSAGES.NOT_FOUND,
+            # File path doesn't exist, return the content as .txt if possible
+            file_content = file.content.get("content", "")
+            file_name = file.filename
+
+            # Create a generator that encodes the file content
+            def generator():
+                yield file_content.encode("utf-8")
+
+            return StreamingResponse(
+                generator(),
+                media_type="text/plain",
+                headers={"Content-Disposition": f"attachment; filename={file_name}"},
             )
     else:
         raise HTTPException(

+ 348 - 0
backend/open_webui/apps/webui/routers/knowledge.py

@@ -0,0 +1,348 @@
+import json
+from typing import Optional, Union
+from pydantic import BaseModel
+from fastapi import APIRouter, Depends, HTTPException, status
+import logging
+
+from open_webui.apps.webui.models.knowledge import (
+    Knowledges,
+    KnowledgeUpdateForm,
+    KnowledgeForm,
+    KnowledgeResponse,
+)
+from open_webui.apps.webui.models.files import Files, FileModel
+from open_webui.apps.retrieval.vector.connector import VECTOR_DB_CLIENT
+from open_webui.apps.retrieval.main import process_file, ProcessFileForm
+
+
+from open_webui.constants import ERROR_MESSAGES
+from open_webui.utils.utils import get_admin_user, get_verified_user
+from open_webui.env import SRC_LOG_LEVELS
+
+
+log = logging.getLogger(__name__)
+log.setLevel(SRC_LOG_LEVELS["MODELS"])
+
+router = APIRouter()
+
+############################
+# GetKnowledgeItems
+############################
+
+
+@router.get(
+    "/", response_model=Optional[Union[list[KnowledgeResponse], KnowledgeResponse]]
+)
+async def get_knowledge_items(
+    id: Optional[str] = None, user=Depends(get_verified_user)
+):
+    if id:
+        knowledge = Knowledges.get_knowledge_by_id(id=id)
+
+        if knowledge:
+            return knowledge
+        else:
+            raise HTTPException(
+                status_code=status.HTTP_401_UNAUTHORIZED,
+                detail=ERROR_MESSAGES.NOT_FOUND,
+            )
+    else:
+        return [
+            KnowledgeResponse(**knowledge.model_dump())
+            for knowledge in Knowledges.get_knowledge_items()
+        ]
+
+
+############################
+# CreateNewKnowledge
+############################
+
+
+@router.post("/create", response_model=Optional[KnowledgeResponse])
+async def create_new_knowledge(form_data: KnowledgeForm, user=Depends(get_admin_user)):
+    knowledge = Knowledges.insert_new_knowledge(user.id, form_data)
+
+    if knowledge:
+        return knowledge
+    else:
+        raise HTTPException(
+            status_code=status.HTTP_400_BAD_REQUEST,
+            detail=ERROR_MESSAGES.FILE_EXISTS,
+        )
+
+
+############################
+# GetKnowledgeById
+############################
+
+
+class KnowledgeFilesResponse(KnowledgeResponse):
+    files: list[FileModel]
+
+
+@router.get("/{id}", response_model=Optional[KnowledgeFilesResponse])
+async def get_knowledge_by_id(id: str, user=Depends(get_verified_user)):
+    knowledge = Knowledges.get_knowledge_by_id(id=id)
+
+    if knowledge:
+        file_ids = knowledge.data.get("file_ids", []) if knowledge.data else []
+        files = Files.get_files_by_ids(file_ids)
+
+        return KnowledgeFilesResponse(
+            **knowledge.model_dump(),
+            files=files,
+        )
+    else:
+        raise HTTPException(
+            status_code=status.HTTP_401_UNAUTHORIZED,
+            detail=ERROR_MESSAGES.NOT_FOUND,
+        )
+
+
+############################
+# UpdateKnowledgeById
+############################
+
+
+@router.post("/{id}/update", response_model=Optional[KnowledgeFilesResponse])
+async def update_knowledge_by_id(
+    id: str,
+    form_data: KnowledgeUpdateForm,
+    user=Depends(get_admin_user),
+):
+    knowledge = Knowledges.update_knowledge_by_id(id=id, form_data=form_data)
+
+    if knowledge:
+        file_ids = knowledge.data.get("file_ids", []) if knowledge.data else []
+        files = Files.get_files_by_ids(file_ids)
+
+        return KnowledgeFilesResponse(
+            **knowledge.model_dump(),
+            files=files,
+        )
+    else:
+        raise HTTPException(
+            status_code=status.HTTP_400_BAD_REQUEST,
+            detail=ERROR_MESSAGES.ID_TAKEN,
+        )
+
+
+############################
+# AddFileToKnowledge
+############################
+
+
+class KnowledgeFileIdForm(BaseModel):
+    file_id: str
+
+
+@router.post("/{id}/file/add", response_model=Optional[KnowledgeFilesResponse])
+def add_file_to_knowledge_by_id(
+    id: str,
+    form_data: KnowledgeFileIdForm,
+    user=Depends(get_admin_user),
+):
+    knowledge = Knowledges.get_knowledge_by_id(id=id)
+    file = Files.get_file_by_id(form_data.file_id)
+    if not file:
+        raise HTTPException(
+            status_code=status.HTTP_400_BAD_REQUEST,
+            detail=ERROR_MESSAGES.NOT_FOUND,
+        )
+    if not file.data:
+        raise HTTPException(
+            status_code=status.HTTP_400_BAD_REQUEST,
+            detail=ERROR_MESSAGES.FILE_NOT_PROCESSED,
+        )
+
+    # Add content to the vector database
+    try:
+        process_file(ProcessFileForm(file_id=form_data.file_id, collection_name=id))
+    except Exception as e:
+        log.debug(e)
+        raise HTTPException(
+            status_code=status.HTTP_400_BAD_REQUEST,
+            detail=str(e),
+        )
+
+    if knowledge:
+        data = knowledge.data or {}
+        file_ids = data.get("file_ids", [])
+
+        if form_data.file_id not in file_ids:
+            file_ids.append(form_data.file_id)
+            data["file_ids"] = file_ids
+
+            knowledge = Knowledges.update_knowledge_by_id(
+                id=id, form_data=KnowledgeUpdateForm(data=data)
+            )
+
+            if knowledge:
+                files = Files.get_files_by_ids(file_ids)
+
+                return KnowledgeFilesResponse(
+                    **knowledge.model_dump(),
+                    files=files,
+                )
+            else:
+                raise HTTPException(
+                    status_code=status.HTTP_400_BAD_REQUEST,
+                    detail=ERROR_MESSAGES.DEFAULT("knowledge"),
+                )
+        else:
+            raise HTTPException(
+                status_code=status.HTTP_400_BAD_REQUEST,
+                detail=ERROR_MESSAGES.DEFAULT("file_id"),
+            )
+    else:
+        raise HTTPException(
+            status_code=status.HTTP_400_BAD_REQUEST,
+            detail=ERROR_MESSAGES.NOT_FOUND,
+        )
+
+
+@router.post("/{id}/file/update", response_model=Optional[KnowledgeFilesResponse])
+def update_file_from_knowledge_by_id(
+    id: str,
+    form_data: KnowledgeFileIdForm,
+    user=Depends(get_admin_user),
+):
+    knowledge = Knowledges.get_knowledge_by_id(id=id)
+    file = Files.get_file_by_id(form_data.file_id)
+    if not file:
+        raise HTTPException(
+            status_code=status.HTTP_400_BAD_REQUEST,
+            detail=ERROR_MESSAGES.NOT_FOUND,
+        )
+
+    # Remove content from the vector database
+    VECTOR_DB_CLIENT.delete(
+        collection_name=knowledge.id, filter={"file_id": form_data.file_id}
+    )
+
+    # Add content to the vector database
+    try:
+        process_file(ProcessFileForm(file_id=form_data.file_id, collection_name=id))
+    except Exception as e:
+        raise HTTPException(
+            status_code=status.HTTP_400_BAD_REQUEST,
+            detail=str(e),
+        )
+
+    if knowledge:
+        data = knowledge.data or {}
+        file_ids = data.get("file_ids", [])
+
+        files = Files.get_files_by_ids(file_ids)
+
+        return KnowledgeFilesResponse(
+            **knowledge.model_dump(),
+            files=files,
+        )
+    else:
+        raise HTTPException(
+            status_code=status.HTTP_400_BAD_REQUEST,
+            detail=ERROR_MESSAGES.NOT_FOUND,
+        )
+
+
+############################
+# RemoveFileFromKnowledge
+############################
+
+
+@router.post("/{id}/file/remove", response_model=Optional[KnowledgeFilesResponse])
+def remove_file_from_knowledge_by_id(
+    id: str,
+    form_data: KnowledgeFileIdForm,
+    user=Depends(get_admin_user),
+):
+    knowledge = Knowledges.get_knowledge_by_id(id=id)
+    file = Files.get_file_by_id(form_data.file_id)
+    if not file:
+        raise HTTPException(
+            status_code=status.HTTP_400_BAD_REQUEST,
+            detail=ERROR_MESSAGES.NOT_FOUND,
+        )
+
+    # Remove content from the vector database
+    VECTOR_DB_CLIENT.delete(
+        collection_name=knowledge.id, filter={"file_id": form_data.file_id}
+    )
+
+    result = VECTOR_DB_CLIENT.query(
+        collection_name=knowledge.id,
+        filter={"file_id": form_data.file_id},
+    )
+
+    Files.delete_file_by_id(form_data.file_id)
+
+    if knowledge:
+        data = knowledge.data or {}
+        file_ids = data.get("file_ids", [])
+
+        if form_data.file_id in file_ids:
+            file_ids.remove(form_data.file_id)
+            data["file_ids"] = file_ids
+
+            knowledge = Knowledges.update_knowledge_by_id(
+                id=id, form_data=KnowledgeUpdateForm(data=data)
+            )
+
+            if knowledge:
+                files = Files.get_files_by_ids(file_ids)
+
+                return KnowledgeFilesResponse(
+                    **knowledge.model_dump(),
+                    files=files,
+                )
+            else:
+                raise HTTPException(
+                    status_code=status.HTTP_400_BAD_REQUEST,
+                    detail=ERROR_MESSAGES.DEFAULT("knowledge"),
+                )
+        else:
+            raise HTTPException(
+                status_code=status.HTTP_400_BAD_REQUEST,
+                detail=ERROR_MESSAGES.DEFAULT("file_id"),
+            )
+    else:
+        raise HTTPException(
+            status_code=status.HTTP_400_BAD_REQUEST,
+            detail=ERROR_MESSAGES.NOT_FOUND,
+        )
+
+
+############################
+# ResetKnowledgeById
+############################
+
+
+@router.post("/{id}/reset", response_model=Optional[KnowledgeResponse])
+async def reset_knowledge_by_id(id: str, user=Depends(get_admin_user)):
+    try:
+        VECTOR_DB_CLIENT.delete_collection(collection_name=id)
+    except Exception as e:
+        log.debug(e)
+        pass
+
+    knowledge = Knowledges.update_knowledge_by_id(
+        id=id, form_data=KnowledgeUpdateForm(data={"file_ids": []})
+    )
+    return knowledge
+
+
+############################
+# DeleteKnowledgeById
+############################
+
+
+@router.delete("/{id}/delete", response_model=bool)
+async def delete_knowledge_by_id(id: str, user=Depends(get_admin_user)):
+    try:
+        VECTOR_DB_CLIENT.delete_collection(collection_name=id)
+    except Exception as e:
+        log.debug(e)
+        pass
+    result = Knowledges.delete_knowledge_by_id(id=id)
+    return result

+ 1 - 1
backend/open_webui/apps/webui/routers/memories.py

@@ -4,7 +4,7 @@ import logging
 from typing import Optional
 
 from open_webui.apps.webui.models.memories import Memories, MemoryModel
-from open_webui.apps.rag.vector.connector import VECTOR_DB_CLIENT
+from open_webui.apps.retrieval.vector.connector import VECTOR_DB_CLIENT
 from open_webui.utils.utils import get_verified_user
 from open_webui.env import SRC_LOG_LEVELS
 

+ 1 - 20
backend/open_webui/config.py

@@ -521,15 +521,6 @@ Path(UPLOAD_DIR).mkdir(parents=True, exist_ok=True)
 CACHE_DIR = f"{DATA_DIR}/cache"
 Path(CACHE_DIR).mkdir(parents=True, exist_ok=True)
 
-
-####################################
-# Docs DIR
-####################################
-
-DOCS_DIR = os.getenv("DOCS_DIR", f"{DATA_DIR}/docs")
-Path(DOCS_DIR).mkdir(parents=True, exist_ok=True)
-
-
 ####################################
 # Tools DIR
 ####################################
@@ -561,16 +552,6 @@ OLLAMA_API_BASE_URL = os.environ.get(
 )
 
 OLLAMA_BASE_URL = os.environ.get("OLLAMA_BASE_URL", "")
-AIOHTTP_CLIENT_TIMEOUT = os.environ.get("AIOHTTP_CLIENT_TIMEOUT", "")
-
-if AIOHTTP_CLIENT_TIMEOUT == "":
-    AIOHTTP_CLIENT_TIMEOUT = None
-else:
-    try:
-        AIOHTTP_CLIENT_TIMEOUT = int(AIOHTTP_CLIENT_TIMEOUT)
-    except Exception:
-        AIOHTTP_CLIENT_TIMEOUT = 300
-
 
 K8S_FLAG = os.environ.get("K8S_FLAG", "")
 USE_OLLAMA_DOCKER = os.environ.get("USE_OLLAMA_DOCKER", "false")
@@ -921,7 +902,7 @@ CHROMA_HTTP_SSL = os.environ.get("CHROMA_HTTP_SSL", "false").lower() == "true"
 MILVUS_URI = os.environ.get("MILVUS_URI", f"{DATA_DIR}/vector_db/milvus.db")
 
 ####################################
-# RAG
+# Information Retrieval (RAG)
 ####################################
 
 # RAG Content Extraction

+ 10 - 1
backend/open_webui/constants.py

@@ -34,8 +34,8 @@ class ERROR_MESSAGES(str, Enum):
 
     ID_TAKEN = "Uh-oh! This id is already registered. Please choose another id string."
     MODEL_ID_TAKEN = "Uh-oh! This model id is already registered. Please choose another model id string."
-
     NAME_TAG_TAKEN = "Uh-oh! This name tag is already registered. Please choose another name tag string."
+
     INVALID_TOKEN = (
         "Your session has expired or the token is invalid. Please sign in again."
     )
@@ -90,6 +90,15 @@ class ERROR_MESSAGES(str, Enum):
         "The Ollama API is disabled. Please enable it to use this feature."
     )
 
+    FILE_TOO_LARGE = (
+        lambda size="": f"Oops! The file you're trying to upload is too large. Please upload a file that is less than {size}."
+    )
+
+    DUPLICATE_CONTENT = (
+        "Duplicate content detected. Please provide unique content to proceed."
+    )
+    FILE_NOT_PROCESSED = "Extracted content is not available for this file. Please ensure that the file is processed before proceeding."
+
 
 class TASKS(str, Enum):
     def __str__(self) -> str:

+ 50 - 0
backend/open_webui/env.py

@@ -258,6 +258,45 @@ DATABASE_URL = os.environ.get("DATABASE_URL", f"sqlite:///{DATA_DIR}/webui.db")
 if "postgres://" in DATABASE_URL:
     DATABASE_URL = DATABASE_URL.replace("postgres://", "postgresql://")
 
+DATABASE_POOL_SIZE = os.environ.get("DATABASE_POOL_SIZE", 0)
+
+if DATABASE_POOL_SIZE == "":
+    DATABASE_POOL_SIZE = 0
+else:
+    try:
+        DATABASE_POOL_SIZE = int(DATABASE_POOL_SIZE)
+    except Exception:
+        DATABASE_POOL_SIZE = 0
+
+DATABASE_POOL_MAX_OVERFLOW = os.environ.get("DATABASE_POOL_MAX_OVERFLOW", 0)
+
+if DATABASE_POOL_MAX_OVERFLOW == "":
+    DATABASE_POOL_MAX_OVERFLOW = 0
+else:
+    try:
+        DATABASE_POOL_MAX_OVERFLOW = int(DATABASE_POOL_MAX_OVERFLOW)
+    except Exception:
+        DATABASE_POOL_MAX_OVERFLOW = 0
+
+DATABASE_POOL_TIMEOUT = os.environ.get("DATABASE_POOL_TIMEOUT", 30)
+
+if DATABASE_POOL_TIMEOUT == "":
+    DATABASE_POOL_TIMEOUT = 30
+else:
+    try:
+        DATABASE_POOL_TIMEOUT = int(DATABASE_POOL_TIMEOUT)
+    except Exception:
+        DATABASE_POOL_TIMEOUT = 30
+
+DATABASE_POOL_RECYCLE = os.environ.get("DATABASE_POOL_RECYCLE", 3600)
+
+if DATABASE_POOL_RECYCLE == "":
+    DATABASE_POOL_RECYCLE = 3600
+else:
+    try:
+        DATABASE_POOL_RECYCLE = int(DATABASE_POOL_RECYCLE)
+    except Exception:
+        DATABASE_POOL_RECYCLE = 3600
 
 RESET_CONFIG_ON_START = (
     os.environ.get("RESET_CONFIG_ON_START", "False").lower() == "true"
@@ -305,3 +344,14 @@ ENABLE_WEBSOCKET_SUPPORT = (
 WEBSOCKET_MANAGER = os.environ.get("WEBSOCKET_MANAGER", "")
 
 WEBSOCKET_REDIS_URL = os.environ.get("WEBSOCKET_REDIS_URL", "redis://localhost:6379/0")
+
+
+AIOHTTP_CLIENT_TIMEOUT = os.environ.get("AIOHTTP_CLIENT_TIMEOUT", "")
+
+if AIOHTTP_CLIENT_TIMEOUT == "":
+    AIOHTTP_CLIENT_TIMEOUT = None
+else:
+    try:
+        AIOHTTP_CLIENT_TIMEOUT = int(AIOHTTP_CLIENT_TIMEOUT)
+    except Exception:
+        AIOHTTP_CLIENT_TIMEOUT = 300

+ 97 - 69
backend/open_webui/main.py

@@ -16,37 +16,45 @@ from typing import Optional
 import aiohttp
 import requests
 
-
-from open_webui.apps.audio.main import app as audio_app
-from open_webui.apps.images.main import app as images_app
-from open_webui.apps.ollama.main import app as ollama_app
 from open_webui.apps.ollama.main import (
-    GenerateChatCompletionForm,
+    app as ollama_app,
+    get_all_models as get_ollama_models,
     generate_chat_completion as generate_ollama_chat_completion,
     generate_openai_chat_completion as generate_ollama_openai_chat_completion,
+    GenerateChatCompletionForm,
 )
-from open_webui.apps.ollama.main import get_all_models as get_ollama_models
-from open_webui.apps.openai.main import app as openai_app
 from open_webui.apps.openai.main import (
+    app as openai_app,
     generate_chat_completion as generate_openai_chat_completion,
+    get_all_models as get_openai_models,
 )
-from open_webui.apps.openai.main import get_all_models as get_openai_models
-from open_webui.apps.rag.main import app as rag_app
-from open_webui.apps.rag.utils import get_rag_context, rag_template
-from open_webui.apps.socket.main import app as socket_app, periodic_usage_pool_cleanup
-from open_webui.apps.socket.main import get_event_call, get_event_emitter
-from open_webui.apps.webui.internal.db import Session
-from open_webui.apps.webui.main import app as webui_app
+
+from open_webui.apps.retrieval.main import app as retrieval_app
+from open_webui.apps.retrieval.utils import get_rag_context, rag_template
+
+from open_webui.apps.socket.main import (
+    app as socket_app,
+    periodic_usage_pool_cleanup,
+    get_event_call,
+    get_event_emitter,
+)
+
 from open_webui.apps.webui.main import (
+    app as webui_app,
     generate_function_chat_completion,
     get_pipe_models,
 )
+from open_webui.apps.webui.internal.db import Session
+
 from open_webui.apps.webui.models.auths import Auths
 from open_webui.apps.webui.models.functions import Functions
 from open_webui.apps.webui.models.models import Models
 from open_webui.apps.webui.models.users import UserModel, Users
+
 from open_webui.apps.webui.utils import load_function_module_by_id
 
+from open_webui.apps.audio.main import app as audio_app
+from open_webui.apps.images.main import app as images_app
 
 from authlib.integrations.starlette_client import OAuth
 from authlib.oidc.core import UserInfo
@@ -187,8 +195,6 @@ https://github.com/open-webui/open-webui
 
 @asynccontextmanager
 async def lifespan(app: FastAPI):
-    run_migrations()
-
     if RESET_CONFIG_ON_START:
         reset_config()
 
@@ -440,37 +446,44 @@ async def chat_completion_tools_handler(
         if not content:
             return body, {}
 
-        result = json.loads(content)
+        try:
+            content = content[content.find("{") : content.rfind("}") + 1]
+            if not content:
+                raise Exception("No JSON object found in the response")
 
-        tool_function_name = result.get("name", None)
-        if tool_function_name not in tools:
-            return body, {}
+            result = json.loads(content)
 
-        tool_function_params = result.get("parameters", {})
+            tool_function_name = result.get("name", None)
+            if tool_function_name not in tools:
+                return body, {}
 
-        try:
-            tool_output = await tools[tool_function_name]["callable"](
-                **tool_function_params
-            )
-        except Exception as e:
-            tool_output = str(e)
+            tool_function_params = result.get("parameters", {})
 
-        if tools[tool_function_name]["citation"]:
-            citations.append(
-                {
-                    "source": {
-                        "name": f"TOOL:{tools[tool_function_name]['toolkit_id']}/{tool_function_name}"
-                    },
-                    "document": [tool_output],
-                    "metadata": [{"source": tool_function_name}],
-                }
-            )
-        if tools[tool_function_name]["file_handler"]:
-            skip_files = True
+            try:
+                tool_output = await tools[tool_function_name]["callable"](
+                    **tool_function_params
+                )
+            except Exception as e:
+                tool_output = str(e)
 
-        if isinstance(tool_output, str):
-            contexts.append(tool_output)
+            if tools[tool_function_name]["citation"]:
+                citations.append(
+                    {
+                        "source": {
+                            "name": f"TOOL:{tools[tool_function_name]['toolkit_id']}/{tool_function_name}"
+                        },
+                        "document": [tool_output],
+                        "metadata": [{"source": tool_function_name}],
+                    }
+                )
+            if tools[tool_function_name]["file_handler"]:
+                skip_files = True
 
+            if isinstance(tool_output, str):
+                contexts.append(tool_output)
+        except Exception as e:
+            log.exception(f"Error: {e}")
+            content = None
     except Exception as e:
         log.exception(f"Error: {e}")
         content = None
@@ -491,11 +504,11 @@ async def chat_completion_files_handler(body) -> tuple[dict, dict[str, list]]:
         contexts, citations = get_rag_context(
             files=files,
             messages=body["messages"],
-            embedding_function=rag_app.state.EMBEDDING_FUNCTION,
-            k=rag_app.state.config.TOP_K,
-            reranking_function=rag_app.state.sentence_transformer_rf,
-            r=rag_app.state.config.RELEVANCE_THRESHOLD,
-            hybrid_search=rag_app.state.config.ENABLE_RAG_HYBRID_SEARCH,
+            embedding_function=retrieval_app.state.EMBEDDING_FUNCTION,
+            k=retrieval_app.state.config.TOP_K,
+            reranking_function=retrieval_app.state.sentence_transformer_rf,
+            r=retrieval_app.state.config.RELEVANCE_THRESHOLD,
+            hybrid_search=retrieval_app.state.config.ENABLE_RAG_HYBRID_SEARCH,
         )
 
         log.debug(f"rag_contexts: {contexts}, citations: {citations}")
@@ -608,7 +621,7 @@ class ChatCompletionMiddleware(BaseHTTPMiddleware):
             if prompt is None:
                 raise Exception("No user message found")
             if (
-                rag_app.state.config.RELEVANCE_THRESHOLD == 0
+                retrieval_app.state.config.RELEVANCE_THRESHOLD == 0
                 and context_string.strip() == ""
             ):
                 log.debug(
@@ -620,14 +633,14 @@ class ChatCompletionMiddleware(BaseHTTPMiddleware):
             if model["owned_by"] == "ollama":
                 body["messages"] = prepend_to_first_user_message_content(
                     rag_template(
-                        rag_app.state.config.RAG_TEMPLATE, context_string, prompt
+                        retrieval_app.state.config.RAG_TEMPLATE, context_string, prompt
                     ),
                     body["messages"],
                 )
             else:
                 body["messages"] = add_or_update_system_message(
                     rag_template(
-                        rag_app.state.config.RAG_TEMPLATE, context_string, prompt
+                        retrieval_app.state.config.RAG_TEMPLATE, context_string, prompt
                     ),
                     body["messages"],
                 )
@@ -761,10 +774,22 @@ class PipelineMiddleware(BaseHTTPMiddleware):
         # Parse string to JSON
         data = json.loads(body_str) if body_str else {}
 
-        user = get_current_user(
-            request,
-            get_http_authorization_cred(request.headers["Authorization"]),
-        )
+        try:
+            user = get_current_user(
+                request,
+                get_http_authorization_cred(request.headers["Authorization"]),
+            )
+        except KeyError as e:
+            if len(e.args) > 1:
+                return JSONResponse(
+                    status_code=e.args[0],
+                    content={"detail": e.args[1]},
+                )
+            else:
+                return JSONResponse(
+                    status_code=status.HTTP_401_UNAUTHORIZED,
+                    content={"detail": "Not authenticated"},
+                )
 
         try:
             data = filter_pipeline(data, user)
@@ -837,7 +862,7 @@ async def check_url(request: Request, call_next):
 async def update_embedding_function(request: Request, call_next):
     response = await call_next(request)
     if "/embedding/update" in request.url.path:
-        webui_app.state.EMBEDDING_FUNCTION = rag_app.state.EMBEDDING_FUNCTION
+        webui_app.state.EMBEDDING_FUNCTION = retrieval_app.state.EMBEDDING_FUNCTION
     return response
 
 
@@ -865,11 +890,12 @@ app.mount("/openai", openai_app)
 
 app.mount("/images/api/v1", images_app)
 app.mount("/audio/api/v1", audio_app)
-app.mount("/rag/api/v1", rag_app)
+app.mount("/retrieval/api/v1", retrieval_app)
 
 app.mount("/api/v1", webui_app)
 
-webui_app.state.EMBEDDING_FUNCTION = rag_app.state.EMBEDDING_FUNCTION
+
+webui_app.state.EMBEDDING_FUNCTION = retrieval_app.state.EMBEDDING_FUNCTION
 
 
 async def get_all_models():
@@ -1466,7 +1492,7 @@ Prompt: {{prompt:middletruncate:8000}}"""
             }
         ),
         "chat_id": form_data.get("chat_id", None),
-        "metadata": {"task": str(TASKS.TITLE_GENERATION)},
+        "metadata": {"task": str(TASKS.TITLE_GENERATION), "task_body": form_data},
     }
     log.debug(payload)
 
@@ -1543,7 +1569,7 @@ Search Query:"""
                 "max_completion_tokens": 30,
             }
         ),
-        "metadata": {"task": str(TASKS.QUERY_GENERATION)},
+        "metadata": {"task": str(TASKS.QUERY_GENERATION), "task_body": form_data},
     }
     log.debug(payload)
 
@@ -1611,7 +1637,7 @@ Message: """{{prompt}}"""
             }
         ),
         "chat_id": form_data.get("chat_id", None),
-        "metadata": {"task": str(TASKS.EMOJI_GENERATION)},
+        "metadata": {"task": str(TASKS.EMOJI_GENERATION), "task_body": form_data},
     }
     log.debug(payload)
 
@@ -1670,7 +1696,10 @@ Responses from models: {{responses}}"""
         "messages": [{"role": "user", "content": content}],
         "stream": form_data.get("stream", False),
         "chat_id": form_data.get("chat_id", None),
-        "metadata": {"task": str(TASKS.MOA_RESPONSE_GENERATION)},
+        "metadata": {
+            "task": str(TASKS.MOA_RESPONSE_GENERATION),
+            "task_body": form_data,
+        },
     }
     log.debug(payload)
 
@@ -2054,7 +2083,7 @@ async def get_app_config(request: Request):
             "enable_login_form": webui_app.state.config.ENABLE_LOGIN_FORM,
             **(
                 {
-                    "enable_web_search": rag_app.state.config.ENABLE_RAG_WEB_SEARCH,
+                    "enable_web_search": retrieval_app.state.config.ENABLE_RAG_WEB_SEARCH,
                     "enable_image_generation": images_app.state.config.ENABLED,
                     "enable_community_sharing": webui_app.state.config.ENABLE_COMMUNITY_SHARING,
                     "enable_message_rating": webui_app.state.config.ENABLE_MESSAGE_RATING,
@@ -2080,8 +2109,8 @@ async def get_app_config(request: Request):
                     },
                 },
                 "file": {
-                    "max_size": rag_app.state.config.FILE_MAX_SIZE,
-                    "max_count": rag_app.state.config.FILE_MAX_COUNT,
+                    "max_size": retrieval_app.state.config.FILE_MAX_SIZE,
+                    "max_count": retrieval_app.state.config.FILE_MAX_COUNT,
                 },
                 "permissions": {**webui_app.state.config.USER_PERMISSIONS},
             }
@@ -2153,7 +2182,8 @@ async def get_app_changelog():
 @app.get("/api/version/updates")
 async def get_app_latest_release_version():
     try:
-        async with aiohttp.ClientSession(trust_env=True) as session:
+        timeout = aiohttp.ClientTimeout(total=1)
+        async with aiohttp.ClientSession(timeout=timeout, trust_env=True) as session:
             async with session.get(
                 "https://api.github.com/repos/open-webui/open-webui/releases/latest"
             ) as response:
@@ -2162,11 +2192,9 @@ async def get_app_latest_release_version():
                 latest_version = data["tag_name"]
 
                 return {"current": VERSION, "latest": latest_version[1:]}
-    except aiohttp.ClientError:
-        raise HTTPException(
-            status_code=status.HTTP_503_SERVICE_UNAVAILABLE,
-            detail=ERROR_MESSAGES.RATE_LIMIT_EXCEEDED,
-        )
+    except Exception as e:
+        log.debug(e)
+        return {"current": VERSION, "latest": VERSION}
 
 
 ############################

+ 0 - 19
backend/open_webui/migrations/scripts/revision.py

@@ -1,19 +0,0 @@
-from alembic import command
-from alembic.config import Config
-
-from open_webui.env import OPEN_WEBUI_DIR
-
-alembic_cfg = Config(OPEN_WEBUI_DIR / "alembic.ini")
-
-# Set the script location dynamically
-migrations_path = OPEN_WEBUI_DIR / "migrations"
-alembic_cfg.set_main_option("script_location", str(migrations_path))
-
-
-def revision(message: str) -> None:
-    command.revision(alembic_cfg, message=message, autogenerate=False)
-
-
-if __name__ == "__main__":
-    input_message = input("Enter the revision message: ")
-    revision(input_message)

+ 6 - 0
backend/open_webui/migrations/util.py

@@ -7,3 +7,9 @@ def get_existing_tables():
     inspector = Inspector.from_engine(con)
     tables = set(inspector.get_table_names())
     return tables
+
+
+def get_revision_id():
+    import uuid
+
+    return str(uuid.uuid4()).replace("-", "")[:12]

+ 80 - 0
backend/open_webui/migrations/versions/6a39f3d8e55c_add_knowledge_table.py

@@ -0,0 +1,80 @@
+"""Add knowledge table
+
+Revision ID: 6a39f3d8e55c
+Revises: c0fbf31ca0db
+Create Date: 2024-10-01 14:02:35.241684
+
+"""
+
+from alembic import op
+import sqlalchemy as sa
+from sqlalchemy.sql import table, column, select
+import json
+
+
+revision = "6a39f3d8e55c"
+down_revision = "c0fbf31ca0db"
+branch_labels = None
+depends_on = None
+
+
+def upgrade():
+    # Creating the 'knowledge' table
+    print("Creating knowledge table")
+    knowledge_table = op.create_table(
+        "knowledge",
+        sa.Column("id", sa.Text(), primary_key=True),
+        sa.Column("user_id", sa.Text(), nullable=False),
+        sa.Column("name", sa.Text(), nullable=False),
+        sa.Column("description", sa.Text(), nullable=True),
+        sa.Column("data", sa.JSON(), nullable=True),
+        sa.Column("meta", sa.JSON(), nullable=True),
+        sa.Column("created_at", sa.BigInteger(), nullable=False),
+        sa.Column("updated_at", sa.BigInteger(), nullable=True),
+    )
+
+    print("Migrating data from document table to knowledge table")
+    # Representation of the existing 'document' table
+    document_table = table(
+        "document",
+        column("collection_name", sa.String()),
+        column("user_id", sa.String()),
+        column("name", sa.String()),
+        column("title", sa.Text()),
+        column("content", sa.Text()),
+        column("timestamp", sa.BigInteger()),
+    )
+
+    # Select all from existing document table
+    documents = op.get_bind().execute(
+        select(
+            document_table.c.collection_name,
+            document_table.c.user_id,
+            document_table.c.name,
+            document_table.c.title,
+            document_table.c.content,
+            document_table.c.timestamp,
+        )
+    )
+
+    # Insert data into knowledge table from document table
+    for doc in documents:
+        op.get_bind().execute(
+            knowledge_table.insert().values(
+                id=doc.collection_name,
+                user_id=doc.user_id,
+                description=doc.name,
+                meta={
+                    "legacy": True,
+                    "document": True,
+                    "tags": json.loads(doc.content or "{}").get("tags", []),
+                },
+                name=doc.title,
+                created_at=doc.timestamp,
+                updated_at=doc.timestamp,  # using created_at for both created_at and updated_at in project
+            )
+        )
+
+
+def downgrade():
+    op.drop_table("knowledge")

+ 32 - 0
backend/open_webui/migrations/versions/c0fbf31ca0db_update_file_table.py

@@ -0,0 +1,32 @@
+"""Update file table
+
+Revision ID: c0fbf31ca0db
+Revises: ca81bd47c050
+Create Date: 2024-09-20 15:26:35.241684
+
+"""
+
+from typing import Sequence, Union
+
+import sqlalchemy as sa
+from alembic import op
+
+# revision identifiers, used by Alembic.
+revision: str = "c0fbf31ca0db"
+down_revision: Union[str, None] = "ca81bd47c050"
+branch_labels: Union[str, Sequence[str], None] = None
+depends_on: Union[str, Sequence[str], None] = None
+
+
+def upgrade():
+    # ### commands auto generated by Alembic - please adjust! ###
+    op.add_column("file", sa.Column("hash", sa.Text(), nullable=True))
+    op.add_column("file", sa.Column("data", sa.JSON(), nullable=True))
+    op.add_column("file", sa.Column("updated_at", sa.BigInteger(), nullable=True))
+
+
+def downgrade():
+    # ### commands auto generated by Alembic - please adjust! ###
+    op.drop_column("file", "updated_at")
+    op.drop_column("file", "data")
+    op.drop_column("file", "hash")

+ 1 - 1
backend/open_webui/utils/misc.py

@@ -122,7 +122,7 @@ def openai_chat_completion_message_template(
 ) -> dict:
     template = openai_chat_message_template(model)
     template["object"] = "chat.completion"
-    if message:
+    if message is not None:
         template["choices"][0]["message"] = {"content": message, "role": "assistant"}
     template["choices"][0]["finish_reason"] = "stop"
     return template

+ 4 - 0
backend/open_webui/utils/schemas.py

@@ -104,5 +104,9 @@ def json_schema_to_pydantic_type(json_schema: dict[str, Any]) -> Any:
         return Optional[Any]  # Use Optional[Any] for nullable fields
     elif type_ == "literal":
         return Literal[literal_eval(json_schema.get("enum"))]
+    elif type_ == "optional":
+        inner_schema = json_schema.get("items", {"type": "string"})
+        inner_type = json_schema_to_pydantic_type(inner_schema)
+        return Optional[inner_type]
     else:
         raise ValueError(f"Unsupported JSON schema type: {type_}")

+ 9 - 7
backend/requirements.txt

@@ -1,6 +1,6 @@
 fastapi==0.111.0
 uvicorn[standard]==0.30.6
-pydantic==2.8.2
+pydantic==2.9.2
 python-multipart==0.0.9
 
 Flask==3.0.3
@@ -11,7 +11,7 @@ python-jose==3.3.0
 passlib[bcrypt]==1.7.4
 
 requests==2.32.3
-aiohttp==3.10.5
+aiohttp==3.10.8
 
 sqlalchemy==2.0.32
 alembic==1.13.2
@@ -36,16 +36,18 @@ tiktoken
 
 langchain==0.2.15
 langchain-community==0.2.12
-langchain-chroma==0.1.2
+langchain-chroma==0.1.4
 
 fake-useragent==1.5.1
-chromadb==0.5.5
-pymilvus==2.4.6
+chromadb==0.5.9
+pymilvus==2.4.7
 
 sentence-transformers==3.0.1
 colbert-ai==0.2.21
 einops==0.8.0
 
+
+ftfy==6.2.3
 pypdf==4.3.1
 docx2txt==0.8
 python-pptx==1.0.0
@@ -53,7 +55,7 @@ unstructured==0.15.9
 nltk==3.9.1
 Markdown==3.7
 pypandoc==1.13
-pandas==2.2.2
+pandas==2.2.3
 openpyxl==3.1.5
 pyxlsb==1.0.10
 xlrd==2.0.1
@@ -78,7 +80,7 @@ pytube==15.0.0
 
 extract_msg
 pydub
-duckduckgo-search~=6.2.11
+duckduckgo-search~=6.2.13
 
 ## Tests
 docker~=7.1.0

BIN
bun.lockb


+ 460 - 49
package-lock.json

@@ -1,15 +1,16 @@
 {
 	"name": "open-webui",
-	"version": "0.3.30",
+	"version": "0.3.31",
 	"lockfileVersion": 3,
 	"requires": true,
 	"packages": {
 		"": {
 			"name": "open-webui",
-			"version": "0.3.30",
+			"version": "0.3.31",
 			"dependencies": {
 				"@codemirror/lang-javascript": "^6.2.2",
 				"@codemirror/lang-python": "^6.1.6",
+				"@codemirror/language-data": "^6.5.1",
 				"@codemirror/theme-one-dark": "^6.1.2",
 				"@pyscript/core": "^0.4.32",
 				"@sveltejs/adapter-node": "^2.0.0",
@@ -33,6 +34,7 @@
 				"marked": "^9.1.0",
 				"mermaid": "^10.9.1",
 				"paneforge": "^0.0.6",
+				"panzoom": "^9.4.3",
 				"pyodide": "^0.26.1",
 				"socket.io-client": "^4.2.0",
 				"sortablejs": "^1.15.2",
@@ -50,7 +52,7 @@
 				"@typescript-eslint/eslint-plugin": "^6.17.0",
 				"@typescript-eslint/parser": "^6.17.0",
 				"autoprefixer": "^10.4.16",
-				"cypress": "^13.8.1",
+				"cypress": "^13.15.0",
 				"eslint": "^8.56.0",
 				"eslint-config-prettier": "^9.1.0",
 				"eslint-plugin-cypress": "^3.4.0",
@@ -69,7 +71,7 @@
 				"vitest": "^1.6.0"
 			},
 			"engines": {
-				"node": ">=18.13.0 <=21.x.x",
+				"node": ">=18.13.0 <=22.x.x",
 				"npm": ">=6.0.0"
 			}
 		},
@@ -150,6 +152,77 @@
 				"@lezer/common": "^1.1.0"
 			}
 		},
+		"node_modules/@codemirror/lang-angular": {
+			"version": "0.1.3",
+			"resolved": "https://registry.npmjs.org/@codemirror/lang-angular/-/lang-angular-0.1.3.tgz",
+			"integrity": "sha512-xgeWGJQQl1LyStvndWtruUvb4SnBZDAu/gvFH/ZU+c0W25tQR8e5hq7WTwiIY2dNxnf+49mRiGI/9yxIwB6f5w==",
+			"dependencies": {
+				"@codemirror/lang-html": "^6.0.0",
+				"@codemirror/lang-javascript": "^6.1.2",
+				"@codemirror/language": "^6.0.0",
+				"@lezer/common": "^1.2.0",
+				"@lezer/highlight": "^1.0.0",
+				"@lezer/lr": "^1.3.3"
+			}
+		},
+		"node_modules/@codemirror/lang-cpp": {
+			"version": "6.0.2",
+			"resolved": "https://registry.npmjs.org/@codemirror/lang-cpp/-/lang-cpp-6.0.2.tgz",
+			"integrity": "sha512-6oYEYUKHvrnacXxWxYa6t4puTlbN3dgV662BDfSH8+MfjQjVmP697/KYTDOqpxgerkvoNm7q5wlFMBeX8ZMocg==",
+			"dependencies": {
+				"@codemirror/language": "^6.0.0",
+				"@lezer/cpp": "^1.0.0"
+			}
+		},
+		"node_modules/@codemirror/lang-css": {
+			"version": "6.3.0",
+			"resolved": "https://registry.npmjs.org/@codemirror/lang-css/-/lang-css-6.3.0.tgz",
+			"integrity": "sha512-CyR4rUNG9OYcXDZwMPvJdtb6PHbBDKUc/6Na2BIwZ6dKab1JQqKa4di+RNRY9Myn7JB81vayKwJeQ7jEdmNVDA==",
+			"dependencies": {
+				"@codemirror/autocomplete": "^6.0.0",
+				"@codemirror/language": "^6.0.0",
+				"@codemirror/state": "^6.0.0",
+				"@lezer/common": "^1.0.2",
+				"@lezer/css": "^1.1.7"
+			}
+		},
+		"node_modules/@codemirror/lang-go": {
+			"version": "6.0.1",
+			"resolved": "https://registry.npmjs.org/@codemirror/lang-go/-/lang-go-6.0.1.tgz",
+			"integrity": "sha512-7fNvbyNylvqCphW9HD6WFnRpcDjr+KXX/FgqXy5H5ZS0eC5edDljukm/yNgYkwTsgp2busdod50AOTIy6Jikfg==",
+			"dependencies": {
+				"@codemirror/autocomplete": "^6.0.0",
+				"@codemirror/language": "^6.6.0",
+				"@codemirror/state": "^6.0.0",
+				"@lezer/common": "^1.0.0",
+				"@lezer/go": "^1.0.0"
+			}
+		},
+		"node_modules/@codemirror/lang-html": {
+			"version": "6.4.9",
+			"resolved": "https://registry.npmjs.org/@codemirror/lang-html/-/lang-html-6.4.9.tgz",
+			"integrity": "sha512-aQv37pIMSlueybId/2PVSP6NPnmurFDVmZwzc7jszd2KAF8qd4VBbvNYPXWQq90WIARjsdVkPbw29pszmHws3Q==",
+			"dependencies": {
+				"@codemirror/autocomplete": "^6.0.0",
+				"@codemirror/lang-css": "^6.0.0",
+				"@codemirror/lang-javascript": "^6.0.0",
+				"@codemirror/language": "^6.4.0",
+				"@codemirror/state": "^6.0.0",
+				"@codemirror/view": "^6.17.0",
+				"@lezer/common": "^1.0.0",
+				"@lezer/css": "^1.1.0",
+				"@lezer/html": "^1.3.0"
+			}
+		},
+		"node_modules/@codemirror/lang-java": {
+			"version": "6.0.1",
+			"resolved": "https://registry.npmjs.org/@codemirror/lang-java/-/lang-java-6.0.1.tgz",
+			"integrity": "sha512-OOnmhH67h97jHzCuFaIEspbmsT98fNdhVhmA3zCxW0cn7l8rChDhZtwiwJ/JOKXgfm4J+ELxQihxaI7bj7mJRg==",
+			"dependencies": {
+				"@codemirror/language": "^6.0.0",
+				"@lezer/java": "^1.0.0"
+			}
+		},
 		"node_modules/@codemirror/lang-javascript": {
 			"version": "6.2.2",
 			"resolved": "https://registry.npmjs.org/@codemirror/lang-javascript/-/lang-javascript-6.2.2.tgz",
@@ -164,6 +237,68 @@
 				"@lezer/javascript": "^1.0.0"
 			}
 		},
+		"node_modules/@codemirror/lang-json": {
+			"version": "6.0.1",
+			"resolved": "https://registry.npmjs.org/@codemirror/lang-json/-/lang-json-6.0.1.tgz",
+			"integrity": "sha512-+T1flHdgpqDDlJZ2Lkil/rLiRy684WMLc74xUnjJH48GQdfJo/pudlTRreZmKwzP8/tGdKf83wlbAdOCzlJOGQ==",
+			"dependencies": {
+				"@codemirror/language": "^6.0.0",
+				"@lezer/json": "^1.0.0"
+			}
+		},
+		"node_modules/@codemirror/lang-less": {
+			"version": "6.0.2",
+			"resolved": "https://registry.npmjs.org/@codemirror/lang-less/-/lang-less-6.0.2.tgz",
+			"integrity": "sha512-EYdQTG22V+KUUk8Qq582g7FMnCZeEHsyuOJisHRft/mQ+ZSZ2w51NupvDUHiqtsOy7It5cHLPGfHQLpMh9bqpQ==",
+			"dependencies": {
+				"@codemirror/lang-css": "^6.2.0",
+				"@codemirror/language": "^6.0.0",
+				"@lezer/common": "^1.2.0",
+				"@lezer/highlight": "^1.0.0",
+				"@lezer/lr": "^1.0.0"
+			}
+		},
+		"node_modules/@codemirror/lang-liquid": {
+			"version": "6.2.1",
+			"resolved": "https://registry.npmjs.org/@codemirror/lang-liquid/-/lang-liquid-6.2.1.tgz",
+			"integrity": "sha512-J1Mratcm6JLNEiX+U2OlCDTysGuwbHD76XwuL5o5bo9soJtSbz2g6RU3vGHFyS5DC8rgVmFSzi7i6oBftm7tnA==",
+			"dependencies": {
+				"@codemirror/autocomplete": "^6.0.0",
+				"@codemirror/lang-html": "^6.0.0",
+				"@codemirror/language": "^6.0.0",
+				"@codemirror/state": "^6.0.0",
+				"@codemirror/view": "^6.0.0",
+				"@lezer/common": "^1.0.0",
+				"@lezer/highlight": "^1.0.0",
+				"@lezer/lr": "^1.3.1"
+			}
+		},
+		"node_modules/@codemirror/lang-markdown": {
+			"version": "6.3.0",
+			"resolved": "https://registry.npmjs.org/@codemirror/lang-markdown/-/lang-markdown-6.3.0.tgz",
+			"integrity": "sha512-lYrI8SdL/vhd0w0aHIEvIRLRecLF7MiiRfzXFZY94dFwHqC9HtgxgagJ8fyYNBldijGatf9wkms60d8SrAj6Nw==",
+			"dependencies": {
+				"@codemirror/autocomplete": "^6.7.1",
+				"@codemirror/lang-html": "^6.0.0",
+				"@codemirror/language": "^6.3.0",
+				"@codemirror/state": "^6.0.0",
+				"@codemirror/view": "^6.0.0",
+				"@lezer/common": "^1.2.1",
+				"@lezer/markdown": "^1.0.0"
+			}
+		},
+		"node_modules/@codemirror/lang-php": {
+			"version": "6.0.1",
+			"resolved": "https://registry.npmjs.org/@codemirror/lang-php/-/lang-php-6.0.1.tgz",
+			"integrity": "sha512-ublojMdw/PNWa7qdN5TMsjmqkNuTBD3k6ndZ4Z0S25SBAiweFGyY68AS3xNcIOlb6DDFDvKlinLQ40vSLqf8xA==",
+			"dependencies": {
+				"@codemirror/lang-html": "^6.0.0",
+				"@codemirror/language": "^6.0.0",
+				"@codemirror/state": "^6.0.0",
+				"@lezer/common": "^1.0.0",
+				"@lezer/php": "^1.0.0"
+			}
+		},
 		"node_modules/@codemirror/lang-python": {
 			"version": "6.1.6",
 			"resolved": "https://registry.npmjs.org/@codemirror/lang-python/-/lang-python-6.1.6.tgz",
@@ -176,6 +311,90 @@
 				"@lezer/python": "^1.1.4"
 			}
 		},
+		"node_modules/@codemirror/lang-rust": {
+			"version": "6.0.1",
+			"resolved": "https://registry.npmjs.org/@codemirror/lang-rust/-/lang-rust-6.0.1.tgz",
+			"integrity": "sha512-344EMWFBzWArHWdZn/NcgkwMvZIWUR1GEBdwG8FEp++6o6vT6KL9V7vGs2ONsKxxFUPXKI0SPcWhyYyl2zPYxQ==",
+			"dependencies": {
+				"@codemirror/language": "^6.0.0",
+				"@lezer/rust": "^1.0.0"
+			}
+		},
+		"node_modules/@codemirror/lang-sass": {
+			"version": "6.0.2",
+			"resolved": "https://registry.npmjs.org/@codemirror/lang-sass/-/lang-sass-6.0.2.tgz",
+			"integrity": "sha512-l/bdzIABvnTo1nzdY6U+kPAC51czYQcOErfzQ9zSm9D8GmNPD0WTW8st/CJwBTPLO8jlrbyvlSEcN20dc4iL0Q==",
+			"dependencies": {
+				"@codemirror/lang-css": "^6.2.0",
+				"@codemirror/language": "^6.0.0",
+				"@codemirror/state": "^6.0.0",
+				"@lezer/common": "^1.0.2",
+				"@lezer/sass": "^1.0.0"
+			}
+		},
+		"node_modules/@codemirror/lang-sql": {
+			"version": "6.8.0",
+			"resolved": "https://registry.npmjs.org/@codemirror/lang-sql/-/lang-sql-6.8.0.tgz",
+			"integrity": "sha512-aGLmY4OwGqN3TdSx3h6QeA1NrvaYtF7kkoWR/+W7/JzB0gQtJ+VJxewlnE3+VImhA4WVlhmkJr109PefOOhjLg==",
+			"dependencies": {
+				"@codemirror/autocomplete": "^6.0.0",
+				"@codemirror/language": "^6.0.0",
+				"@codemirror/state": "^6.0.0",
+				"@lezer/common": "^1.2.0",
+				"@lezer/highlight": "^1.0.0",
+				"@lezer/lr": "^1.0.0"
+			}
+		},
+		"node_modules/@codemirror/lang-vue": {
+			"version": "0.1.3",
+			"resolved": "https://registry.npmjs.org/@codemirror/lang-vue/-/lang-vue-0.1.3.tgz",
+			"integrity": "sha512-QSKdtYTDRhEHCfo5zOShzxCmqKJvgGrZwDQSdbvCRJ5pRLWBS7pD/8e/tH44aVQT6FKm0t6RVNoSUWHOI5vNug==",
+			"dependencies": {
+				"@codemirror/lang-html": "^6.0.0",
+				"@codemirror/lang-javascript": "^6.1.2",
+				"@codemirror/language": "^6.0.0",
+				"@lezer/common": "^1.2.0",
+				"@lezer/highlight": "^1.0.0",
+				"@lezer/lr": "^1.3.1"
+			}
+		},
+		"node_modules/@codemirror/lang-wast": {
+			"version": "6.0.2",
+			"resolved": "https://registry.npmjs.org/@codemirror/lang-wast/-/lang-wast-6.0.2.tgz",
+			"integrity": "sha512-Imi2KTpVGm7TKuUkqyJ5NRmeFWF7aMpNiwHnLQe0x9kmrxElndyH0K6H/gXtWwY6UshMRAhpENsgfpSwsgmC6Q==",
+			"dependencies": {
+				"@codemirror/language": "^6.0.0",
+				"@lezer/common": "^1.2.0",
+				"@lezer/highlight": "^1.0.0",
+				"@lezer/lr": "^1.0.0"
+			}
+		},
+		"node_modules/@codemirror/lang-xml": {
+			"version": "6.1.0",
+			"resolved": "https://registry.npmjs.org/@codemirror/lang-xml/-/lang-xml-6.1.0.tgz",
+			"integrity": "sha512-3z0blhicHLfwi2UgkZYRPioSgVTo9PV5GP5ducFH6FaHy0IAJRg+ixj5gTR1gnT/glAIC8xv4w2VL1LoZfs+Jg==",
+			"dependencies": {
+				"@codemirror/autocomplete": "^6.0.0",
+				"@codemirror/language": "^6.4.0",
+				"@codemirror/state": "^6.0.0",
+				"@codemirror/view": "^6.0.0",
+				"@lezer/common": "^1.0.0",
+				"@lezer/xml": "^1.0.0"
+			}
+		},
+		"node_modules/@codemirror/lang-yaml": {
+			"version": "6.1.1",
+			"resolved": "https://registry.npmjs.org/@codemirror/lang-yaml/-/lang-yaml-6.1.1.tgz",
+			"integrity": "sha512-HV2NzbK9bbVnjWxwObuZh5FuPCowx51mEfoFT9y3y+M37fA3+pbxx4I7uePuygFzDsAmCTwQSc/kXh/flab4uw==",
+			"dependencies": {
+				"@codemirror/autocomplete": "^6.0.0",
+				"@codemirror/language": "^6.0.0",
+				"@codemirror/state": "^6.0.0",
+				"@lezer/common": "^1.2.0",
+				"@lezer/highlight": "^1.2.0",
+				"@lezer/yaml": "^1.0.0"
+			}
+		},
 		"node_modules/@codemirror/language": {
 			"version": "6.10.2",
 			"resolved": "https://registry.npmjs.org/@codemirror/language/-/language-6.10.2.tgz",
@@ -189,6 +408,43 @@
 				"style-mod": "^4.0.0"
 			}
 		},
+		"node_modules/@codemirror/language-data": {
+			"version": "6.5.1",
+			"resolved": "https://registry.npmjs.org/@codemirror/language-data/-/language-data-6.5.1.tgz",
+			"integrity": "sha512-0sWxeUSNlBr6OmkqybUTImADFUP0M3P0IiSde4nc24bz/6jIYzqYSgkOSLS+CBIoW1vU8Q9KUWXscBXeoMVC9w==",
+			"dependencies": {
+				"@codemirror/lang-angular": "^0.1.0",
+				"@codemirror/lang-cpp": "^6.0.0",
+				"@codemirror/lang-css": "^6.0.0",
+				"@codemirror/lang-go": "^6.0.0",
+				"@codemirror/lang-html": "^6.0.0",
+				"@codemirror/lang-java": "^6.0.0",
+				"@codemirror/lang-javascript": "^6.0.0",
+				"@codemirror/lang-json": "^6.0.0",
+				"@codemirror/lang-less": "^6.0.0",
+				"@codemirror/lang-liquid": "^6.0.0",
+				"@codemirror/lang-markdown": "^6.0.0",
+				"@codemirror/lang-php": "^6.0.0",
+				"@codemirror/lang-python": "^6.0.0",
+				"@codemirror/lang-rust": "^6.0.0",
+				"@codemirror/lang-sass": "^6.0.0",
+				"@codemirror/lang-sql": "^6.0.0",
+				"@codemirror/lang-vue": "^0.1.1",
+				"@codemirror/lang-wast": "^6.0.0",
+				"@codemirror/lang-xml": "^6.0.0",
+				"@codemirror/lang-yaml": "^6.0.0",
+				"@codemirror/language": "^6.0.0",
+				"@codemirror/legacy-modes": "^6.4.0"
+			}
+		},
+		"node_modules/@codemirror/legacy-modes": {
+			"version": "6.4.1",
+			"resolved": "https://registry.npmjs.org/@codemirror/legacy-modes/-/legacy-modes-6.4.1.tgz",
+			"integrity": "sha512-vdg3XY7OAs5uLDx2Iw+cGfnwtd7kM+Et/eMsqAGTfT/JKiVBQZXosTzjEbWAi/FrY6DcQIz8mQjBozFHZEUWQA==",
+			"dependencies": {
+				"@codemirror/language": "^6.0.0"
+			}
+		},
 		"node_modules/@codemirror/lint": {
 			"version": "6.8.0",
 			"resolved": "https://registry.npmjs.org/@codemirror/lint/-/lint-6.8.0.tgz",
@@ -246,9 +502,9 @@
 			}
 		},
 		"node_modules/@cypress/request": {
-			"version": "3.0.1",
-			"resolved": "https://registry.npmjs.org/@cypress/request/-/request-3.0.1.tgz",
-			"integrity": "sha512-TWivJlJi8ZDx2wGOw1dbLuHJKUYX7bWySw377nlnGOW3hP9/MUKIsEdXT/YngWxVdgNCHRBmFlBipE+5/2ZZlQ==",
+			"version": "3.0.5",
+			"resolved": "https://registry.npmjs.org/@cypress/request/-/request-3.0.5.tgz",
+			"integrity": "sha512-v+XHd9XmWbufxF1/bTaVm2yhbxY+TB4YtWRqF2zaXBlDNMkls34KiATz0AVDLavL3iB6bQk9/7n3oY1EoLSWGA==",
 			"dev": true,
 			"dependencies": {
 				"aws-sign2": "~0.7.0",
@@ -257,14 +513,14 @@
 				"combined-stream": "~1.0.6",
 				"extend": "~3.0.2",
 				"forever-agent": "~0.6.1",
-				"form-data": "~2.3.2",
-				"http-signature": "~1.3.6",
+				"form-data": "~4.0.0",
+				"http-signature": "~1.4.0",
 				"is-typedarray": "~1.0.0",
 				"isstream": "~0.1.2",
 				"json-stringify-safe": "~5.0.1",
 				"mime-types": "~2.1.19",
 				"performance-now": "^2.1.0",
-				"qs": "6.10.4",
+				"qs": "6.13.0",
 				"safe-buffer": "^5.1.2",
 				"tough-cookie": "^4.1.3",
 				"tunnel-agent": "^0.6.0",
@@ -949,6 +1205,36 @@
 			"resolved": "https://registry.npmjs.org/@lezer/common/-/common-1.2.1.tgz",
 			"integrity": "sha512-yemX0ZD2xS/73llMZIK6KplkjIjf2EvAHcinDi/TfJ9hS25G0388+ClHt6/3but0oOxinTcQHJLDXh6w1crzFQ=="
 		},
+		"node_modules/@lezer/cpp": {
+			"version": "1.1.2",
+			"resolved": "https://registry.npmjs.org/@lezer/cpp/-/cpp-1.1.2.tgz",
+			"integrity": "sha512-macwKtyeUO0EW86r3xWQCzOV9/CF8imJLpJlPv3sDY57cPGeUZ8gXWOWNlJr52TVByMV3PayFQCA5SHEERDmVQ==",
+			"dependencies": {
+				"@lezer/common": "^1.2.0",
+				"@lezer/highlight": "^1.0.0",
+				"@lezer/lr": "^1.0.0"
+			}
+		},
+		"node_modules/@lezer/css": {
+			"version": "1.1.9",
+			"resolved": "https://registry.npmjs.org/@lezer/css/-/css-1.1.9.tgz",
+			"integrity": "sha512-TYwgljcDv+YrV0MZFFvYFQHCfGgbPMR6nuqLabBdmZoFH3EP1gvw8t0vae326Ne3PszQkbXfVBjCnf3ZVCr0bA==",
+			"dependencies": {
+				"@lezer/common": "^1.2.0",
+				"@lezer/highlight": "^1.0.0",
+				"@lezer/lr": "^1.0.0"
+			}
+		},
+		"node_modules/@lezer/go": {
+			"version": "1.0.0",
+			"resolved": "https://registry.npmjs.org/@lezer/go/-/go-1.0.0.tgz",
+			"integrity": "sha512-co9JfT3QqX1YkrMmourYw2Z8meGC50Ko4d54QEcQbEYpvdUvN4yb0NBZdn/9ertgvjsySxHsKzH3lbm3vqJ4Jw==",
+			"dependencies": {
+				"@lezer/common": "^1.2.0",
+				"@lezer/highlight": "^1.0.0",
+				"@lezer/lr": "^1.0.0"
+			}
+		},
 		"node_modules/@lezer/highlight": {
 			"version": "1.2.0",
 			"resolved": "https://registry.npmjs.org/@lezer/highlight/-/highlight-1.2.0.tgz",
@@ -957,6 +1243,26 @@
 				"@lezer/common": "^1.0.0"
 			}
 		},
+		"node_modules/@lezer/html": {
+			"version": "1.3.10",
+			"resolved": "https://registry.npmjs.org/@lezer/html/-/html-1.3.10.tgz",
+			"integrity": "sha512-dqpT8nISx/p9Do3AchvYGV3qYc4/rKr3IBZxlHmpIKam56P47RSHkSF5f13Vu9hebS1jM0HmtJIwLbWz1VIY6w==",
+			"dependencies": {
+				"@lezer/common": "^1.2.0",
+				"@lezer/highlight": "^1.0.0",
+				"@lezer/lr": "^1.0.0"
+			}
+		},
+		"node_modules/@lezer/java": {
+			"version": "1.1.2",
+			"resolved": "https://registry.npmjs.org/@lezer/java/-/java-1.1.2.tgz",
+			"integrity": "sha512-3j8X70JvYf0BZt8iSRLXLkt0Ry1hVUgH6wT32yBxH/Xi55nW2VMhc1Az4SKwu4YGSmxCm1fsqDDcHTuFjC8pmg==",
+			"dependencies": {
+				"@lezer/common": "^1.2.0",
+				"@lezer/highlight": "^1.0.0",
+				"@lezer/lr": "^1.0.0"
+			}
+		},
 		"node_modules/@lezer/javascript": {
 			"version": "1.4.16",
 			"resolved": "https://registry.npmjs.org/@lezer/javascript/-/javascript-1.4.16.tgz",
@@ -967,6 +1273,16 @@
 				"@lezer/lr": "^1.3.0"
 			}
 		},
+		"node_modules/@lezer/json": {
+			"version": "1.0.2",
+			"resolved": "https://registry.npmjs.org/@lezer/json/-/json-1.0.2.tgz",
+			"integrity": "sha512-xHT2P4S5eeCYECyKNPhr4cbEL9tc8w83SPwRC373o9uEdrvGKTZoJVAGxpOsZckMlEh9W23Pc72ew918RWQOBQ==",
+			"dependencies": {
+				"@lezer/common": "^1.2.0",
+				"@lezer/highlight": "^1.0.0",
+				"@lezer/lr": "^1.0.0"
+			}
+		},
 		"node_modules/@lezer/lr": {
 			"version": "1.4.1",
 			"resolved": "https://registry.npmjs.org/@lezer/lr/-/lr-1.4.1.tgz",
@@ -975,6 +1291,25 @@
 				"@lezer/common": "^1.0.0"
 			}
 		},
+		"node_modules/@lezer/markdown": {
+			"version": "1.3.1",
+			"resolved": "https://registry.npmjs.org/@lezer/markdown/-/markdown-1.3.1.tgz",
+			"integrity": "sha512-DGlzU/i8DC8k0uz1F+jeePrkATl0jWakauTzftMQOcbaMkHbNSRki/4E2tOzJWsVpoKYhe7iTJ03aepdwVUXUA==",
+			"dependencies": {
+				"@lezer/common": "^1.0.0",
+				"@lezer/highlight": "^1.0.0"
+			}
+		},
+		"node_modules/@lezer/php": {
+			"version": "1.0.2",
+			"resolved": "https://registry.npmjs.org/@lezer/php/-/php-1.0.2.tgz",
+			"integrity": "sha512-GN7BnqtGRpFyeoKSEqxvGvhJQiI4zkgmYnDk/JIyc7H7Ifc1tkPnUn/R2R8meH3h/aBf5rzjvU8ZQoyiNDtDrA==",
+			"dependencies": {
+				"@lezer/common": "^1.2.0",
+				"@lezer/highlight": "^1.0.0",
+				"@lezer/lr": "^1.1.0"
+			}
+		},
 		"node_modules/@lezer/python": {
 			"version": "1.1.14",
 			"resolved": "https://registry.npmjs.org/@lezer/python/-/python-1.1.14.tgz",
@@ -985,6 +1320,46 @@
 				"@lezer/lr": "^1.0.0"
 			}
 		},
+		"node_modules/@lezer/rust": {
+			"version": "1.0.2",
+			"resolved": "https://registry.npmjs.org/@lezer/rust/-/rust-1.0.2.tgz",
+			"integrity": "sha512-Lz5sIPBdF2FUXcWeCu1//ojFAZqzTQNRga0aYv6dYXqJqPfMdCAI0NzajWUd4Xijj1IKJLtjoXRPMvTKWBcqKg==",
+			"dependencies": {
+				"@lezer/common": "^1.2.0",
+				"@lezer/highlight": "^1.0.0",
+				"@lezer/lr": "^1.0.0"
+			}
+		},
+		"node_modules/@lezer/sass": {
+			"version": "1.0.7",
+			"resolved": "https://registry.npmjs.org/@lezer/sass/-/sass-1.0.7.tgz",
+			"integrity": "sha512-8HLlOkuX/SMHOggI2DAsXUw38TuURe+3eQ5hiuk9QmYOUyC55B1dYEIMkav5A4IELVaW4e1T4P9WRiI5ka4mdw==",
+			"dependencies": {
+				"@lezer/common": "^1.2.0",
+				"@lezer/highlight": "^1.0.0",
+				"@lezer/lr": "^1.0.0"
+			}
+		},
+		"node_modules/@lezer/xml": {
+			"version": "1.0.5",
+			"resolved": "https://registry.npmjs.org/@lezer/xml/-/xml-1.0.5.tgz",
+			"integrity": "sha512-VFouqOzmUWfIg+tfmpcdV33ewtK+NSwd4ngSe1aG7HFb4BN0ExyY1b8msp+ndFrnlG4V4iC8yXacjFtrwERnaw==",
+			"dependencies": {
+				"@lezer/common": "^1.2.0",
+				"@lezer/highlight": "^1.0.0",
+				"@lezer/lr": "^1.0.0"
+			}
+		},
+		"node_modules/@lezer/yaml": {
+			"version": "1.0.3",
+			"resolved": "https://registry.npmjs.org/@lezer/yaml/-/yaml-1.0.3.tgz",
+			"integrity": "sha512-GuBLekbw9jDBDhGur82nuwkxKQ+a3W5H0GfaAthDXcAu+XdpS43VlnxA9E9hllkpSP5ellRDKjLLj7Lu9Wr6xA==",
+			"dependencies": {
+				"@lezer/common": "^1.2.0",
+				"@lezer/highlight": "^1.0.0",
+				"@lezer/lr": "^1.4.0"
+			}
+		},
 		"node_modules/@melt-ui/svelte": {
 			"version": "0.76.0",
 			"resolved": "https://registry.npmjs.org/@melt-ui/svelte/-/svelte-0.76.0.tgz",
@@ -1413,14 +1788,14 @@
 			}
 		},
 		"node_modules/@sveltejs/kit": {
-			"version": "2.5.20",
-			"resolved": "https://registry.npmjs.org/@sveltejs/kit/-/kit-2.5.20.tgz",
-			"integrity": "sha512-47rJ5BoYwURE/Rp7FNMLp3NzdbWC9DQ/PmKd0mebxT2D/PrPxZxcLImcD3zsWdX2iS6oJk8ITJbO/N2lWnnUqA==",
+			"version": "2.6.2",
+			"resolved": "https://registry.npmjs.org/@sveltejs/kit/-/kit-2.6.2.tgz",
+			"integrity": "sha512-ruogrSPXjckn5poUiZU8VYNCSPHq66SFR1AATvOikQxtP6LNI4niAZVX/AWZRe/EPDG3oY2DNJ9c5z7u0t2NAQ==",
 			"hasInstallScript": true,
 			"dependencies": {
 				"@types/cookie": "^0.6.0",
-				"cookie": "^0.6.0",
-				"devalue": "^5.0.0",
+				"cookie": "^0.7.0",
+				"devalue": "^5.1.0",
 				"esm-env": "^1.0.0",
 				"import-meta-resolve": "^4.1.0",
 				"kleur": "^4.1.5",
@@ -1438,7 +1813,7 @@
 				"node": ">=18.13"
 			},
 			"peerDependencies": {
-				"@sveltejs/vite-plugin-svelte": "^3.0.0",
+				"@sveltejs/vite-plugin-svelte": "^3.0.0 || ^4.0.0-next.1",
 				"svelte": "^4.0.0 || ^5.0.0-next.0",
 				"vite": "^5.0.3"
 			}
@@ -2061,6 +2436,14 @@
 				"url": "https://github.com/sponsors/epoberezkin"
 			}
 		},
+		"node_modules/amator": {
+			"version": "1.1.0",
+			"resolved": "https://registry.npmjs.org/amator/-/amator-1.1.0.tgz",
+			"integrity": "sha512-V5+aH8pe+Z3u/UG3L3pG3BaFQGXAyXHVQDroRwjPHdh08bcUEchAVsU1MCuJSCaU5o60wTK6KaE6te5memzgYw==",
+			"dependencies": {
+				"bezier-easing": "^2.0.3"
+			}
+		},
 		"node_modules/ansi-colors": {
 			"version": "4.1.3",
 			"resolved": "https://registry.npmjs.org/ansi-colors/-/ansi-colors-4.1.3.tgz",
@@ -2292,9 +2675,9 @@
 			}
 		},
 		"node_modules/aws4": {
-			"version": "1.12.0",
-			"resolved": "https://registry.npmjs.org/aws4/-/aws4-1.12.0.tgz",
-			"integrity": "sha512-NmWvPnx0F1SfrQbYwOi7OeaNGokp9XhzNioJ/CSBs8Qa4vxug81mhJEAVZwxXuBmYB5KDRfMq/F3RR0BIU7sWg==",
+			"version": "1.13.2",
+			"resolved": "https://registry.npmjs.org/aws4/-/aws4-1.13.2.tgz",
+			"integrity": "sha512-lHe62zvbTB5eEABUVi/AwVh0ZKY9rMMDhmm+eeyuuUQbQ3+J+fONVQOZyj+DdrvD4BY33uYniyRJ4UJIaSKAfw==",
 			"dev": true
 		},
 		"node_modules/axobject-query": {
@@ -2351,6 +2734,11 @@
 				"tweetnacl": "^0.14.3"
 			}
 		},
+		"node_modules/bezier-easing": {
+			"version": "2.1.0",
+			"resolved": "https://registry.npmjs.org/bezier-easing/-/bezier-easing-2.1.0.tgz",
+			"integrity": "sha512-gbIqZ/eslnUFC1tjEvtz0sgx+xTK20wDnYMIA27VA04R7w6xxXQPZDbibjA9DTWZRA2CXtwHykkVzlCaAJAZig=="
+		},
 		"node_modules/binary-extensions": {
 			"version": "2.3.0",
 			"resolved": "https://registry.npmjs.org/binary-extensions/-/binary-extensions-2.3.0.tgz",
@@ -3083,9 +3471,9 @@
 			"dev": true
 		},
 		"node_modules/cookie": {
-			"version": "0.6.0",
-			"resolved": "https://registry.npmjs.org/cookie/-/cookie-0.6.0.tgz",
-			"integrity": "sha512-U71cyTamuh1CRNCfpGY6to28lxvNwPG4Guz/EVjgf3Jmzv0vlDp1atT9eS5dDjMYHucpHbWns6Lwf3BKz6svdw==",
+			"version": "0.7.1",
+			"resolved": "https://registry.npmjs.org/cookie/-/cookie-0.7.1.tgz",
+			"integrity": "sha512-6DnInpx7SJ2AK3+CTUE/ZM0vWTUboZCegxhC2xiIydHR9jNuTAASBrfEpHhiGOZw/nX51bHt6YQl8jsGo4y/0w==",
 			"engines": {
 				"node": ">= 0.6"
 			}
@@ -3187,13 +3575,13 @@
 			}
 		},
 		"node_modules/cypress": {
-			"version": "13.8.1",
-			"resolved": "https://registry.npmjs.org/cypress/-/cypress-13.8.1.tgz",
-			"integrity": "sha512-Uk6ovhRbTg6FmXjeZW/TkbRM07KPtvM5gah1BIMp4Y2s+i/NMxgaLw0+PbYTOdw1+egE0FP3mWRiGcRkjjmhzA==",
+			"version": "13.15.0",
+			"resolved": "https://registry.npmjs.org/cypress/-/cypress-13.15.0.tgz",
+			"integrity": "sha512-53aO7PwOfi604qzOkCSzNlWquCynLlKE/rmmpSPcziRH6LNfaDUAklQT6WJIsD8ywxlIy+uVZsnTMCCQVd2kTw==",
 			"dev": true,
 			"hasInstallScript": true,
 			"dependencies": {
-				"@cypress/request": "^3.0.0",
+				"@cypress/request": "^3.0.4",
 				"@cypress/xvfb": "^1.2.4",
 				"@types/sinonjs__fake-timers": "8.1.1",
 				"@types/sizzle": "^2.3.2",
@@ -3232,7 +3620,7 @@
 				"request-progress": "^3.0.0",
 				"semver": "^7.5.3",
 				"supports-color": "^8.1.1",
-				"tmp": "~0.2.1",
+				"tmp": "~0.2.3",
 				"untildify": "^4.0.0",
 				"yauzl": "^2.10.0"
 			},
@@ -3870,9 +4258,9 @@
 			}
 		},
 		"node_modules/devalue": {
-			"version": "5.0.0",
-			"resolved": "https://registry.npmjs.org/devalue/-/devalue-5.0.0.tgz",
-			"integrity": "sha512-gO+/OMXF7488D+u3ue+G7Y4AA3ZmUnB3eHJXmBTgNHvr4ZNzl36A0ZtG+XCRNYCkYx/bFmw4qtkoFLa+wSrwAA=="
+			"version": "5.1.1",
+			"resolved": "https://registry.npmjs.org/devalue/-/devalue-5.1.1.tgz",
+			"integrity": "sha512-maua5KUiapvEwiEAe+XnlZ3Rh0GD+qI1J/nb9vrJc3muPXvcF/8gXYTWF76+5DAqHyDUtOIImEuo0YKE9mshVw=="
 		},
 		"node_modules/didyoumean": {
 			"version": "1.2.2",
@@ -4715,17 +5103,17 @@
 			}
 		},
 		"node_modules/form-data": {
-			"version": "2.3.3",
-			"resolved": "https://registry.npmjs.org/form-data/-/form-data-2.3.3.tgz",
-			"integrity": "sha512-1lLKB2Mu3aGP1Q/2eCOx0fNbRMe7XdwktwOruhfqqd0rIJWwN4Dh+E3hrPSlDCXnSR7UtZ1N38rVXm+6+MEhJQ==",
+			"version": "4.0.0",
+			"resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.0.tgz",
+			"integrity": "sha512-ETEklSGi5t0QMZuiXoA/Q6vcnxcLQP5vdugSpuAyi6SVGi2clPPp+xgEhuMaHC+zGgn31Kd235W35f7Hykkaww==",
 			"dev": true,
 			"dependencies": {
 				"asynckit": "^0.4.0",
-				"combined-stream": "^1.0.6",
+				"combined-stream": "^1.0.8",
 				"mime-types": "^2.1.12"
 			},
 			"engines": {
-				"node": ">= 0.12"
+				"node": ">= 6"
 			}
 		},
 		"node_modules/fraction.js": {
@@ -5211,14 +5599,14 @@
 			}
 		},
 		"node_modules/http-signature": {
-			"version": "1.3.6",
-			"resolved": "https://registry.npmjs.org/http-signature/-/http-signature-1.3.6.tgz",
-			"integrity": "sha512-3adrsD6zqo4GsTqtO7FyrejHNv+NgiIfAfv68+jVlFmSr9OGy7zrxONceFRLKvnnZA5jbxQBX1u9PpB6Wi32Gw==",
+			"version": "1.4.0",
+			"resolved": "https://registry.npmjs.org/http-signature/-/http-signature-1.4.0.tgz",
+			"integrity": "sha512-G5akfn7eKbpDN+8nPS/cb57YeA1jLTVxjpCj7tmm3QKPdyDy7T+qSC40e9ptydSWvkwjSXw1VbkpyEm39ukeAg==",
 			"dev": true,
 			"dependencies": {
 				"assert-plus": "^1.0.0",
 				"jsprim": "^2.0.2",
-				"sshpk": "^1.14.1"
+				"sshpk": "^1.18.0"
 			},
 			"engines": {
 				"node": ">=0.10"
@@ -6804,6 +7192,11 @@
 			"integrity": "sha512-OWND8ei3VtNC9h7V60qff3SVobHr996CTwgxubgyQYEpg290h9J0buyECNNJexkFm5sOajh5G116RYA1c8ZMSw==",
 			"dev": true
 		},
+		"node_modules/ngraph.events": {
+			"version": "1.2.2",
+			"resolved": "https://registry.npmjs.org/ngraph.events/-/ngraph.events-1.2.2.tgz",
+			"integrity": "sha512-JsUbEOzANskax+WSYiAPETemLWYXmixuPAlmZmhIbIj6FH/WDgEGCGnRwUQBK0GjOnVm8Ui+e5IJ+5VZ4e32eQ=="
+		},
 		"node_modules/node-releases": {
 			"version": "2.0.14",
 			"resolved": "https://registry.npmjs.org/node-releases/-/node-releases-2.0.14.tgz",
@@ -6888,10 +7281,13 @@
 			}
 		},
 		"node_modules/object-inspect": {
-			"version": "1.13.1",
-			"resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.13.1.tgz",
-			"integrity": "sha512-5qoj1RUiKOMsCCNLV1CBiPYE10sziTsnmNxkAI/rZhiD63CF7IqdFGC/XzjWjpSgLf0LxXX3bDFIh0E18f6UhQ==",
+			"version": "1.13.2",
+			"resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.13.2.tgz",
+			"integrity": "sha512-IRZSRuzJiynemAXPYtPe5BoI/RESNYR7TYm50MC5Mqbd3Jmw5y790sErYw3V6SryFJD64b74qQQs9wn5Bg/k3g==",
 			"dev": true,
+			"engines": {
+				"node": ">= 0.4"
+			},
 			"funding": {
 				"url": "https://github.com/sponsors/ljharb"
 			}
@@ -6998,6 +7394,16 @@
 				"svelte": "^4.0.0 || ^5.0.0-next.1"
 			}
 		},
+		"node_modules/panzoom": {
+			"version": "9.4.3",
+			"resolved": "https://registry.npmjs.org/panzoom/-/panzoom-9.4.3.tgz",
+			"integrity": "sha512-xaxCpElcRbQsUtIdwlrZA90P90+BHip4Vda2BC8MEb4tkI05PmR6cKECdqUCZ85ZvBHjpI9htJrZBxV5Gp/q/w==",
+			"dependencies": {
+				"amator": "^1.1.0",
+				"ngraph.events": "^1.2.2",
+				"wheel": "^1.0.0"
+			}
+		},
 		"node_modules/parent-module": {
 			"version": "1.0.1",
 			"resolved": "https://registry.npmjs.org/parent-module/-/parent-module-1.0.1.tgz",
@@ -7583,12 +7989,12 @@
 			}
 		},
 		"node_modules/qs": {
-			"version": "6.10.4",
-			"resolved": "https://registry.npmjs.org/qs/-/qs-6.10.4.tgz",
-			"integrity": "sha512-OQiU+C+Ds5qiH91qh/mg0w+8nwQuLjM4F4M/PbmhDOoYehPh+Fb0bDjtR1sOvy7YKxvj28Y/M0PhP5uVX0kB+g==",
+			"version": "6.13.0",
+			"resolved": "https://registry.npmjs.org/qs/-/qs-6.13.0.tgz",
+			"integrity": "sha512-+38qI9SOr8tfZ4QmJNplMUxqjbe7LKvvZgWdExBOmd+egZTtjLB67Gu0HRX3u/XOq7UU2Nx6nsjvS16Z9uwfpg==",
 			"dev": true,
 			"dependencies": {
-				"side-channel": "^1.0.4"
+				"side-channel": "^1.0.6"
 			},
 			"engines": {
 				"node": ">=0.6"
@@ -9025,9 +9431,9 @@
 			}
 		},
 		"node_modules/tough-cookie": {
-			"version": "4.1.3",
-			"resolved": "https://registry.npmjs.org/tough-cookie/-/tough-cookie-4.1.3.tgz",
-			"integrity": "sha512-aX/y5pVRkfRnfmuX+OdbSdXvPe6ieKX/G2s7e98f4poJHnqH3281gDPm/metm6E/WRamfx7WC4HUqkWHfQHprw==",
+			"version": "4.1.4",
+			"resolved": "https://registry.npmjs.org/tough-cookie/-/tough-cookie-4.1.4.tgz",
+			"integrity": "sha512-Loo5UUvLD9ScZ6jh8beX1T6sO1w2/MpCRpEP7V280GKMVUQ0Jzar2U3UJPsrdbziLEMMhu3Ujnq//rhiFuIeag==",
 			"dev": true,
 			"dependencies": {
 				"psl": "^1.1.33",
@@ -10104,6 +10510,11 @@
 			"resolved": "https://registry.npmjs.org/web-worker/-/web-worker-1.3.0.tgz",
 			"integrity": "sha512-BSR9wyRsy/KOValMgd5kMyr3JzpdeoR9KVId8u5GVlTTAtNChlsE4yTxeY7zMdNSyOmoKBv8NH2qeRY9Tg+IaA=="
 		},
+		"node_modules/wheel": {
+			"version": "1.0.0",
+			"resolved": "https://registry.npmjs.org/wheel/-/wheel-1.0.0.tgz",
+			"integrity": "sha512-XiCMHibOiqalCQ+BaNSwRoZ9FDTAvOsXxGHXChBugewDj7HC8VBIER71dEOiRH1fSdLbRCQzngKTSiZ06ZQzeA=="
+		},
 		"node_modules/which": {
 			"version": "2.0.2",
 			"resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz",

+ 5 - 3
package.json

@@ -1,6 +1,6 @@
 {
 	"name": "open-webui",
-	"version": "0.3.30",
+	"version": "0.3.31",
 	"private": true,
 	"scripts": {
 		"dev": "npm run pyodide:fetch && vite dev --host",
@@ -28,7 +28,7 @@
 		"@typescript-eslint/eslint-plugin": "^6.17.0",
 		"@typescript-eslint/parser": "^6.17.0",
 		"autoprefixer": "^10.4.16",
-		"cypress": "^13.8.1",
+		"cypress": "^13.15.0",
 		"eslint": "^8.56.0",
 		"eslint-config-prettier": "^9.1.0",
 		"eslint-plugin-cypress": "^3.4.0",
@@ -50,6 +50,7 @@
 	"dependencies": {
 		"@codemirror/lang-javascript": "^6.2.2",
 		"@codemirror/lang-python": "^6.1.6",
+		"@codemirror/language-data": "^6.5.1",
 		"@codemirror/theme-one-dark": "^6.1.2",
 		"@pyscript/core": "^0.4.32",
 		"@sveltejs/adapter-node": "^2.0.0",
@@ -73,6 +74,7 @@
 		"marked": "^9.1.0",
 		"mermaid": "^10.9.1",
 		"paneforge": "^0.0.6",
+		"panzoom": "^9.4.3",
 		"pyodide": "^0.26.1",
 		"socket.io-client": "^4.2.0",
 		"sortablejs": "^1.15.2",
@@ -82,7 +84,7 @@
 		"uuid": "^9.0.1"
 	},
 	"engines": {
-		"node": ">=18.13.0 <=21.x.x",
+		"node": ">=18.13.0 <=22.x.x",
 		"npm": ">=6.0.0"
 	}
 }

+ 4 - 2
pyproject.toml

@@ -46,13 +46,15 @@ dependencies = [
     "langchain-chroma==0.1.2",
 
     "fake-useragent==1.5.1",
-    "chromadb==0.5.5",
-    "pymilvus==2.4.6",
+    "chromadb==0.5.9",
+    "pymilvus==2.4.7",
 
     "sentence-transformers==3.0.1",
     "colbert-ai==0.2.21",
     "einops==0.8.0",
     
+
+    "ftfy==6.2.3",
     "pypdf==4.3.1",
     "docx2txt==0.8",
     "python-pptx==1.0.0",

+ 6 - 0
src/app.css

@@ -62,6 +62,12 @@ li p {
 	border-width: 1px;
 }
 
+/* Dark theme scrollbar styles */
+.dark ::-webkit-scrollbar-thumb {
+	background-color: rgba(69, 69, 74, 0.8); /* Darker color for dark theme */
+	border-color: rgba(0, 0, 0, var(--tw-border-opacity));
+}
+
 ::-webkit-scrollbar {
 	height: 0.4rem;
 	width: 0.4rem;

+ 1 - 0
src/app.html

@@ -3,6 +3,7 @@
 	<head>
 		<meta charset="utf-8" />
 		<link rel="icon" href="%sveltekit.assets%/favicon.png" />
+		<link rel="apple-touch-icon" href="%sveltekit.assets%/favicon.png" />
 		<link rel="manifest" href="%sveltekit.assets%/manifest.json" crossorigin="use-credentials" />
 		<meta
 			name="viewport"

+ 60 - 0
src/lib/apis/files/index.ts

@@ -30,6 +30,32 @@ export const uploadFile = async (token: string, file: File) => {
 	return res;
 };
 
+export const uploadDir = async (token: string) => {
+	let error = null;
+
+	const res = await fetch(`${WEBUI_API_BASE_URL}/files/upload/dir`, {
+		method: 'POST',
+		headers: {
+			Accept: 'application/json',
+			authorization: `Bearer ${token}`
+		}
+	})
+		.then(async (res) => {
+			if (!res.ok) throw await res.json();
+			return res.json();
+		})
+		.catch((err) => {
+			error = err.detail;
+			return null;
+		});
+
+	if (error) {
+		throw error;
+	}
+
+	return res;
+};
+
 export const getFiles = async (token: string = '') => {
 	let error = null;
 
@@ -92,6 +118,40 @@ export const getFileById = async (token: string, id: string) => {
 	return res;
 };
 
+export const updateFileDataContentById = async (token: string, id: string, content: string) => {
+	let error = null;
+
+	const res = await fetch(`${WEBUI_API_BASE_URL}/files/${id}/data/content/update`, {
+		method: 'POST',
+		headers: {
+			Accept: 'application/json',
+			'Content-Type': 'application/json',
+			authorization: `Bearer ${token}`
+		},
+		body: JSON.stringify({
+			content: content
+		})
+	})
+		.then(async (res) => {
+			if (!res.ok) throw await res.json();
+			return res.json();
+		})
+		.then((json) => {
+			return json;
+		})
+		.catch((err) => {
+			error = err.detail;
+			console.log(err);
+			return null;
+		});
+
+	if (error) {
+		throw error;
+	}
+
+	return res;
+};
+
 export const getFileContentById = async (id: string) => {
 	let error = null;
 

+ 308 - 0
src/lib/apis/knowledge/index.ts

@@ -0,0 +1,308 @@
+import { WEBUI_API_BASE_URL } from '$lib/constants';
+
+export const createNewKnowledge = async (token: string, name: string, description: string) => {
+	let error = null;
+
+	const res = await fetch(`${WEBUI_API_BASE_URL}/knowledge/create`, {
+		method: 'POST',
+		headers: {
+			Accept: 'application/json',
+			'Content-Type': 'application/json',
+			authorization: `Bearer ${token}`
+		},
+		body: JSON.stringify({
+			name: name,
+			description: description
+		})
+	})
+		.then(async (res) => {
+			if (!res.ok) throw await res.json();
+			return res.json();
+		})
+		.catch((err) => {
+			error = err.detail;
+			console.log(err);
+			return null;
+		});
+
+	if (error) {
+		throw error;
+	}
+
+	return res;
+};
+
+export const getKnowledgeItems = async (token: string = '') => {
+	let error = null;
+
+	const res = await fetch(`${WEBUI_API_BASE_URL}/knowledge/`, {
+		method: 'GET',
+		headers: {
+			Accept: 'application/json',
+			'Content-Type': 'application/json',
+			authorization: `Bearer ${token}`
+		}
+	})
+		.then(async (res) => {
+			if (!res.ok) throw await res.json();
+			return res.json();
+		})
+		.then((json) => {
+			return json;
+		})
+		.catch((err) => {
+			error = err.detail;
+			console.log(err);
+			return null;
+		});
+
+	if (error) {
+		throw error;
+	}
+
+	return res;
+};
+
+export const getKnowledgeById = async (token: string, id: string) => {
+	let error = null;
+
+	const res = await fetch(`${WEBUI_API_BASE_URL}/knowledge/${id}`, {
+		method: 'GET',
+		headers: {
+			Accept: 'application/json',
+			'Content-Type': 'application/json',
+			authorization: `Bearer ${token}`
+		}
+	})
+		.then(async (res) => {
+			if (!res.ok) throw await res.json();
+			return res.json();
+		})
+		.then((json) => {
+			return json;
+		})
+		.catch((err) => {
+			error = err.detail;
+
+			console.log(err);
+			return null;
+		});
+
+	if (error) {
+		throw error;
+	}
+
+	return res;
+};
+
+type KnowledgeUpdateForm = {
+	name?: string;
+	description?: string;
+	data?: object;
+};
+
+export const updateKnowledgeById = async (token: string, id: string, form: KnowledgeUpdateForm) => {
+	let error = null;
+
+	const res = await fetch(`${WEBUI_API_BASE_URL}/knowledge/${id}/update`, {
+		method: 'POST',
+		headers: {
+			Accept: 'application/json',
+			'Content-Type': 'application/json',
+			authorization: `Bearer ${token}`
+		},
+		body: JSON.stringify({
+			name: form?.name ? form.name : undefined,
+			description: form?.description ? form.description : undefined,
+			data: form?.data ? form.data : undefined
+		})
+	})
+		.then(async (res) => {
+			if (!res.ok) throw await res.json();
+			return res.json();
+		})
+		.then((json) => {
+			return json;
+		})
+		.catch((err) => {
+			error = err.detail;
+
+			console.log(err);
+			return null;
+		});
+
+	if (error) {
+		throw error;
+	}
+
+	return res;
+};
+
+export const addFileToKnowledgeById = async (token: string, id: string, fileId: string) => {
+	let error = null;
+
+	const res = await fetch(`${WEBUI_API_BASE_URL}/knowledge/${id}/file/add`, {
+		method: 'POST',
+		headers: {
+			Accept: 'application/json',
+			'Content-Type': 'application/json',
+			authorization: `Bearer ${token}`
+		},
+		body: JSON.stringify({
+			file_id: fileId
+		})
+	})
+		.then(async (res) => {
+			if (!res.ok) throw await res.json();
+			return res.json();
+		})
+		.then((json) => {
+			return json;
+		})
+		.catch((err) => {
+			error = err.detail;
+
+			console.log(err);
+			return null;
+		});
+
+	if (error) {
+		throw error;
+	}
+
+	return res;
+};
+
+export const updateFileFromKnowledgeById = async (token: string, id: string, fileId: string) => {
+	let error = null;
+
+	const res = await fetch(`${WEBUI_API_BASE_URL}/knowledge/${id}/file/update`, {
+		method: 'POST',
+		headers: {
+			Accept: 'application/json',
+			'Content-Type': 'application/json',
+			authorization: `Bearer ${token}`
+		},
+		body: JSON.stringify({
+			file_id: fileId
+		})
+	})
+		.then(async (res) => {
+			if (!res.ok) throw await res.json();
+			return res.json();
+		})
+		.then((json) => {
+			return json;
+		})
+		.catch((err) => {
+			error = err.detail;
+
+			console.log(err);
+			return null;
+		});
+
+	if (error) {
+		throw error;
+	}
+
+	return res;
+};
+
+export const removeFileFromKnowledgeById = async (token: string, id: string, fileId: string) => {
+	let error = null;
+
+	const res = await fetch(`${WEBUI_API_BASE_URL}/knowledge/${id}/file/remove`, {
+		method: 'POST',
+		headers: {
+			Accept: 'application/json',
+			'Content-Type': 'application/json',
+			authorization: `Bearer ${token}`
+		},
+		body: JSON.stringify({
+			file_id: fileId
+		})
+	})
+		.then(async (res) => {
+			if (!res.ok) throw await res.json();
+			return res.json();
+		})
+		.then((json) => {
+			return json;
+		})
+		.catch((err) => {
+			error = err.detail;
+
+			console.log(err);
+			return null;
+		});
+
+	if (error) {
+		throw error;
+	}
+
+	return res;
+};
+
+export const resetKnowledgeById = async (token: string, id: string) => {
+	let error = null;
+
+	const res = await fetch(`${WEBUI_API_BASE_URL}/knowledge/${id}/reset`, {
+		method: 'POST',
+		headers: {
+			Accept: 'application/json',
+			'Content-Type': 'application/json',
+			authorization: `Bearer ${token}`
+		}
+	})
+		.then(async (res) => {
+			if (!res.ok) throw await res.json();
+			return res.json();
+		})
+		.then((json) => {
+			return json;
+		})
+		.catch((err) => {
+			error = err.detail;
+
+			console.log(err);
+			return null;
+		});
+
+	if (error) {
+		throw error;
+	}
+
+	return res;
+};
+
+export const deleteKnowledgeById = async (token: string, id: string) => {
+	let error = null;
+
+	const res = await fetch(`${WEBUI_API_BASE_URL}/knowledge/${id}/delete`, {
+		method: 'DELETE',
+		headers: {
+			Accept: 'application/json',
+			'Content-Type': 'application/json',
+			authorization: `Bearer ${token}`
+		}
+	})
+		.then(async (res) => {
+			if (!res.ok) throw await res.json();
+			return res.json();
+		})
+		.then((json) => {
+			return json;
+		})
+		.catch((err) => {
+			error = err.detail;
+
+			console.log(err);
+			return null;
+		});
+
+	if (error) {
+		throw error;
+	}
+
+	return res;
+};

+ 121 - 174
src/lib/apis/rag/index.ts → src/lib/apis/retrieval/index.ts

@@ -1,9 +1,9 @@
-import { RAG_API_BASE_URL } from '$lib/constants';
+import { RETRIEVAL_API_BASE_URL } from '$lib/constants';
 
 export const getRAGConfig = async (token: string) => {
 	let error = null;
 
-	const res = await fetch(`${RAG_API_BASE_URL}/config`, {
+	const res = await fetch(`${RETRIEVAL_API_BASE_URL}/config`, {
 		method: 'GET',
 		headers: {
 			'Content-Type': 'application/json',
@@ -53,7 +53,7 @@ type RAGConfigForm = {
 export const updateRAGConfig = async (token: string, payload: RAGConfigForm) => {
 	let error = null;
 
-	const res = await fetch(`${RAG_API_BASE_URL}/config/update`, {
+	const res = await fetch(`${RETRIEVAL_API_BASE_URL}/config/update`, {
 		method: 'POST',
 		headers: {
 			'Content-Type': 'application/json',
@@ -83,7 +83,7 @@ export const updateRAGConfig = async (token: string, payload: RAGConfigForm) =>
 export const getRAGTemplate = async (token: string) => {
 	let error = null;
 
-	const res = await fetch(`${RAG_API_BASE_URL}/template`, {
+	const res = await fetch(`${RETRIEVAL_API_BASE_URL}/template`, {
 		method: 'GET',
 		headers: {
 			'Content-Type': 'application/json',
@@ -110,7 +110,7 @@ export const getRAGTemplate = async (token: string) => {
 export const getQuerySettings = async (token: string) => {
 	let error = null;
 
-	const res = await fetch(`${RAG_API_BASE_URL}/query/settings`, {
+	const res = await fetch(`${RETRIEVAL_API_BASE_URL}/query/settings`, {
 		method: 'GET',
 		headers: {
 			'Content-Type': 'application/json',
@@ -143,7 +143,7 @@ type QuerySettings = {
 export const updateQuerySettings = async (token: string, settings: QuerySettings) => {
 	let error = null;
 
-	const res = await fetch(`${RAG_API_BASE_URL}/query/settings/update`, {
+	const res = await fetch(`${RETRIEVAL_API_BASE_URL}/query/settings/update`, {
 		method: 'POST',
 		headers: {
 			'Content-Type': 'application/json',
@@ -170,27 +170,23 @@ export const updateQuerySettings = async (token: string, settings: QuerySettings
 	return res;
 };
 
-export const processDocToVectorDB = async (token: string, file_id: string) => {
+export const getEmbeddingConfig = async (token: string) => {
 	let error = null;
 
-	const res = await fetch(`${RAG_API_BASE_URL}/process/doc`, {
-		method: 'POST',
+	const res = await fetch(`${RETRIEVAL_API_BASE_URL}/embedding`, {
+		method: 'GET',
 		headers: {
-			Accept: 'application/json',
 			'Content-Type': 'application/json',
-			authorization: `Bearer ${token}`
-		},
-		body: JSON.stringify({
-			file_id: file_id
-		})
+			Authorization: `Bearer ${token}`
+		}
 	})
 		.then(async (res) => {
 			if (!res.ok) throw await res.json();
 			return res.json();
 		})
 		.catch((err) => {
-			error = err.detail;
 			console.log(err);
+			error = err.detail;
 			return null;
 		});
 
@@ -201,51 +197,29 @@ export const processDocToVectorDB = async (token: string, file_id: string) => {
 	return res;
 };
 
-export const uploadDocToVectorDB = async (token: string, collection_name: string, file: File) => {
-	const data = new FormData();
-	data.append('file', file);
-	data.append('collection_name', collection_name);
-
-	let error = null;
-
-	const res = await fetch(`${RAG_API_BASE_URL}/doc`, {
-		method: 'POST',
-		headers: {
-			Accept: 'application/json',
-			authorization: `Bearer ${token}`
-		},
-		body: data
-	})
-		.then(async (res) => {
-			if (!res.ok) throw await res.json();
-			return res.json();
-		})
-		.catch((err) => {
-			error = err.detail;
-			console.log(err);
-			return null;
-		});
-
-	if (error) {
-		throw error;
-	}
+type OpenAIConfigForm = {
+	key: string;
+	url: string;
+	batch_size: number;
+};
 
-	return res;
+type EmbeddingModelUpdateForm = {
+	openai_config?: OpenAIConfigForm;
+	embedding_engine: string;
+	embedding_model: string;
 };
 
-export const uploadWebToVectorDB = async (token: string, collection_name: string, url: string) => {
+export const updateEmbeddingConfig = async (token: string, payload: EmbeddingModelUpdateForm) => {
 	let error = null;
 
-	const res = await fetch(`${RAG_API_BASE_URL}/web`, {
+	const res = await fetch(`${RETRIEVAL_API_BASE_URL}/embedding/update`, {
 		method: 'POST',
 		headers: {
-			Accept: 'application/json',
 			'Content-Type': 'application/json',
-			authorization: `Bearer ${token}`
+			Authorization: `Bearer ${token}`
 		},
 		body: JSON.stringify({
-			url: url,
-			collection_name: collection_name
+			...payload
 		})
 	})
 		.then(async (res) => {
@@ -253,8 +227,8 @@ export const uploadWebToVectorDB = async (token: string, collection_name: string
 			return res.json();
 		})
 		.catch((err) => {
-			error = err.detail;
 			console.log(err);
+			error = err.detail;
 			return null;
 		});
 
@@ -265,27 +239,23 @@ export const uploadWebToVectorDB = async (token: string, collection_name: string
 	return res;
 };
 
-export const uploadYoutubeTranscriptionToVectorDB = async (token: string, url: string) => {
+export const getRerankingConfig = async (token: string) => {
 	let error = null;
 
-	const res = await fetch(`${RAG_API_BASE_URL}/youtube`, {
-		method: 'POST',
+	const res = await fetch(`${RETRIEVAL_API_BASE_URL}/reranking`, {
+		method: 'GET',
 		headers: {
-			Accept: 'application/json',
 			'Content-Type': 'application/json',
-			authorization: `Bearer ${token}`
-		},
-		body: JSON.stringify({
-			url: url
-		})
+			Authorization: `Bearer ${token}`
+		}
 	})
 		.then(async (res) => {
 			if (!res.ok) throw await res.json();
 			return res.json();
 		})
 		.catch((err) => {
-			error = err.detail;
 			console.log(err);
+			error = err.detail;
 			return null;
 		});
 
@@ -296,25 +266,21 @@ export const uploadYoutubeTranscriptionToVectorDB = async (token: string, url: s
 	return res;
 };
 
-export const queryDoc = async (
-	token: string,
-	collection_name: string,
-	query: string,
-	k: number | null = null
-) => {
+type RerankingModelUpdateForm = {
+	reranking_model: string;
+};
+
+export const updateRerankingConfig = async (token: string, payload: RerankingModelUpdateForm) => {
 	let error = null;
 
-	const res = await fetch(`${RAG_API_BASE_URL}/query/doc`, {
+	const res = await fetch(`${RETRIEVAL_API_BASE_URL}/reranking/update`, {
 		method: 'POST',
 		headers: {
-			Accept: 'application/json',
 			'Content-Type': 'application/json',
-			authorization: `Bearer ${token}`
+			Authorization: `Bearer ${token}`
 		},
 		body: JSON.stringify({
-			collection_name: collection_name,
-			query: query,
-			k: k
+			...payload
 		})
 	})
 		.then(async (res) => {
@@ -322,6 +288,7 @@ export const queryDoc = async (
 			return res.json();
 		})
 		.catch((err) => {
+			console.log(err);
 			error = err.detail;
 			return null;
 		});
@@ -333,15 +300,20 @@ export const queryDoc = async (
 	return res;
 };
 
-export const queryCollection = async (
+export interface SearchDocument {
+	status: boolean;
+	collection_name: string;
+	filenames: string[];
+}
+
+export const processFile = async (
 	token: string,
-	collection_names: string,
-	query: string,
-	k: number | null = null
+	file_id: string,
+	collection_name: string | null = null
 ) => {
 	let error = null;
 
-	const res = await fetch(`${RAG_API_BASE_URL}/query/collection`, {
+	const res = await fetch(`${RETRIEVAL_API_BASE_URL}/process/file`, {
 		method: 'POST',
 		headers: {
 			Accept: 'application/json',
@@ -349,9 +321,8 @@ export const queryCollection = async (
 			authorization: `Bearer ${token}`
 		},
 		body: JSON.stringify({
-			collection_names: collection_names,
-			query: query,
-			k: k
+			file_id: file_id,
+			collection_name: collection_name ? collection_name : undefined
 		})
 	})
 		.then(async (res) => {
@@ -360,6 +331,7 @@ export const queryCollection = async (
 		})
 		.catch((err) => {
 			error = err.detail;
+			console.log(err);
 			return null;
 		});
 
@@ -370,41 +342,19 @@ export const queryCollection = async (
 	return res;
 };
 
-export const scanDocs = async (token: string) => {
-	let error = null;
-
-	const res = await fetch(`${RAG_API_BASE_URL}/scan`, {
-		method: 'GET',
-		headers: {
-			Accept: 'application/json',
-			authorization: `Bearer ${token}`
-		}
-	})
-		.then(async (res) => {
-			if (!res.ok) throw await res.json();
-			return res.json();
-		})
-		.catch((err) => {
-			error = err.detail;
-			return null;
-		});
-
-	if (error) {
-		throw error;
-	}
-
-	return res;
-};
-
-export const resetUploadDir = async (token: string) => {
+export const processYoutubeVideo = async (token: string, url: string) => {
 	let error = null;
 
-	const res = await fetch(`${RAG_API_BASE_URL}/reset/uploads`, {
+	const res = await fetch(`${RETRIEVAL_API_BASE_URL}/process/youtube`, {
 		method: 'POST',
 		headers: {
 			Accept: 'application/json',
+			'Content-Type': 'application/json',
 			authorization: `Bearer ${token}`
-		}
+		},
+		body: JSON.stringify({
+			url: url
+		})
 	})
 		.then(async (res) => {
 			if (!res.ok) throw await res.json();
@@ -412,6 +362,7 @@ export const resetUploadDir = async (token: string) => {
 		})
 		.catch((err) => {
 			error = err.detail;
+			console.log(err);
 			return null;
 		});
 
@@ -422,15 +373,20 @@ export const resetUploadDir = async (token: string) => {
 	return res;
 };
 
-export const resetVectorDB = async (token: string) => {
+export const processWeb = async (token: string, collection_name: string, url: string) => {
 	let error = null;
 
-	const res = await fetch(`${RAG_API_BASE_URL}/reset/db`, {
+	const res = await fetch(`${RETRIEVAL_API_BASE_URL}/process/web`, {
 		method: 'POST',
 		headers: {
 			Accept: 'application/json',
+			'Content-Type': 'application/json',
 			authorization: `Bearer ${token}`
-		}
+		},
+		body: JSON.stringify({
+			url: url,
+			collection_name: collection_name
+		})
 	})
 		.then(async (res) => {
 			if (!res.ok) throw await res.json();
@@ -438,6 +394,7 @@ export const resetVectorDB = async (token: string) => {
 		})
 		.catch((err) => {
 			error = err.detail;
+			console.log(err);
 			return null;
 		});
 
@@ -448,15 +405,23 @@ export const resetVectorDB = async (token: string) => {
 	return res;
 };
 
-export const getEmbeddingConfig = async (token: string) => {
+export const processWebSearch = async (
+	token: string,
+	query: string,
+	collection_name?: string
+): Promise<SearchDocument | null> => {
 	let error = null;
 
-	const res = await fetch(`${RAG_API_BASE_URL}/embedding`, {
-		method: 'GET',
+	const res = await fetch(`${RETRIEVAL_API_BASE_URL}/process/web/search`, {
+		method: 'POST',
 		headers: {
 			'Content-Type': 'application/json',
 			Authorization: `Bearer ${token}`
-		}
+		},
+		body: JSON.stringify({
+			query,
+			collection_name: collection_name ?? ''
+		})
 	})
 		.then(async (res) => {
 			if (!res.ok) throw await res.json();
@@ -475,29 +440,25 @@ export const getEmbeddingConfig = async (token: string) => {
 	return res;
 };
 
-type OpenAIConfigForm = {
-	key: string;
-	url: string;
-	batch_size: number;
-};
-
-type EmbeddingModelUpdateForm = {
-	openai_config?: OpenAIConfigForm;
-	embedding_engine: string;
-	embedding_model: string;
-};
-
-export const updateEmbeddingConfig = async (token: string, payload: EmbeddingModelUpdateForm) => {
+export const queryDoc = async (
+	token: string,
+	collection_name: string,
+	query: string,
+	k: number | null = null
+) => {
 	let error = null;
 
-	const res = await fetch(`${RAG_API_BASE_URL}/embedding/update`, {
+	const res = await fetch(`${RETRIEVAL_API_BASE_URL}/query/doc`, {
 		method: 'POST',
 		headers: {
+			Accept: 'application/json',
 			'Content-Type': 'application/json',
-			Authorization: `Bearer ${token}`
+			authorization: `Bearer ${token}`
 		},
 		body: JSON.stringify({
-			...payload
+			collection_name: collection_name,
+			query: query,
+			k: k
 		})
 	})
 		.then(async (res) => {
@@ -505,7 +466,6 @@ export const updateEmbeddingConfig = async (token: string, payload: EmbeddingMod
 			return res.json();
 		})
 		.catch((err) => {
-			console.log(err);
 			error = err.detail;
 			return null;
 		});
@@ -517,22 +477,32 @@ export const updateEmbeddingConfig = async (token: string, payload: EmbeddingMod
 	return res;
 };
 
-export const getRerankingConfig = async (token: string) => {
+export const queryCollection = async (
+	token: string,
+	collection_names: string,
+	query: string,
+	k: number | null = null
+) => {
 	let error = null;
 
-	const res = await fetch(`${RAG_API_BASE_URL}/reranking`, {
-		method: 'GET',
+	const res = await fetch(`${RETRIEVAL_API_BASE_URL}/query/collection`, {
+		method: 'POST',
 		headers: {
+			Accept: 'application/json',
 			'Content-Type': 'application/json',
-			Authorization: `Bearer ${token}`
-		}
+			authorization: `Bearer ${token}`
+		},
+		body: JSON.stringify({
+			collection_names: collection_names,
+			query: query,
+			k: k
+		})
 	})
 		.then(async (res) => {
 			if (!res.ok) throw await res.json();
 			return res.json();
 		})
 		.catch((err) => {
-			console.log(err);
 			error = err.detail;
 			return null;
 		});
@@ -544,29 +514,21 @@ export const getRerankingConfig = async (token: string) => {
 	return res;
 };
 
-type RerankingModelUpdateForm = {
-	reranking_model: string;
-};
-
-export const updateRerankingConfig = async (token: string, payload: RerankingModelUpdateForm) => {
+export const resetUploadDir = async (token: string) => {
 	let error = null;
 
-	const res = await fetch(`${RAG_API_BASE_URL}/reranking/update`, {
+	const res = await fetch(`${RETRIEVAL_API_BASE_URL}/reset/uploads`, {
 		method: 'POST',
 		headers: {
-			'Content-Type': 'application/json',
-			Authorization: `Bearer ${token}`
-		},
-		body: JSON.stringify({
-			...payload
-		})
+			Accept: 'application/json',
+			authorization: `Bearer ${token}`
+		}
 	})
 		.then(async (res) => {
 			if (!res.ok) throw await res.json();
 			return res.json();
 		})
 		.catch((err) => {
-			console.log(err);
 			error = err.detail;
 			return null;
 		});
@@ -578,30 +540,21 @@ export const updateRerankingConfig = async (token: string, payload: RerankingMod
 	return res;
 };
 
-export const runWebSearch = async (
-	token: string,
-	query: string,
-	collection_name?: string
-): Promise<SearchDocument | null> => {
+export const resetVectorDB = async (token: string) => {
 	let error = null;
 
-	const res = await fetch(`${RAG_API_BASE_URL}/web/search`, {
+	const res = await fetch(`${RETRIEVAL_API_BASE_URL}/reset/db`, {
 		method: 'POST',
 		headers: {
-			'Content-Type': 'application/json',
-			Authorization: `Bearer ${token}`
-		},
-		body: JSON.stringify({
-			query,
-			collection_name: collection_name ?? ''
-		})
+			Accept: 'application/json',
+			authorization: `Bearer ${token}`
+		}
 	})
 		.then(async (res) => {
 			if (!res.ok) throw await res.json();
 			return res.json();
 		})
 		.catch((err) => {
-			console.log(err);
 			error = err.detail;
 			return null;
 		});
@@ -612,9 +565,3 @@ export const runWebSearch = async (
 
 	return res;
 };
-
-export interface SearchDocument {
-	status: boolean;
-	collection_name: string;
-	filenames: string[];
-}

+ 2 - 0
src/lib/apis/streaming/index.ts

@@ -18,6 +18,8 @@ type ResponseUsage = {
 	completion_tokens: number;
 	/** Sum of the above two fields */
 	total_tokens: number;
+	/** Any other fields that aren't part of the base OpenAI spec */
+	[other: string]: unknown;
 };
 
 // createOpenAITextStream takes a responseBody with a SSE response,

+ 9 - 1
src/lib/components/AddFilesPlaceholder.svelte

@@ -1,10 +1,18 @@
 <script>
 	import { getContext } from 'svelte';
+
+	export let title = '';
 	const i18n = getContext('i18n');
 </script>
 
 <div class="  text-center text-6xl mb-3">📄</div>
-<div class="text-center dark:text-white text-2xl font-semibold z-50">{$i18n.t('Add Files')}</div>
+<div class="text-center dark:text-white text-2xl font-semibold z-50">
+	{#if title}
+		{title}
+	{:else}
+		{$i18n.t('Add Files')}
+	{/if}
+</div>
 
 <slot
 	><div class=" mt-2 text-center text-sm dark:text-gray-200 w-full">

+ 4 - 1
src/lib/components/admin/Settings/Audio.svelte

@@ -66,6 +66,7 @@
 				// do your loop
 				if (voices.length > 0) {
 					clearInterval(getVoicesLoop);
+					voices.sort((a, b) => a.name.localeCompare(b.name, $i18n.resolvedLanguage));
 				}
 			}, 100);
 		} else {
@@ -76,6 +77,7 @@
 			if (res) {
 				console.log(res);
 				voices = res.voices;
+				voices.sort((a, b) => a.name.localeCompare(b.name, $i18n.resolvedLanguage));
 			}
 		}
 	};
@@ -295,7 +297,8 @@
 										<option
 											value={voice.voiceURI}
 											class="bg-gray-100 dark:bg-gray-700"
-											selected={TTS_VOICE === voice.voiceURI}>{voice.name}</option
+											selected={TTS_VOICE === voice.voiceURI}
+											>{voice.name.replace('+', ', ')}</option
 										>
 									{/each}
 								</select>

+ 8 - 70
src/lib/components/admin/Settings/Documents.svelte

@@ -1,13 +1,12 @@
 <script lang="ts">
+	import { toast } from 'svelte-sonner';
+
 	import { onMount, getContext, createEventDispatcher } from 'svelte';
 
 	const dispatch = createEventDispatcher();
 
-	import { getDocs } from '$lib/apis/documents';
-	import { deleteAllFiles, deleteFileById } from '$lib/apis/files';
 	import {
 		getQuerySettings,
-		scanDocs,
 		updateQuerySettings,
 		resetVectorDB,
 		getEmbeddingConfig,
@@ -17,12 +16,14 @@
 		resetUploadDir,
 		getRAGConfig,
 		updateRAGConfig
-	} from '$lib/apis/rag';
+	} from '$lib/apis/retrieval';
+
+	import { knowledge, models } from '$lib/stores';
+	import { getKnowledgeItems } from '$lib/apis/knowledge';
+	import { uploadDir, deleteAllFiles, deleteFileById } from '$lib/apis/files';
+
 	import ResetUploadDirConfirmDialog from '$lib/components/common/ConfirmDialog.svelte';
 	import ResetVectorDBConfirmDialog from '$lib/components/common/ConfirmDialog.svelte';
-
-	import { documents, models } from '$lib/stores';
-	import { toast } from 'svelte-sonner';
 	import SensitiveInput from '$lib/components/common/SensitiveInput.svelte';
 	import Tooltip from '$lib/components/common/Tooltip.svelte';
 
@@ -61,17 +62,6 @@
 		hybrid: false
 	};
 
-	const scanHandler = async () => {
-		scanDirLoading = true;
-		const res = await scanDocs(localStorage.token);
-		scanDirLoading = false;
-
-		if (res) {
-			await documents.set(await getDocs(localStorage.token));
-			toast.success($i18n.t('Scan complete!'));
-		}
-	};
-
 	const embeddingModelUpdateHandler = async () => {
 		if (embeddingEngine === '' && embeddingModel.split('/').length - 1 > 1) {
 			toast.error(
@@ -284,58 +274,6 @@
 		<div class="flex flex-col gap-0.5">
 			<div class=" mb-0.5 text-sm font-medium">{$i18n.t('General Settings')}</div>
 
-			<div class="  flex w-full justify-between">
-				<div class=" self-center text-xs font-medium">
-					{$i18n.t('Scan for documents from {{path}}', { path: 'DOCS_DIR (/data/docs)' })}
-				</div>
-
-				<button
-					class=" self-center text-xs p-1 px-3 bg-gray-50 dark:bg-gray-800 dark:hover:bg-gray-700 rounded-lg flex flex-row space-x-1 items-center {scanDirLoading
-						? ' cursor-not-allowed'
-						: ''}"
-					on:click={() => {
-						scanHandler();
-						console.log('check');
-					}}
-					type="button"
-					disabled={scanDirLoading}
-				>
-					<div class="self-center font-medium">{$i18n.t('Scan')}</div>
-
-					{#if scanDirLoading}
-						<div class="ml-3 self-center">
-							<svg
-								class=" w-3 h-3"
-								viewBox="0 0 24 24"
-								fill="currentColor"
-								xmlns="http://www.w3.org/2000/svg"
-							>
-								<style>
-									.spinner_ajPY {
-										transform-origin: center;
-										animation: spinner_AtaB 0.75s infinite linear;
-									}
-
-									@keyframes spinner_AtaB {
-										100% {
-											transform: rotate(360deg);
-										}
-									}
-								</style>
-								<path
-									d="M12,1A11,11,0,1,0,23,12,11,11,0,0,0,12,1Zm0,19a8,8,0,1,1,8-8A8,8,0,0,1,12,20Z"
-									opacity=".25"
-								/>
-								<path
-									d="M10.14,1.16a11,11,0,0,0-9,8.92A1.59,1.59,0,0,0,2.46,12,1.52,1.52,0,0,0,4.11,10.7a8,8,0,0,1,6.66-6.61A1.42,1.42,0,0,0,12,2.69h0A1.57,1.57,0,0,0,10.14,1.16Z"
-									class="spinner_ajPY"
-								/>
-							</svg>
-						</div>
-					{/if}
-				</button>
-			</div>
-
 			<div class=" flex w-full justify-between">
 				<div class=" self-center text-xs font-medium">{$i18n.t('Embedding Model Engine')}</div>
 				<div class="flex items-center relative">

+ 2 - 2
src/lib/components/admin/Settings/WebSearch.svelte

@@ -1,8 +1,8 @@
 <script lang="ts">
-	import { getRAGConfig, updateRAGConfig } from '$lib/apis/rag';
+	import { getRAGConfig, updateRAGConfig } from '$lib/apis/retrieval';
 	import Switch from '$lib/components/common/Switch.svelte';
 
-	import { documents, models } from '$lib/stores';
+	import { models } from '$lib/stores';
 	import { onMount, getContext } from 'svelte';
 	import { toast } from 'svelte-sonner';
 	import SensitiveInput from '$lib/components/common/SensitiveInput.svelte';

+ 305 - 0
src/lib/components/chat/Artifacts.svelte

@@ -0,0 +1,305 @@
+<script lang="ts">
+	import { toast } from 'svelte-sonner';
+	import { onMount, getContext, createEventDispatcher } from 'svelte';
+	const i18n = getContext('i18n');
+	const dispatch = createEventDispatcher();
+
+	import { chatId, showArtifacts, showControls } from '$lib/stores';
+	import XMark from '../icons/XMark.svelte';
+	import { copyToClipboard, createMessagesList } from '$lib/utils';
+	import ArrowsPointingOut from '../icons/ArrowsPointingOut.svelte';
+	import Tooltip from '../common/Tooltip.svelte';
+	import SvgPanZoom from '../common/SVGPanZoom.svelte';
+
+	export let overlay = false;
+	export let history;
+	let messages = [];
+
+	let contents: Array<{ type: string; content: string }> = [];
+	let selectedContentIdx = 0;
+
+	let copied = false;
+	let iframeElement: HTMLIFrameElement;
+
+	$: if (history) {
+		messages = createMessagesList(history, history.currentId);
+		getContents();
+	} else {
+		messages = [];
+		getContents();
+	}
+
+	const getContents = () => {
+		contents = [];
+		messages.forEach((message) => {
+			if (message?.role !== 'user' && message?.content) {
+				const codeBlockContents = message.content.match(/```[\s\S]*?```/g);
+				let codeBlocks = [];
+
+				if (codeBlockContents) {
+					codeBlockContents.forEach((block) => {
+						const lang = block.split('\n')[0].replace('```', '').trim().toLowerCase();
+						const code = block.replace(/```[\s\S]*?\n/, '').replace(/```$/, '');
+						codeBlocks.push({ lang, code });
+					});
+				}
+
+				let htmlContent = '';
+				let cssContent = '';
+				let jsContent = '';
+
+				codeBlocks.forEach((block) => {
+					const { lang, code } = block;
+
+					if (lang === 'html') {
+						htmlContent += code + '\n';
+					} else if (lang === 'css') {
+						cssContent += code + '\n';
+					} else if (lang === 'javascript' || lang === 'js') {
+						jsContent += code + '\n';
+					}
+				});
+
+				const inlineHtml = message.content.match(/<html>[\s\S]*?<\/html>/gi);
+				const inlineCss = message.content.match(/<style>[\s\S]*?<\/style>/gi);
+				const inlineJs = message.content.match(/<script>[\s\S]*?<\/script>/gi);
+
+				if (inlineHtml) {
+					inlineHtml.forEach((block) => {
+						const content = block.replace(/<\/?html>/gi, ''); // Remove <html> tags
+						htmlContent += content + '\n';
+					});
+				}
+				if (inlineCss) {
+					inlineCss.forEach((block) => {
+						const content = block.replace(/<\/?style>/gi, ''); // Remove <style> tags
+						cssContent += content + '\n';
+					});
+				}
+				if (inlineJs) {
+					inlineJs.forEach((block) => {
+						const content = block.replace(/<\/?script>/gi, ''); // Remove <script> tags
+						jsContent += content + '\n';
+					});
+				}
+
+				if (htmlContent || cssContent || jsContent) {
+					const renderedContent = `
+                        <!DOCTYPE html>
+                        <html lang="en">
+                        <head>
+                            <meta charset="UTF-8">
+                            <meta name="viewport" content="width=device-width, initial-scale=1.0">
+							<${''}style>
+								body {
+									background-color: white; /* Ensure the iframe has a white background */
+								}
+
+								${cssContent}
+							</${''}style>
+                        </head>
+                        <body>
+                            ${htmlContent}
+
+							<${''}script>
+                            	${jsContent}
+							</${''}script>
+                        </body>
+                        </html>
+                    `;
+					contents = [...contents, { type: 'iframe', content: renderedContent }];
+				} else {
+					// Check for SVG content
+					for (const block of codeBlocks) {
+						if (block.lang === 'svg' || (block.lang === 'xml' && block.code.includes('<svg'))) {
+							contents = [...contents, { type: 'svg', content: block.code }];
+						}
+					}
+				}
+			}
+		});
+
+		selectedContentIdx = contents ? contents.length - 1 : 0;
+	};
+
+	function navigateContent(direction: 'prev' | 'next') {
+		console.log(selectedContentIdx);
+
+		selectedContentIdx =
+			direction === 'prev'
+				? Math.max(selectedContentIdx - 1, 0)
+				: Math.min(selectedContentIdx + 1, contents.length - 1);
+
+		console.log(selectedContentIdx);
+	}
+
+	const iframeLoadHandler = () => {
+		iframeElement.contentWindow.addEventListener(
+			'click',
+			function (e) {
+				const target = e.target.closest('a');
+				if (target && target.href) {
+					e.preventDefault();
+					const url = new URL(target.href, iframeElement.baseURI);
+					if (url.origin === window.location.origin) {
+						iframeElement.contentWindow.history.pushState(
+							null,
+							'',
+							url.pathname + url.search + url.hash
+						);
+					} else {
+						console.log('External navigation blocked:', url.href);
+					}
+				}
+			},
+			true
+		);
+
+		// Cancel drag when hovering over iframe
+		iframeElement.contentWindow.addEventListener('mouseenter', function (e) {
+			e.preventDefault();
+			iframeElement.contentWindow.addEventListener('dragstart', (event) => {
+				event.preventDefault();
+			});
+		});
+	};
+
+	const showFullScreen = () => {
+		if (iframeElement.requestFullscreen) {
+			iframeElement.requestFullscreen();
+		} else if (iframeElement.webkitRequestFullscreen) {
+			iframeElement.webkitRequestFullscreen();
+		} else if (iframeElement.msRequestFullscreen) {
+			iframeElement.msRequestFullscreen();
+		}
+	};
+
+	onMount(() => {});
+</script>
+
+<div class=" w-full h-full relative flex flex-col bg-gray-50 dark:bg-gray-850">
+	<div class="w-full h-full flex-1 relative">
+		{#if overlay}
+			<div class=" absolute top-0 left-0 right-0 bottom-0 z-10"></div>
+		{/if}
+
+		<div class=" absolute pointer-events-none z-50 w-full flex items-center justify-end p-4">
+			<button
+				class="self-center pointer-events-auto p-1 rounded-full bg-white dark:bg-gray-850"
+				on:click={() => {
+					dispatch('close');
+					showControls.set(false);
+					showArtifacts.set(false);
+				}}
+			>
+				<XMark className="size-3 text-gray-900 dark:text-white" />
+			</button>
+		</div>
+
+		<div class="flex-1 w-full h-full">
+			<div class=" h-full flex flex-col">
+				{#if contents.length > 0}
+					<div class="max-w-full w-full h-full">
+						{#if contents[selectedContentIdx].type === 'iframe'}
+							<iframe
+								bind:this={iframeElement}
+								title="Content"
+								srcdoc={contents[selectedContentIdx].content}
+								class="w-full border-0 h-full rounded-none"
+								sandbox="allow-scripts allow-forms allow-same-origin"
+								on:load={iframeLoadHandler}
+							></iframe>
+						{:else if contents[selectedContentIdx].type === 'svg'}
+							<SvgPanZoom
+								className=" w-full h-full max-h-full overflow-hidden"
+								svg={contents[selectedContentIdx].content}
+							/>
+						{/if}
+					</div>
+				{:else}
+					<div class="m-auto font-medium text-xs text-gray-900 dark:text-white">
+						{$i18n.t('No HTML, CSS, or JavaScript content found.')}
+					</div>
+				{/if}
+			</div>
+		</div>
+	</div>
+
+	{#if contents.length > 0}
+		<div class="flex justify-between items-center p-2.5 font-primar text-gray-900 dark:text-white">
+			<div class="flex items-center space-x-2">
+				<div class="flex items-center gap-0.5 self-center min-w-fit" dir="ltr">
+					<button
+						class="self-center p-1 hover:bg-black/5 dark:hover:bg-white/5 dark:hover:text-white hover:text-black rounded-md transition disabled:cursor-not-allowed"
+						on:click={() => navigateContent('prev')}
+						disabled={contents.length <= 1}
+					>
+						<svg
+							xmlns="http://www.w3.org/2000/svg"
+							fill="none"
+							viewBox="0 0 24 24"
+							stroke="currentColor"
+							stroke-width="2.5"
+							class="size-3.5"
+						>
+							<path
+								stroke-linecap="round"
+								stroke-linejoin="round"
+								d="M15.75 19.5 8.25 12l7.5-7.5"
+							/>
+						</svg>
+					</button>
+
+					<div class="text-xs self-center dark:text-gray-100 min-w-fit">
+						{$i18n.t('Version {{selectedVersion}} of {{totalVersions}}', {
+							selectedVersion: selectedContentIdx + 1,
+							totalVersions: contents.length
+						})}
+					</div>
+
+					<button
+						class="self-center p-1 hover:bg-black/5 dark:hover:bg-white/5 dark:hover:text-white hover:text-black rounded-md transition disabled:cursor-not-allowed"
+						on:click={() => navigateContent('next')}
+						disabled={contents.length <= 1}
+					>
+						<svg
+							xmlns="http://www.w3.org/2000/svg"
+							fill="none"
+							viewBox="0 0 24 24"
+							stroke="currentColor"
+							stroke-width="2.5"
+							class="size-3.5"
+						>
+							<path stroke-linecap="round" stroke-linejoin="round" d="m8.25 4.5 7.5 7.5-7.5 7.5" />
+						</svg>
+					</button>
+				</div>
+			</div>
+
+			<div class="flex items-center gap-1">
+				<button
+					class="copy-code-button bg-none border-none text-xs bg-gray-50 hover:bg-gray-100 dark:bg-gray-850 dark:hover:bg-gray-800 transition rounded-md px-1.5 py-0.5"
+					on:click={() => {
+						copyToClipboard(contents[selectedContentIdx].content);
+						copied = true;
+
+						setTimeout(() => {
+							copied = false;
+						}, 2000);
+					}}>{copied ? $i18n.t('Copied') : $i18n.t('Copy')}</button
+				>
+
+				{#if contents[selectedContentIdx].type === 'iframe'}
+					<Tooltip content={$i18n.t('Open in full screen')}>
+						<button
+							class=" bg-none border-none text-xs bg-gray-50 hover:bg-gray-100 dark:bg-gray-850 dark:hover:bg-gray-800 transition rounded-md p-0.5"
+							on:click={showFullScreen}
+						>
+							<ArrowsPointingOut className="size-3.5" />
+						</button>
+					</Tooltip>
+				{/if}
+			</div>
+		</div>
+	{/if}
+</div>

+ 166 - 41
src/lib/components/chat/Chat.svelte

@@ -32,7 +32,8 @@
 		temporaryChatEnabled,
 		mobile,
 		showOverview,
-		chatTitle
+		chatTitle,
+		showArtifacts
 	} from '$lib/stores';
 	import {
 		convertMessagesToHistory,
@@ -52,7 +53,7 @@
 		updateChatById
 	} from '$lib/apis/chats';
 	import { generateOpenAIChatCompletion } from '$lib/apis/openai';
-	import { runWebSearch } from '$lib/apis/rag';
+	import { processWebSearch } from '$lib/apis/retrieval';
 	import { createOpenAITextStream } from '$lib/apis/streaming';
 	import { queryMemory } from '$lib/apis/memories';
 	import { getAndUpdateUserLocation, getUserSettings } from '$lib/apis/users';
@@ -70,6 +71,7 @@
 	import Navbar from '$lib/components/layout/Navbar.svelte';
 	import ChatControls from './ChatControls.svelte';
 	import EventConfirmDialog from '../common/ConfirmDialog.svelte';
+	import Placeholder from './Placeholder.svelte';
 
 	export let chatIdProp = '';
 
@@ -311,6 +313,11 @@
 	//////////////////////////
 
 	const initNewChat = async () => {
+		await showControls.set(false);
+		await showCallOverlay.set(false);
+		await showOverview.set(false);
+		await showArtifacts.set(false);
+
 		if ($page.url.pathname.includes('/c/')) {
 			window.history.replaceState(history.state, '', `/`);
 		}
@@ -653,7 +660,7 @@
 			);
 		} else if (
 			files.length > 0 &&
-			files.filter((file) => file.type !== 'image' && file.status !== 'processed').length > 0
+			files.filter((file) => file.type !== 'image' && file.status === 'uploading').length > 0
 		) {
 			// Upload not done
 			toast.error(
@@ -689,7 +696,6 @@
 			);
 
 			files = [];
-
 			prompt = '';
 
 			// Create user message
@@ -937,7 +943,26 @@
 					done: false
 				}
 			];
-			files.push(...model.info.meta.knowledge);
+			files.push(
+				...model.info.meta.knowledge.map((item) => {
+					if (item?.collection_name) {
+						return {
+							id: item.collection_name,
+							name: item.name,
+							legacy: true
+						};
+					} else if (item?.collection_names) {
+						return {
+							name: item.name,
+							type: 'collection',
+							collection_names: item.collection_names,
+							legacy: true
+						};
+					} else {
+						return item;
+					}
+				})
+			);
 			history.messages[responseMessageId] = responseMessage;
 		}
 		files.push(
@@ -947,6 +972,12 @@
 			...(responseMessage?.files ?? []).filter((item) => ['web_search_results'].includes(item.type))
 		);
 
+		// Remove duplicates
+		files = files.filter(
+			(item, index, array) =>
+				array.findIndex((i) => JSON.stringify(i) === JSON.stringify(item)) === index
+		);
+
 		scrollToBottom();
 
 		eventTarget.dispatchEvent(
@@ -1237,7 +1268,26 @@
 					done: false
 				}
 			];
-			files.push(...model.info.meta.knowledge);
+			files.push(
+				...model.info.meta.knowledge.map((item) => {
+					if (item?.collection_name) {
+						return {
+							id: item.collection_name,
+							name: item.name,
+							legacy: true
+						};
+					} else if (item?.collection_names) {
+						return {
+							name: item.name,
+							type: 'collection',
+							collection_names: item.collection_names,
+							legacy: true
+						};
+					} else {
+						return item;
+					}
+				})
+			);
 			history.messages[responseMessageId] = responseMessage;
 		}
 		files.push(
@@ -1246,6 +1296,11 @@
 			),
 			...(responseMessage?.files ?? []).filter((item) => ['web_search_results'].includes(item.type))
 		);
+		// Remove duplicates
+		files = files.filter(
+			(item, index, array) =>
+				array.findIndex((i) => JSON.stringify(i) === JSON.stringify(item)) === index
+		);
 
 		scrollToBottom();
 
@@ -1382,7 +1437,7 @@
 						}
 
 						if (usage) {
-							responseMessage.info = { ...usage, openai: true };
+							responseMessage.info = { ...usage, openai: true, usage };
 						}
 
 						if (citations) {
@@ -1737,7 +1792,7 @@
 		});
 		history.messages[responseMessageId] = responseMessage;
 
-		const results = await runWebSearch(localStorage.token, searchQuery).catch((error) => {
+		const results = await processWebSearch(localStorage.token, searchQuery).catch((error) => {
 			console.log(error);
 			toast.error(error);
 
@@ -1880,7 +1935,7 @@
 		<PaneGroup direction="horizontal" class="w-full h-full">
 			<Pane defaultSize={50} class="h-full flex w-full relative">
 				{#if $banners.length > 0 && !history.currentId && !$chatId && selectedModels.length <= 1}
-					<div class="absolute top-3 left-0 right-0 w-full z-20">
+					<div class="absolute top-12 left-0 right-0 w-full z-30">
 						<div class=" flex flex-col gap-1 w-full">
 							{#each $banners.filter( (b) => (b.dismissible ? !JSON.parse(localStorage.getItem('dismissedBannerIds') ?? '[]').includes(b.id) : true) ) as banner}
 								<Banner
@@ -1905,44 +1960,111 @@
 				{/if}
 
 				<div class="flex flex-col flex-auto z-10 w-full">
-					<div
-						class=" pb-2.5 flex flex-col justify-between w-full flex-auto overflow-auto h-0 max-w-full z-10 scrollbar-hidden"
-						id="messages-container"
-						bind:this={messagesContainerElement}
-						on:scroll={(e) => {
-							autoScroll =
-								messagesContainerElement.scrollHeight - messagesContainerElement.scrollTop <=
-								messagesContainerElement.clientHeight + 5;
-						}}
-					>
-						<div class=" h-full w-full flex flex-col {chatIdProp ? 'py-4' : 'pt-2 pb-4'}">
-							<Messages
-								chatId={$chatId}
-								bind:history
-								bind:autoScroll
-								bind:prompt
+					{#if $settings?.landingPageMode === 'chat' || createMessagesList(history.currentId).length > 0}
+						<div
+							class=" pb-2.5 flex flex-col justify-between w-full flex-auto overflow-auto h-0 max-w-full z-10 scrollbar-hidden"
+							id="messages-container"
+							bind:this={messagesContainerElement}
+							on:scroll={(e) => {
+								autoScroll =
+									messagesContainerElement.scrollHeight - messagesContainerElement.scrollTop <=
+									messagesContainerElement.clientHeight + 5;
+							}}
+						>
+							<div class=" h-full w-full flex flex-col">
+								<Messages
+									chatId={$chatId}
+									bind:history
+									bind:autoScroll
+									bind:prompt
+									{selectedModels}
+									{sendPrompt}
+									{showMessage}
+									{continueResponse}
+									{regenerateResponse}
+									{mergeResponses}
+									{chatActionHandler}
+									bottomPadding={files.length > 0}
+									on:submit={async (e) => {
+										if (e.detail) {
+											// New user message
+											let userPrompt = e.detail.prompt;
+											let userMessageId = uuidv4();
+
+											let userMessage = {
+												id: userMessageId,
+												parentId: e.detail.parentId,
+												childrenIds: [],
+												role: 'user',
+												content: userPrompt,
+												models: selectedModels
+											};
+
+											let messageParentId = e.detail.parentId;
+
+											if (messageParentId !== null) {
+												history.messages[messageParentId].childrenIds = [
+													...history.messages[messageParentId].childrenIds,
+													userMessageId
+												];
+											}
+
+											history.messages[userMessageId] = userMessage;
+											history.currentId = userMessageId;
+
+											await tick();
+											await sendPrompt(userPrompt, userMessageId);
+										}
+									}}
+								/>
+							</div>
+						</div>
+
+						<div class=" pb-[1.6rem]">
+							<MessageInput
+								{history}
 								{selectedModels}
-								{sendPrompt}
-								{showMessage}
-								{continueResponse}
-								{regenerateResponse}
-								{mergeResponses}
-								{chatActionHandler}
-								bottomPadding={files.length > 0}
+								bind:files
+								bind:prompt
+								bind:autoScroll
+								bind:selectedToolIds
+								bind:webSearchEnabled
+								bind:atSelectedModel
+								availableToolIds={selectedModelIds.reduce((a, e, i, arr) => {
+									const model = $models.find((m) => m.id === e);
+									if (model?.info?.meta?.toolIds ?? false) {
+										return [...new Set([...a, ...model.info.meta.toolIds])];
+									}
+									return a;
+								}, [])}
+								transparentBackground={$settings?.backgroundImageUrl ?? false}
+								{stopResponse}
+								{createMessagePair}
+								on:submit={async (e) => {
+									if (e.detail) {
+										prompt = '';
+										await tick();
+										submitPrompt(e.detail);
+									}
+								}}
 							/>
-						</div>
-					</div>
 
-					<div class="">
-						<MessageInput
+							<div
+								class="absolute bottom-1.5 text-xs text-gray-500 text-center line-clamp-1 right-0 left-0"
+							>
+								{$i18n.t('LLMs can make mistakes. Verify important information.')}
+							</div>
+						</div>
+					{:else}
+						<Placeholder
 							{history}
+							{selectedModels}
 							bind:files
 							bind:prompt
 							bind:autoScroll
 							bind:selectedToolIds
 							bind:webSearchEnabled
 							bind:atSelectedModel
-							{selectedModels}
 							availableToolIds={selectedModelIds.reduce((a, e, i, arr) => {
 								const model = $models.find((m) => m.id === e);
 								if (model?.info?.meta?.toolIds ?? false) {
@@ -1951,14 +2073,17 @@
 								return a;
 							}, [])}
 							transparentBackground={$settings?.backgroundImageUrl ?? false}
-							{submitPrompt}
 							{stopResponse}
 							{createMessagePair}
-							on:call={async () => {
-								await showControls.set(true);
+							on:submit={async (e) => {
+								if (e.detail) {
+									prompt = '';
+									await tick();
+									submitPrompt(e.detail);
+								}
 							}}
 						/>
-					</div>
+					{/if}
 				</div>
 			</Pane>
 

+ 67 - 25
src/lib/components/chat/ChatControls.svelte

@@ -2,8 +2,8 @@
 	import { SvelteFlowProvider } from '@xyflow/svelte';
 	import { slide } from 'svelte/transition';
 
-	import { onDestroy, onMount } from 'svelte';
-	import { mobile, showControls, showCallOverlay, showOverview } from '$lib/stores';
+	import { onDestroy, onMount, tick } from 'svelte';
+	import { mobile, showControls, showCallOverlay, showOverview, showArtifacts } from '$lib/stores';
 
 	import Modal from '../common/Modal.svelte';
 	import Controls from './Controls/Controls.svelte';
@@ -12,12 +12,13 @@
 	import Overview from './Overview.svelte';
 	import { Pane, PaneResizer } from 'paneforge';
 	import EllipsisVertical from '../icons/EllipsisVertical.svelte';
-	import { get } from 'svelte/store';
+	import Artifacts from './Artifacts.svelte';
 
 	export let history;
 	export let models = [];
 
 	export let chatId = null;
+
 	export let chatFiles = [];
 	export let params = {};
 
@@ -29,36 +30,67 @@
 	export let modelId;
 
 	export let pane;
+
+	let mediaQuery;
 	let largeScreen = false;
+	let dragged = false;
+
+	const handleMediaQuery = async (e) => {
+		if (e.matches) {
+			largeScreen = true;
+
+			if ($showCallOverlay) {
+				showCallOverlay.set(false);
+				await tick();
+				showCallOverlay.set(true);
+			}
+		} else {
+			largeScreen = false;
+
+			if ($showCallOverlay) {
+				showCallOverlay.set(false);
+				await tick();
+				showCallOverlay.set(true);
+			}
+			pane = null;
+		}
+	};
+
+	const onMouseDown = (event) => {
+		dragged = true;
+	};
+
+	const onMouseUp = (event) => {
+		dragged = false;
+	};
 
 	onMount(() => {
 		// listen to resize 1024px
-		const mediaQuery = window.matchMedia('(min-width: 1024px)');
-
-		const handleMediaQuery = (e) => {
-			if (e.matches) {
-				largeScreen = true;
-			} else {
-				largeScreen = false;
-				pane = null;
-			}
-		};
+		mediaQuery = window.matchMedia('(min-width: 1024px)');
 
 		mediaQuery.addEventListener('change', handleMediaQuery);
-
 		handleMediaQuery(mediaQuery);
 
-		return () => {
-			mediaQuery.removeEventListener('change', handleMediaQuery);
-		};
+		document.addEventListener('mousedown', onMouseDown);
+		document.addEventListener('mouseup', onMouseUp);
 	});
 
 	onDestroy(() => {
 		showControls.set(false);
+
+		mediaQuery.removeEventListener('change', handleMediaQuery);
+		document.removeEventListener('mousedown', onMouseDown);
+		document.removeEventListener('mouseup', onMouseUp);
 	});
 
 	$: if (!chatId) {
+		showControls.set(false);
 		showOverview.set(false);
+		showArtifacts.set(false);
+
+		if ($showCallOverlay) {
+			showCallOverlay.set(false);
+		}
 	}
 </script>
 
@@ -72,7 +104,9 @@
 				}}
 			>
 				<div
-					class=" {$showCallOverlay || $showOverview ? ' h-screen  w-screen' : 'px-6 py-4'} h-full"
+					class=" {$showCallOverlay || $showOverview || $showArtifacts
+						? ' h-screen  w-screen'
+						: 'px-6 py-4'} h-full"
 				>
 					{#if $showCallOverlay}
 						<div
@@ -90,6 +124,8 @@
 								}}
 							/>
 						</div>
+					{:else if $showArtifacts}
+						<Artifacts {history} />
 					{:else if $showOverview}
 						<Overview
 							{history}
@@ -115,11 +151,14 @@
 		{/if}
 	{:else}
 		<!-- if $showControls -->
-		<PaneResizer class="relative flex w-2 items-center justify-center bg-background group">
-			<div class="z-10 flex h-7 w-5 items-center justify-center rounded-sm">
-				<EllipsisVertical className="size-4 invisible group-hover:visible" />
-			</div>
-		</PaneResizer>
+
+		{#if $showControls}
+			<PaneResizer class="relative flex w-2 items-center justify-center bg-background group">
+				<div class="z-10 flex h-7 w-5 items-center justify-center rounded-sm">
+					<EllipsisVertical className="size-4 invisible group-hover:visible" />
+				</div>
+			</PaneResizer>
+		{/if}
 		<Pane
 			bind:pane
 			defaultSize={$showControls
@@ -137,13 +176,14 @@
 					localStorage.chatControlsSize = size;
 				}
 			}}
+			class="pt-8"
 		>
 			{#if $showControls}
 				<div class="pr-4 pb-8 flex max-h-full min-h-full">
 					<div
-						class="w-full {$showOverview && !$showCallOverlay
+						class="w-full {($showOverview || $showArtifacts) && !$showCallOverlay
 							? ' '
-							: 'px-5 py-4 bg-white dark:shadow-lg dark:bg-gray-850  border border-gray-50 dark:border-gray-800'}  rounded-lg z-50 pointer-events-auto overflow-y-auto scrollbar-hidden"
+							: 'px-5 py-4 bg-white dark:shadow-lg dark:bg-gray-850  border border-gray-50 dark:border-gray-800'}  rounded-lg z-40 pointer-events-auto overflow-y-auto scrollbar-hidden"
 					>
 						{#if $showCallOverlay}
 							<div class="w-full h-full flex justify-center">
@@ -159,6 +199,8 @@
 									}}
 								/>
 							</div>
+						{:else if $showArtifacts}
+							<Artifacts {history} overlay={dragged} />
 						{:else if $showOverview}
 							<Overview
 								{history}

+ 5 - 2
src/lib/components/chat/Messages/Placeholder.svelte → src/lib/components/chat/ChatPlaceholder.svelte

@@ -7,7 +7,7 @@
 
 	import { blur, fade } from 'svelte/transition';
 
-	import Suggestions from '../MessageInput/Suggestions.svelte';
+	import Suggestions from './Suggestions.svelte';
 	import { sanitizeResponseContent } from '$lib/utils';
 	import Tooltip from '$lib/components/common/Tooltip.svelte';
 	import EyeSlash from '$lib/components/icons/EyeSlash.svelte';
@@ -125,10 +125,13 @@
 
 		<div class=" w-full font-primary" in:fade={{ duration: 200, delay: 300 }}>
 			<Suggestions
+				className="grid grid-cols-2"
 				suggestionPrompts={models[selectedModelIdx]?.info?.meta?.suggestion_prompts ??
 					$config?.default_prompt_suggestions ??
 					[]}
-				{submitPrompt}
+				on:select={(e) => {
+					submitPrompt(e.detail);
+				}}
 			/>
 		</div>
 	</div>

+ 6 - 1
src/lib/components/chat/Controls/Controls.svelte

@@ -35,7 +35,9 @@
 					{#each chatFiles as file, fileIdx}
 						<FileItem
 							className="w-full"
-							url={`${file?.url}`}
+							item={file}
+							edit={true}
+							url={file?.url ? file.url : null}
 							name={file.name}
 							type={file.type}
 							size={file?.size}
@@ -46,6 +48,9 @@
 								chatFiles.splice(fileIdx, 1);
 								chatFiles = chatFiles;
 							}}
+							on:click={() => {
+								console.log(file);
+							}}
 						/>
 					{/each}
 				</div>

+ 32 - 81
src/lib/components/chat/MessageInput.svelte

@@ -12,20 +12,14 @@
 		config,
 		showCallOverlay,
 		tools,
-		user as _user
+		user as _user,
+		showControls
 	} from '$lib/stores';
 	import { blobToFile, findWordIndices } from '$lib/utils';
-
 	import { transcribeAudio } from '$lib/apis/audio';
-	import { processDocToVectorDB } from '$lib/apis/rag';
 	import { uploadFile } from '$lib/apis/files';
 
-	import {
-		SUPPORTED_FILE_TYPE,
-		SUPPORTED_FILE_EXTENSIONS,
-		WEBUI_BASE_URL,
-		WEBUI_API_BASE_URL
-	} from '$lib/constants';
+	import { WEBUI_BASE_URL, WEBUI_API_BASE_URL } from '$lib/constants';
 
 	import Tooltip from '../common/Tooltip.svelte';
 	import InputMenu from './MessageInput/InputMenu.svelte';
@@ -40,7 +34,6 @@
 
 	export let transparentBackground = false;
 
-	export let submitPrompt: Function;
 	export let createMessagePair: Function;
 	export let stopResponse: Function;
 
@@ -49,6 +42,14 @@
 	export let atSelectedModel: Model | undefined;
 	export let selectedModels: [''];
 
+	export let history;
+
+	export let prompt = '';
+	export let files = [];
+	export let availableToolIds = [];
+	export let selectedToolIds = [];
+	export let webSearchEnabled = false;
+
 	let recording = false;
 
 	let chatTextAreaElement: HTMLTextAreaElement;
@@ -60,15 +61,7 @@
 	let dragged = false;
 
 	let user = null;
-	let chatInputPlaceholder = '';
-
-	export let history;
-
-	export let prompt = '';
-	export let files = [];
-	export let availableToolIds = [];
-	export let selectedToolIds = [];
-	export let webSearchEnabled = false;
+	export let placeholder = '';
 
 	let visionCapableModels = [];
 	$: visionCapableModels = [...(atSelectedModel ? [atSelectedModel] : selectedModels)].filter(
@@ -100,7 +93,7 @@
 			url: '',
 			name: file.name,
 			collection_name: '',
-			status: '',
+			status: 'uploading',
 			size: file.size,
 			error: ''
 		};
@@ -124,29 +117,17 @@
 		}
 
 		try {
+			// During the file upload, file content is automatically extracted.
 			const uploadedFile = await uploadFile(localStorage.token, file);
 
 			if (uploadedFile) {
 				fileItem.status = 'uploaded';
 				fileItem.file = uploadedFile;
 				fileItem.id = uploadedFile.id;
+				fileItem.collection_name = uploadedFile?.meta?.collection_name;
 				fileItem.url = `${WEBUI_API_BASE_URL}/files/${uploadedFile.id}`;
 
-				// TODO: Check if tools & functions have files support to skip this step to delegate file processing
-				// Default Upload to VectorDB
-				if (
-					SUPPORTED_FILE_TYPE.includes(file['type']) ||
-					SUPPORTED_FILE_EXTENSIONS.includes(file.name.split('.').at(-1))
-				) {
-					processFileItem(fileItem);
-				} else {
-					toast.error(
-						$i18n.t(`Unknown file type '{{file_type}}'. Proceeding with the file upload anyway.`, {
-							file_type: file['type']
-						})
-					);
-					processFileItem(fileItem);
-				}
+				files = files;
 			} else {
 				files = files.filter((item) => item.status !== null);
 			}
@@ -156,24 +137,6 @@
 		}
 	};
 
-	const processFileItem = async (fileItem) => {
-		try {
-			const res = await processDocToVectorDB(localStorage.token, fileItem.id);
-
-			if (res) {
-				fileItem.status = 'processed';
-				fileItem.collection_name = res.collection_name;
-				files = files;
-			}
-		} catch (e) {
-			// Remove the failed doc from the files array
-			// files = files.filter((f) => f.id !== fileItem.id);
-			toast.error(e);
-			fileItem.status = 'processed';
-			files = files;
-		}
-	};
-
 	const inputFilesHandler = async (inputFiles) => {
 		inputFiles.forEach((file) => {
 			console.log(file, file.name.split('.').at(-1));
@@ -270,7 +233,7 @@
 
 <div class="w-full font-primary">
 	<div class=" -mb-0.5 mx-auto inset-x-0 bg-transparent flex justify-center">
-		<div class="flex flex-col max-w-6xl px-2.5 md:px-6 w-full">
+		<div class="flex flex-col px-2.5 max-w-6xl w-full">
 			<div class="relative">
 				{#if autoScroll === false && history?.currentId}
 					<div
@@ -303,13 +266,13 @@
 			<div class="w-full relative">
 				{#if atSelectedModel !== undefined}
 					<div
-						class="px-3 py-2.5 text-left w-full flex justify-between items-center absolute bottom-0.5 left-0 right-0 bg-gradient-to-t from-50% from-white dark:from-gray-900 z-10"
+						class="px-3 py-1 text-left w-full flex justify-between items-center absolute bottom-0 left-0 right-0 bg-gradient-to-t from-white dark:from-gray-900 z-10"
 					>
 						<div class="flex items-center gap-2 text-sm dark:text-gray-500">
 							<img
 								crossorigin="anonymous"
 								alt="model profile"
-								class="size-5 max-w-[28px] object-cover rounded-full"
+								class="size-4 max-w-[28px] object-cover rounded-full"
 								src={$models.find((model) => model.id === atSelectedModel.id)?.info?.meta
 									?.profile_image_url ??
 									($i18n.language === 'dg-DG'
@@ -352,8 +315,8 @@
 	</div>
 
 	<div class="{transparentBackground ? 'bg-transparent' : 'bg-white dark:bg-gray-900'} ">
-		<div class="max-w-6xl px-2.5 md:px-6 mx-auto inset-x-0 pb-safe-bottom">
-			<div class=" pb-2">
+		<div class="max-w-6xl px-4 mx-auto inset-x-0">
+			<div class="">
 				<input
 					bind:this={filesInputElement}
 					bind:files={inputFiles}
@@ -391,7 +354,7 @@
 							document.getElementById('chat-textarea')?.focus();
 
 							if ($settings?.speechAutoSend ?? false) {
-								submitPrompt(prompt);
+								dispatch('submit', prompt);
 							}
 						}}
 					/>
@@ -400,7 +363,7 @@
 						class="w-full flex gap-1.5"
 						on:submit|preventDefault={() => {
 							// check if selectedModels support image input
-							submitPrompt(prompt);
+							dispatch('submit', prompt);
 						}}
 					>
 						<div
@@ -466,15 +429,20 @@
 											</div>
 										{:else}
 											<FileItem
+												item={file}
 												name={file.name}
 												type={file.type}
 												size={file?.size}
-												status={file.status}
+												loading={file.status === 'uploading'}
 												dismissible={true}
+												edit={true}
 												on:dismiss={() => {
 													files.splice(fileIdx, 1);
 													files = files;
 												}}
+												on:click={() => {
+													console.log(file);
+												}}
 											/>
 										{/if}
 									{/each}
@@ -527,9 +495,7 @@
 									id="chat-textarea"
 									bind:this={chatTextAreaElement}
 									class="scrollbar-hidden bg-gray-50 dark:bg-gray-850 dark:text-gray-100 outline-none w-full py-3 px-1 rounded-xl resize-none h-[48px]"
-									placeholder={chatInputPlaceholder !== ''
-										? chatInputPlaceholder
-										: $i18n.t('Send a Message')}
+									placeholder={placeholder ? placeholder : $i18n.t('Send a Message')}
 									bind:value={prompt}
 									on:keypress={(e) => {
 										if (
@@ -547,7 +513,7 @@
 
 											// Submit the prompt when Enter key is pressed
 											if (prompt !== '' && e.key === 'Enter' && !e.shiftKey) {
-												submitPrompt(prompt);
+												dispatch('submit', prompt);
 											}
 										}
 									}}
@@ -784,7 +750,7 @@
 														stream = null;
 
 														showCallOverlay.set(true);
-														dispatch('call');
+														showControls.set(true);
 													} catch (err) {
 														// If the user denies the permission or an error occurs, show an error message
 														toast.error($i18n.t('Permission denied when accessing media devices'));
@@ -849,22 +815,7 @@
 						</div>
 					</form>
 				{/if}
-
-				<div class="mt-1.5 text-xs text-gray-500 text-center line-clamp-1">
-					{$i18n.t('LLMs can make mistakes. Verify important information.')}
-				</div>
 			</div>
 		</div>
 	</div>
 </div>
-
-<style>
-	.scrollbar-hidden:active::-webkit-scrollbar-thumb,
-	.scrollbar-hidden:focus::-webkit-scrollbar-thumb,
-	.scrollbar-hidden:hover::-webkit-scrollbar-thumb {
-		visibility: visible;
-	}
-	.scrollbar-hidden::-webkit-scrollbar-thumb {
-		visibility: hidden;
-	}
-</style>

+ 36 - 30
src/lib/components/chat/MessageInput/CallOverlay.svelte

@@ -1,9 +1,6 @@
 <script lang="ts">
 	import { config, models, settings, showCallOverlay } from '$lib/stores';
 	import { onMount, tick, getContext, onDestroy, createEventDispatcher } from 'svelte';
-	import { DropdownMenu } from 'bits-ui';
-	import Dropdown from '$lib/components/common/Dropdown.svelte';
-	import { flyAndScale } from '$lib/utils/transitions';
 
 	const dispatch = createEventDispatcher();
 
@@ -35,12 +32,10 @@
 	let assistantSpeaking = false;
 
 	let emoji = null;
-
 	let camera = false;
 	let cameraStream = null;
 
 	let chatStreaming = false;
-
 	let rmsLevel = 0;
 	let hasStartedSpeaking = false;
 	let mediaRecorder;
@@ -220,32 +215,42 @@
 	};
 
 	const startRecording = async () => {
-		if (!audioStream) {
-			audioStream = await navigator.mediaDevices.getUserMedia({ audio: true });
-		}
-		mediaRecorder = new MediaRecorder(audioStream);
+		if ($showCallOverlay) {
+			if (!audioStream) {
+				audioStream = await navigator.mediaDevices.getUserMedia({ audio: true });
+			}
+			mediaRecorder = new MediaRecorder(audioStream);
 
-		mediaRecorder.onstart = () => {
-			console.log('Recording started');
-			audioChunks = [];
-			analyseAudio(audioStream);
-		};
+			mediaRecorder.onstart = () => {
+				console.log('Recording started');
+				audioChunks = [];
+				analyseAudio(audioStream);
+			};
 
-		mediaRecorder.ondataavailable = (event) => {
-			if (hasStartedSpeaking) {
-				audioChunks.push(event.data);
-			}
-		};
+			mediaRecorder.ondataavailable = (event) => {
+				if (hasStartedSpeaking) {
+					audioChunks.push(event.data);
+				}
+			};
 
-		mediaRecorder.onstop = (e) => {
-			console.log('Recording stopped', audioStream, e);
-			stopRecordingCallback();
-		};
+			mediaRecorder.onstop = (e) => {
+				console.log('Recording stopped', audioStream, e);
+				stopRecordingCallback();
+			};
 
-		mediaRecorder.start();
+			mediaRecorder.start();
+		}
 	};
 
 	const stopAudioStream = async () => {
+		try {
+			if (mediaRecorder) {
+				mediaRecorder.stop();
+			}
+		} catch (error) {
+			console.log('Error stopping audio stream:', error);
+		}
+
 		if (!audioStream) return;
 
 		audioStream.getAudioTracks().forEach(function (track) {
@@ -451,7 +456,9 @@
 				if ($config.audio.tts.engine !== '') {
 					const res = await synthesizeOpenAISpeech(
 						localStorage.token,
-						$settings?.audio?.tts?.voice ?? $config?.audio?.tts?.voice,
+						$settings?.audio?.tts?.defaultVoice === $config.audio.tts.voice
+							? ($settings?.audio?.tts?.voice ?? $config?.audio?.tts?.voice)
+							: $config?.audio?.tts?.voice,
 						content
 					).catch((error) => {
 						console.error(error);
@@ -640,19 +647,18 @@
 
 	onDestroy(async () => {
 		await stopAllAudio();
-		stopAudioStream();
+		await stopRecordingCallback(false);
+		await stopCamera();
 
+		await stopAudioStream();
 		eventTarget.removeEventListener('chat:start', chatStartHandler);
 		eventTarget.removeEventListener('chat', chatEventHandler);
 		eventTarget.removeEventListener('chat:finish', chatFinishHandler);
-
 		audioAbortController.abort();
+
 		await tick();
 
 		await stopAllAudio();
-
-		await stopRecordingCallback(false);
-		await stopCamera();
 	});
 </script>
 

+ 24 - 15
src/lib/components/chat/MessageInput/Commands.svelte

@@ -5,11 +5,11 @@
 	const dispatch = createEventDispatcher();
 
 	import Prompts from './Commands/Prompts.svelte';
-	import Documents from './Commands/Documents.svelte';
+	import Knowledge from './Commands/Knowledge.svelte';
 	import Models from './Commands/Models.svelte';
 
 	import { removeLastWordFromString } from '$lib/utils';
-	import { uploadWebToVectorDB, uploadYoutubeTranscriptionToVectorDB } from '$lib/apis/rag';
+	import { processWeb, processYoutubeVideo } from '$lib/apis/retrieval';
 
 	export let prompt = '';
 	export let files = [];
@@ -30,7 +30,7 @@
 	const uploadWeb = async (url) => {
 		console.log(url);
 
-		const doc = {
+		const fileItem = {
 			type: 'doc',
 			name: url,
 			collection_name: '',
@@ -40,25 +40,30 @@
 		};
 
 		try {
-			files = [...files, doc];
-			const res = await uploadWebToVectorDB(localStorage.token, '', url);
+			files = [...files, fileItem];
+			const res = await processWeb(localStorage.token, '', url);
 
 			if (res) {
-				doc.status = 'processed';
-				doc.collection_name = res.collection_name;
+				fileItem.status = 'processed';
+				fileItem.collection_name = res.collection_name;
+				fileItem.file = {
+					content: res.content,
+					...fileItem.file
+				};
+
 				files = files;
 			}
 		} catch (e) {
 			// Remove the failed doc from the files array
 			files = files.filter((f) => f.name !== url);
-			toast.error(e);
+			toast.error(JSON.stringify(e));
 		}
 	};
 
 	const uploadYoutubeTranscription = async (url) => {
 		console.log(url);
 
-		const doc = {
+		const fileItem = {
 			type: 'doc',
 			name: url,
 			collection_name: '',
@@ -68,12 +73,16 @@
 		};
 
 		try {
-			files = [...files, doc];
-			const res = await uploadYoutubeTranscriptionToVectorDB(localStorage.token, url);
+			files = [...files, fileItem];
+			const res = await processYoutubeVideo(localStorage.token, url);
 
 			if (res) {
-				doc.status = 'processed';
-				doc.collection_name = res.collection_name;
+				fileItem.status = 'processed';
+				fileItem.collection_name = res.collection_name;
+				fileItem.file = {
+					content: res.content,
+					...fileItem.file
+				};
 				files = files;
 			}
 		} catch (e) {
@@ -88,7 +97,7 @@
 	{#if command?.charAt(0) === '/'}
 		<Prompts bind:this={commandElement} bind:prompt bind:files {command} />
 	{:else if command?.charAt(0) === '#'}
-		<Documents
+		<Knowledge
 			bind:this={commandElement}
 			bind:prompt
 			{command}
@@ -105,7 +114,7 @@
 				files = [
 					...files,
 					{
-						type: e?.detail?.type ?? 'file',
+						type: e?.detail?.meta?.document ? 'file' : 'collection',
 						...e.detail,
 						status: 'processed'
 					}

+ 92 - 77
src/lib/components/chat/MessageInput/Commands/Documents.svelte → src/lib/components/chat/MessageInput/Commands/Knowledge.svelte

@@ -1,10 +1,10 @@
 <script lang="ts">
-	import { createEventDispatcher } from 'svelte';
+	import { toast } from 'svelte-sonner';
+	import Fuse from 'fuse.js';
 
-	import { documents } from '$lib/stores';
+	import { createEventDispatcher, tick, getContext, onMount } from 'svelte';
 	import { removeLastWordFromString, isValidHttpUrl } from '$lib/utils';
-	import { tick, getContext } from 'svelte';
-	import { toast } from 'svelte-sonner';
+	import { knowledge } from '$lib/stores';
 
 	const i18n = getContext('i18n');
 
@@ -14,60 +14,22 @@
 	const dispatch = createEventDispatcher();
 	let selectedIdx = 0;
 
+	let items = [];
+	let fuse = null;
+
 	let filteredItems = [];
-	let filteredDocs = [];
-
-	let collections = [];
-
-	$: collections = [
-		...($documents.length > 0
-			? [
-					{
-						name: 'All Documents',
-						type: 'collection',
-						title: $i18n.t('All Documents'),
-						collection_names: $documents.map((doc) => doc.collection_name)
-					}
-				]
-			: []),
-		...$documents
-			.reduce((a, e, i, arr) => {
-				return [...new Set([...a, ...(e?.content?.tags ?? []).map((tag) => tag.name)])];
-			}, [])
-			.map((tag) => ({
-				name: tag,
-				type: 'collection',
-				collection_names: $documents
-					.filter((doc) => (doc?.content?.tags ?? []).map((tag) => tag.name).includes(tag))
-					.map((doc) => doc.collection_name)
-			}))
-	];
-
-	$: filteredCollections = collections
-		.filter((collection) => findByName(collection, command))
-		.sort((a, b) => a.name.localeCompare(b.name));
-
-	$: filteredDocs = $documents
-		.filter((doc) => findByName(doc, command))
-		.sort((a, b) => a.title.localeCompare(b.title));
-
-	$: filteredItems = [...filteredCollections, ...filteredDocs];
+	$: if (fuse) {
+		filteredItems = command.slice(1)
+			? fuse.search(command).map((e) => {
+					return e.item;
+				})
+			: items;
+	}
 
 	$: if (command) {
 		selectedIdx = 0;
-
-		console.log(filteredCollections);
 	}
 
-	type ObjectWithName = {
-		name: string;
-	};
-
-	const findByName = (obj: ObjectWithName, command: string) => {
-		const name = obj.name.toLowerCase();
-		return name.includes(command.toLowerCase().split(' ')?.at(0)?.substring(1) ?? '');
-	};
-
 	export const selectUp = () => {
 		selectedIdx = Math.max(0, selectedIdx - 1);
 	};
@@ -76,8 +38,8 @@
 		selectedIdx = Math.min(selectedIdx + 1, filteredItems.length - 1);
 	};
 
-	const confirmSelect = async (doc) => {
-		dispatch('select', doc);
+	const confirmSelect = async (item) => {
+		dispatch('select', item);
 
 		prompt = removeLastWordFromString(prompt, command);
 		const chatInputElement = document.getElementById('chat-textarea');
@@ -108,55 +70,108 @@
 		chatInputElement?.focus();
 		await tick();
 	};
+
+	onMount(() => {
+		let legacy_documents = $knowledge.filter((item) => item?.meta?.document);
+		let legacy_collections =
+			legacy_documents.length > 0
+				? [
+						{
+							name: 'All Documents',
+							legacy: true,
+							type: 'collection',
+							description: 'Deprecated (legacy collection), please create a new knowledge base.',
+							title: $i18n.t('All Documents'),
+							collection_names: legacy_documents.map((item) => item.id)
+						},
+
+						...legacy_documents
+							.reduce((a, item) => {
+								return [...new Set([...a, ...(item?.meta?.tags ?? []).map((tag) => tag.name)])];
+							}, [])
+							.map((tag) => ({
+								name: tag,
+								legacy: true,
+								type: 'collection',
+								description: 'Deprecated (legacy collection), please create a new knowledge base.',
+								collection_names: legacy_documents
+									.filter((item) => (item?.meta?.tags ?? []).map((tag) => tag.name).includes(tag))
+									.map((item) => item.id)
+							}))
+					]
+				: [];
+
+		items = [...$knowledge, ...legacy_collections].map((item) => {
+			return {
+				...item,
+				...(item?.legacy || item?.meta?.legacy || item?.meta?.document ? { legacy: true } : {})
+			};
+		});
+
+		fuse = new Fuse(items, {
+			keys: ['name', 'description']
+		});
+	});
 </script>
 
 {#if filteredItems.length > 0 || prompt.split(' ')?.at(0)?.substring(1).startsWith('http')}
 	<div
 		id="commands-container"
-		class="pl-1 pr-12 mb-3 text-left w-full absolute bottom-0 left-0 right-0 z-10"
+		class="pl-2 pr-14 mb-3 text-left w-full absolute bottom-0 left-0 right-0 z-10"
 	>
 		<div class="flex w-full dark:border dark:border-gray-850 rounded-lg">
 			<div class=" bg-gray-50 dark:bg-gray-850 w-10 rounded-l-lg text-center">
-				<div class=" text-lg font-semibold mt-2">#</div>
+				<div class=" text-lg font-medium mt-2">#</div>
 			</div>
 
 			<div
 				class="max-h-60 flex flex-col w-full rounded-r-xl bg-white dark:bg-gray-900 dark:text-gray-100"
 			>
 				<div class="m-1 overflow-y-auto p-1 rounded-r-xl space-y-0.5 scrollbar-hidden">
-					{#each filteredItems as doc, docIdx}
+					{#each filteredItems as item, idx}
 						<button
-							class=" px-3 py-1.5 rounded-xl w-full text-left {docIdx === selectedIdx
+							class=" px-3 py-1.5 rounded-xl w-full text-left {idx === selectedIdx
 								? ' bg-gray-50 dark:bg-gray-850 dark:text-gray-100 selected-command-option-button'
 								: ''}"
 							type="button"
 							on:click={() => {
-								console.log(doc);
-
-								confirmSelect(doc);
+								console.log(item);
+								confirmSelect(item);
 							}}
 							on:mousemove={() => {
-								selectedIdx = docIdx;
+								selectedIdx = idx;
 							}}
 							on:focus={() => {}}
 						>
-							{#if doc.type === 'collection'}
-								<div class=" font-medium text-black dark:text-gray-100 line-clamp-1">
-									{doc?.title ?? `#${doc.name}`}
-								</div>
-
-								<div class=" text-xs text-gray-600 dark:text-gray-100 line-clamp-1">
-									{$i18n.t('Collection')}
-								</div>
-							{:else}
-								<div class=" font-medium text-black dark:text-gray-100 line-clamp-1">
-									#{doc.name} ({doc.filename})
+							<div class=" font-medium text-black dark:text-gray-100 flex items-center gap-1">
+								{#if item.legacy}
+									<div
+										class="bg-gray-500/20 text-gray-700 dark:text-gray-200 rounded uppercase text-xs font-bold px-1"
+									>
+										Legacy
+									</div>
+								{:else if item?.meta?.document}
+									<div
+										class="bg-gray-500/20 text-gray-700 dark:text-gray-200 rounded uppercase text-xs font-bold px-1"
+									>
+										Document
+									</div>
+								{:else}
+									<div
+										class="bg-green-500/20 text-green-700 dark:text-green-200 rounded uppercase text-xs font-bold px-1"
+									>
+										Collection
+									</div>
+								{/if}
+
+								<div class="line-clamp-1">
+									{item.name}
 								</div>
+							</div>
 
-								<div class=" text-xs text-gray-600 dark:text-gray-100 line-clamp-1">
-									{doc.title}
-								</div>
-							{/if}
+							<div class=" text-xs text-gray-600 dark:text-gray-100 line-clamp-1">
+								{item?.description}
+							</div>
 						</button>
 					{/each}
 

+ 31 - 11
src/lib/components/chat/MessageInput/Commands/Models.svelte

@@ -1,4 +1,6 @@
 <script lang="ts">
+	import Fuse from 'fuse.js';
+
 	import { createEventDispatcher, onMount } from 'svelte';
 	import { tick, getContext } from 'svelte';
 
@@ -11,13 +13,31 @@
 	export let command = '';
 
 	let selectedIdx = 0;
-	let filteredModels = [];
+	let filteredItems = [];
+
+	let fuse = new Fuse(
+		$models
+			.filter((model) => !model?.info?.meta?.hidden)
+			.map((model) => {
+				const _item = {
+					...model,
+					modelName: model?.name,
+					tags: model?.info?.meta?.tags?.map((tag) => tag.name).join(' '),
+					desc: model?.info?.meta?.description
+				};
+				return _item;
+			}),
+		{
+			keys: ['value', 'tags', 'modelName'],
+			threshold: 0.3
+		}
+	);
 
-	$: filteredModels = $models
-		.filter((p) =>
-			p.name.toLowerCase().includes(command.toLowerCase().split(' ')?.at(0)?.substring(1) ?? '')
-		)
-		.sort((a, b) => a.name.localeCompare(b.name));
+	$: filteredItems = command.slice(1)
+		? fuse.search(command).map((e) => {
+				return e.item;
+			})
+		: $models.filter((model) => !model?.info?.meta?.hidden);
 
 	$: if (command) {
 		selectedIdx = 0;
@@ -28,7 +48,7 @@
 	};
 
 	export const selectDown = () => {
-		selectedIdx = Math.min(selectedIdx + 1, filteredModels.length - 1);
+		selectedIdx = Math.min(selectedIdx + 1, filteredItems.length - 1);
 	};
 
 	const confirmSelect = async (model) => {
@@ -45,21 +65,21 @@
 	});
 </script>
 
-{#if filteredModels.length > 0}
+{#if filteredItems.length > 0}
 	<div
 		id="commands-container"
-		class="pl-1 pr-12 mb-3 text-left w-full absolute bottom-0 left-0 right-0 z-10"
+		class="pl-2 pr-14 mb-3 text-left w-full absolute bottom-0 left-0 right-0 z-10"
 	>
 		<div class="flex w-full dark:border dark:border-gray-850 rounded-lg">
 			<div class=" bg-gray-50 dark:bg-gray-850 w-10 rounded-l-lg text-center">
-				<div class=" text-lg font-semibold mt-2">@</div>
+				<div class=" text-lg font-medium mt-2">@</div>
 			</div>
 
 			<div
 				class="max-h-60 flex flex-col w-full rounded-r-lg bg-white dark:bg-gray-900 dark:text-gray-100"
 			>
 				<div class="m-1 overflow-y-auto p-1 rounded-r-lg space-y-0.5 scrollbar-hidden">
-					{#each filteredModels as model, modelIdx}
+					{#each filteredItems as model, modelIdx}
 						<button
 							class="px-3 py-1.5 rounded-xl w-full text-left {modelIdx === selectedIdx
 								? 'bg-gray-50 dark:bg-gray-850 selected-command-option-button'

+ 2 - 2
src/lib/components/chat/MessageInput/Commands/Prompts.svelte

@@ -132,11 +132,11 @@
 {#if filteredPrompts.length > 0}
 	<div
 		id="commands-container"
-		class="pl-1 pr-12 mb-3 text-left w-full absolute bottom-0 left-0 right-0 z-10"
+		class="pl-2 pr-14 mb-3 text-left w-full absolute bottom-0 left-0 right-0 z-10"
 	>
 		<div class="flex w-full dark:border dark:border-gray-850 rounded-lg">
 			<div class="  bg-gray-50 dark:bg-gray-850 w-10 rounded-l-lg text-center">
-				<div class=" text-lg font-semibold mt-2">/</div>
+				<div class=" text-lg font-medium mt-2">/</div>
 			</div>
 
 			<div

+ 0 - 118
src/lib/components/chat/MessageInput/Suggestions.svelte

@@ -1,118 +0,0 @@
-<script lang="ts">
-	import Bolt from '$lib/components/icons/Bolt.svelte';
-	import { onMount, getContext } from 'svelte';
-
-	const i18n = getContext('i18n');
-
-	export let submitPrompt: Function;
-	export let suggestionPrompts = [];
-
-	let prompts = [];
-
-	$: prompts = (suggestionPrompts ?? [])
-		.reduce((acc, current) => [...acc, ...[current]], [])
-		.sort(() => Math.random() - 0.5);
-	// suggestionPrompts.length <= 4
-	// 	? suggestionPrompts
-	// 	: suggestionPrompts.sort(() => Math.random() - 0.5).slice(0, 4);
-
-	onMount(() => {
-		const containerElement = document.getElementById('suggestions-container');
-
-		if (containerElement) {
-			containerElement.addEventListener('wheel', function (event) {
-				if (event.deltaY !== 0) {
-					// If scrolling vertically, prevent default behavior
-					event.preventDefault();
-					// Adjust horizontal scroll position based on vertical scroll
-					containerElement.scrollLeft += event.deltaY;
-				}
-			});
-		}
-	});
-</script>
-
-{#if prompts.length > 0}
-	<div class="mb-2 flex gap-1 text-sm font-medium items-center text-gray-400 dark:text-gray-600">
-		<Bolt />
-		{$i18n.t('Suggested')}
-	</div>
-{/if}
-
-<div class="w-full">
-	<div
-		class="relative w-full flex gap-2 snap-x snap-mandatory md:snap-none overflow-x-auto tabs"
-		id="suggestions-container"
-	>
-		{#each prompts as prompt, promptIdx}
-			<div class="snap-center shrink-0">
-				<button
-					class="flex flex-col flex-1 shrink-0 w-64 justify-between h-36 p-5 px-6 bg-gray-50 hover:bg-gray-100 dark:bg-gray-850 dark:hover:bg-gray-800 rounded-3xl transition group"
-					on:click={() => {
-						submitPrompt(prompt.content);
-					}}
-				>
-					<div class="flex flex-col text-left">
-						{#if prompt.title && prompt.title[0] !== ''}
-							<div
-								class="  font-medium dark:text-gray-300 dark:group-hover:text-gray-200 transition"
-							>
-								{prompt.title[0]}
-							</div>
-							<div class="text-sm text-gray-600 font-normal line-clamp-2">{prompt.title[1]}</div>
-						{:else}
-							<div
-								class="  text-sm font-medium dark:text-gray-300 dark:group-hover:text-gray-100 transition line-clamp-2"
-							>
-								{prompt.content}
-							</div>
-						{/if}
-					</div>
-
-					<div class="w-full flex justify-between">
-						<div
-							class="text-xs text-gray-400 group-hover:text-gray-500 dark:text-gray-600 dark:group-hover:text-gray-500 transition self-center"
-						>
-							{$i18n.t('Prompt')}
-						</div>
-
-						<div
-							class="self-end p-1 rounded-lg text-gray-300 group-hover:text-gray-800 dark:text-gray-700 dark:group-hover:text-gray-100 transition"
-						>
-							<svg
-								xmlns="http://www.w3.org/2000/svg"
-								viewBox="0 0 16 16"
-								fill="currentColor"
-								class="size-4"
-							>
-								<path
-									fill-rule="evenodd"
-									d="M8 14a.75.75 0 0 1-.75-.75V4.56L4.03 7.78a.75.75 0 0 1-1.06-1.06l4.5-4.5a.75.75 0 0 1 1.06 0l4.5 4.5a.75.75 0 0 1-1.06 1.06L8.75 4.56v8.69A.75.75 0 0 1 8 14Z"
-									clip-rule="evenodd"
-								/>
-							</svg>
-						</div>
-					</div>
-				</button>
-			</div>
-		{/each}
-
-		<!-- <div class="snap-center shrink-0">
-		<img
-			class="shrink-0 w-80 h-40 rounded-lg shadow-xl bg-white"
-			src="https://images.unsplash.com/photo-1604999565976-8913ad2ddb7c?ixlib=rb-1.2.1&amp;ixid=MnwxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8&amp;auto=format&amp;fit=crop&amp;w=320&amp;h=160&amp;q=80"
-		/>
-	</div> -->
-	</div>
-</div>
-
-<style>
-	.tabs::-webkit-scrollbar {
-		display: none; /* for Chrome, Safari and Opera */
-	}
-
-	.tabs {
-		-ms-overflow-style: none; /* IE and Edge */
-		scrollbar-width: none; /* Firefox */
-	}
-</style>

+ 10 - 5
src/lib/components/chat/Messages.svelte

@@ -1,17 +1,19 @@
 <script lang="ts">
 	import { v4 as uuidv4 } from 'uuid';
 	import { chats, config, settings, user as _user, mobile, currentChatPage } from '$lib/stores';
-	import { tick, getContext, onMount } from 'svelte';
+	import { tick, getContext, onMount, createEventDispatcher } from 'svelte';
+	const dispatch = createEventDispatcher();
 
 	import { toast } from 'svelte-sonner';
 	import { getChatList, updateChatById } from '$lib/apis/chats';
 	import { copyToClipboard, findWordIndices } from '$lib/utils';
 
-	import Placeholder from './Messages/Placeholder.svelte';
 	import Message from './Messages/Message.svelte';
 	import Loader from '../common/Loader.svelte';
 	import Spinner from '../common/Spinner.svelte';
 
+	import ChatPlaceholder from './ChatPlaceholder.svelte';
+
 	const i18n = getContext('i18n');
 
 	export let chatId = '';
@@ -79,6 +81,7 @@
 
 	const updateChatHistory = async () => {
 		await tick();
+		history = history;
 		await updateChatById(localStorage.token, chatId, {
 			history: history,
 			messages: messages
@@ -307,9 +310,9 @@
 	};
 </script>
 
-<div class="h-full flex">
+<div class="h-full flex pt-8">
 	{#if Object.keys(history?.messages ?? {}).length == 0}
-		<Placeholder
+		<ChatPlaceholder
 			modelIds={selectedModels}
 			submitPrompt={async (p) => {
 				let text = p;
@@ -382,8 +385,10 @@
 							{continueResponse}
 							{mergeResponses}
 							{readOnly}
+							on:submit={async (e) => {
+								dispatch('submit', e.detail);
+							}}
 							on:action={async (e) => {
-								const message = history.messages[message.id];
 								if (typeof e.detail === 'string') {
 									await chatActionHandler(chatId, e.detail, message.model, message.id);
 								} else {

+ 113 - 65
src/lib/components/chat/Messages/CodeBlock.svelte

@@ -5,21 +5,34 @@
 
 	import { v4 as uuidv4 } from 'uuid';
 
-	import { getContext, getAllContexts, onMount } from 'svelte';
+	import { getContext, getAllContexts, onMount, tick, createEventDispatcher } from 'svelte';
 	import { copyToClipboard } from '$lib/utils';
 
 	import 'highlight.js/styles/github-dark.min.css';
 
 	import PyodideWorker from '$lib/workers/pyodide.worker?worker';
+	import CodeEditor from '$lib/components/common/CodeEditor.svelte';
+	import SvgPanZoom from '$lib/components/common/SVGPanZoom.svelte';
 
 	const i18n = getContext('i18n');
+	const dispatch = createEventDispatcher();
 
 	export let id = '';
+	export let save = false;
 
 	export let token;
 	export let lang = '';
 	export let code = '';
 
+	let _code = '';
+	$: if (code) {
+		updateCode();
+	}
+
+	const updateCode = () => {
+		_code = code;
+	};
+
 	let _token = null;
 
 	let mermaidHtml = null;
@@ -32,6 +45,18 @@
 	let result = null;
 
 	let copied = false;
+	let saved = false;
+
+	const saveCode = () => {
+		saved = true;
+
+		code = _code;
+		dispatch('save', code);
+
+		setTimeout(() => {
+			saved = false;
+		}, 1000);
+	};
 
 	const copyCode = async () => {
 		copied = true;
@@ -233,22 +258,11 @@ __builtins__.input = input`);
 			(async () => {
 				await drawMermaidDiagram();
 			})();
-		} else {
-			// Function to perform the code highlighting
-			const highlightCode = () => {
-				highlightedCode = hljs.highlightAuto(code, hljs.getLanguage(lang)?.aliases).value || code;
-			};
-
-			// Clear the previous timeout if it exists
-			clearTimeout(debounceTimeout);
-			// Set a new timeout to debounce the code highlighting
-			debounceTimeout = setTimeout(highlightCode, 10);
 		}
 	};
 
 	$: if (token) {
 		if (JSON.stringify(token) !== JSON.stringify(_token)) {
-			console.log('hi');
 			_token = token;
 		}
 	}
@@ -257,8 +271,14 @@ __builtins__.input = input`);
 		render();
 	}
 
+	$: dispatch('code', { lang, code });
+
 	onMount(async () => {
 		console.log('codeblock', lang, code);
+
+		if (lang) {
+			dispatch('code', { lang, code });
+		}
 		if (document.documentElement.classList.contains('dark')) {
 			mermaid.initialize({
 				startOnLoad: true,
@@ -275,64 +295,92 @@ __builtins__.input = input`);
 	});
 </script>
 
-<div class="my-2" dir="ltr">
-	{#if lang === 'mermaid'}
-		{#if mermaidHtml}
-			{@html `${mermaidHtml}`}
+<div>
+	<div class="relative my-2 flex flex-col rounded-lg" dir="ltr">
+		{#if lang === 'mermaid'}
+			{#if mermaidHtml}
+				<SvgPanZoom
+					className=" border border-gray-50 dark:border-gray-850 rounded-lg max-h-fit overflow-hidden"
+					svg={mermaidHtml}
+				/>
+			{:else}
+				<pre class="mermaid">{code}</pre>
+			{/if}
 		{:else}
-			<pre class="mermaid">{code}</pre>
-		{/if}
-	{:else}
-		<div
-			class="flex justify-between bg-[#202123] text-white text-xs px-4 pt-1 pb-0.5 rounded-t-lg overflow-x-auto"
-		>
-			<div class="p-1">{lang}</div>
-
-			<div class="flex items-center">
-				{#if lang.toLowerCase() === 'python' || lang.toLowerCase() === 'py' || (lang === '' && checkPythonCode(code))}
-					{#if executing}
-						<div class="copy-code-button bg-none border-none p-1 cursor-not-allowed">Running</div>
-					{:else}
+			<div class="text-text-300 absolute pl-4 py-1.5 text-xs font-medium dark:text-white">
+				{lang}
+			</div>
+
+			<div
+				class="sticky top-8 mb-1 py-1 pr-2.5 flex items-center justify-end z-10 text-xs text-black dark:text-white"
+			>
+				<div class="flex items-center gap-0.5 translate-y-[1px]">
+					{#if lang.toLowerCase() === 'python' || lang.toLowerCase() === 'py' || (lang === '' && checkPythonCode(code))}
+						{#if executing}
+							<div class="run-code-button bg-none border-none p-1 cursor-not-allowed">Running</div>
+						{:else}
+							<button
+								class="run-code-button bg-none border-none bg-gray-50 hover:bg-gray-100 dark:bg-gray-850 dark:hover:bg-gray-800 transition rounded-md px-1.5 py-0.5"
+								on:click={async () => {
+									code = _code;
+									await tick();
+									executePython(code);
+								}}>{$i18n.t('Run')}</button
+							>
+						{/if}
+					{/if}
+
+					{#if save}
 						<button
-							class="copy-code-button bg-none border-none p-1"
-							on:click={() => {
-								executePython(code);
-							}}>{$i18n.t('Run')}</button
+							class="save-code-button bg-none border-none bg-gray-50 hover:bg-gray-100 dark:bg-gray-850 dark:hover:bg-gray-800 transition rounded-md px-1.5 py-0.5"
+							on:click={saveCode}
 						>
+							{saved ? $i18n.t('Saved') : $i18n.t('Save')}
+						</button>
 					{/if}
-				{/if}
-				<button class="copy-code-button bg-none border-none p-1" on:click={copyCode}
-					>{copied ? $i18n.t('Copied') : $i18n.t('Copy Code')}</button
-				>
-			</div>
-		</div>
-
-		<pre
-			class=" hljs p-4 px-5 overflow-x-auto"
-			style="border-top-left-radius: 0px; border-top-right-radius: 0px; {(executing ||
-				stdout ||
-				stderr ||
-				result) &&
-				'border-bottom-left-radius: 0px; border-bottom-right-radius: 0px;'}"><code
-				class="language-{lang} rounded-t-none whitespace-pre"
-				>{#if highlightedCode}{@html highlightedCode}{:else}{code}{/if}</code
-			></pre>
-
-		<div
-			id="plt-canvas-{id}"
-			class="bg-[#202123] text-white max-w-full overflow-x-auto scrollbar-hidden"
-		/>
-
-		{#if executing}
-			<div class="bg-[#202123] text-white px-4 py-4 rounded-b-lg">
-				<div class=" text-gray-500 text-xs mb-1">STDOUT/STDERR</div>
-				<div class="text-sm">Running...</div>
+
+					<button
+						class="copy-code-button bg-none border-none bg-gray-50 hover:bg-gray-100 dark:bg-gray-850 dark:hover:bg-gray-800 transition rounded-md px-1.5 py-0.5"
+						on:click={copyCode}>{copied ? $i18n.t('Copied') : $i18n.t('Copy')}</button
+					>
+				</div>
 			</div>
-		{:else if stdout || stderr || result}
-			<div class="bg-[#202123] text-white px-4 py-4 rounded-b-lg">
-				<div class=" text-gray-500 text-xs mb-1">STDOUT/STDERR</div>
-				<div class="text-sm">{stdout || stderr || result}</div>
+
+			<div
+				class="language-{lang} rounded-t-lg -mt-8 {executing || stdout || stderr || result
+					? ''
+					: 'rounded-b-lg'} overflow-hidden"
+			>
+				<div class=" pt-7 bg-gray-50 dark:bg-gray-850"></div>
+				<CodeEditor
+					value={code}
+					{id}
+					{lang}
+					on:save={() => {
+						saveCode();
+					}}
+					on:change={(e) => {
+						_code = e.detail.value;
+					}}
+				/>
 			</div>
+
+			<div
+				id="plt-canvas-{id}"
+				class="bg-[#202123] text-white max-w-full overflow-x-auto scrollbar-hidden"
+			/>
+
+			{#if executing}
+				<div class="bg-[#202123] text-white px-4 py-4 rounded-b-lg">
+					<div class=" text-gray-500 text-xs mb-1">STDOUT/STDERR</div>
+					<div class="text-sm">Running...</div>
+				</div>
+			{:else if stdout || stderr || result}
+				<div class="bg-[#202123] text-white px-4 py-4 rounded-b-lg">
+					<div class=" text-gray-500 text-xs mb-1">STDOUT/STDERR</div>
+					<div class="text-sm">{stdout || stderr || result}</div>
+				</div>
+			{/if}
 		{/if}
-	{/if}
+	</div>
 </div>

+ 208 - 0
src/lib/components/chat/Messages/ContentRenderer.svelte

@@ -0,0 +1,208 @@
+<script>
+	import { onDestroy, onMount, tick, getContext, createEventDispatcher } from 'svelte';
+	const i18n = getContext('i18n');
+	const dispatch = createEventDispatcher();
+
+	import Markdown from './Markdown.svelte';
+	import LightBlub from '$lib/components/icons/LightBlub.svelte';
+	import { chatId, mobile, showArtifacts, showControls, showOverview } from '$lib/stores';
+	import ChatBubble from '$lib/components/icons/ChatBubble.svelte';
+
+	export let id;
+	export let content;
+	export let model = null;
+
+	export let save = false;
+	export let floatingButtons = true;
+
+	let contentContainerElement;
+	let buttonsContainerElement;
+
+	let selectedText = '';
+	let floatingInput = false;
+	let floatingInputValue = '';
+
+	const updateButtonPosition = (event) => {
+		setTimeout(async () => {
+			await tick();
+
+			// Check if the event target is within the content container
+			if (!contentContainerElement?.contains(event.target)) return;
+
+			let selection = window.getSelection();
+
+			if (selection.toString().trim().length > 0) {
+				floatingInput = false;
+				const range = selection.getRangeAt(0);
+				const rect = range.getBoundingClientRect();
+
+				// Calculate position relative to the viewport (now that it's in document.body)
+				const top = rect.bottom + window.scrollY;
+				const left = rect.left + window.scrollX;
+
+				if (buttonsContainerElement) {
+					buttonsContainerElement.style.display = 'block';
+					buttonsContainerElement.style.left = `${left}px`;
+					buttonsContainerElement.style.top = `${top + 5}px`; // +5 to add some spacing
+				}
+			} else {
+				if (buttonsContainerElement) {
+					buttonsContainerElement.style.display = 'none';
+					selectedText = '';
+					floatingInput = false;
+					floatingInputValue = '';
+				}
+			}
+		}, 0);
+	};
+
+	const selectAskHandler = () => {
+		dispatch('select', {
+			type: 'ask',
+			content: selectedText,
+			input: floatingInputValue
+		});
+
+		floatingInput = false;
+		floatingInputValue = '';
+		selectedText = '';
+
+		// Clear selection
+		window.getSelection().removeAllRanges();
+		buttonsContainerElement.style.display = 'none';
+	};
+
+	onMount(() => {
+		if (floatingButtons) {
+			contentContainerElement?.addEventListener('mouseup', updateButtonPosition);
+			document.addEventListener('mouseup', updateButtonPosition);
+		}
+	});
+
+	onDestroy(() => {
+		if (floatingButtons) {
+			contentContainerElement?.removeEventListener('mouseup', updateButtonPosition);
+			document.removeEventListener('mouseup', updateButtonPosition);
+		}
+	});
+
+	$: if (floatingButtons) {
+		if (buttonsContainerElement) {
+			document.body.appendChild(buttonsContainerElement);
+		}
+	}
+
+	onDestroy(() => {
+		if (buttonsContainerElement) {
+			document.body.removeChild(buttonsContainerElement);
+		}
+	});
+</script>
+
+<div bind:this={contentContainerElement}>
+	<Markdown
+		{id}
+		{content}
+		{model}
+		{save}
+		on:update={(e) => {
+			dispatch('update', e.detail);
+		}}
+		on:code={(e) => {
+			const { lang, code } = e.detail;
+
+			if (
+				(['html', 'svg'].includes(lang) || (lang === 'xml' && code.includes('svg'))) &&
+				!$mobile &&
+				$chatId
+			) {
+				showArtifacts.set(true);
+				showControls.set(true);
+			}
+		}}
+	/>
+</div>
+
+{#if floatingButtons}
+	<div
+		bind:this={buttonsContainerElement}
+		class="absolute rounded-lg mt-1 text-xs z-[9999]"
+		style="display: none"
+	>
+		{#if !floatingInput}
+			<div
+				class="flex flex-row gap-0.5 shrink-0 p-1 bg-white dark:bg-gray-850 dark:text-gray-100 text-medium shadow-xl"
+			>
+				<button
+					class="px-1 hover:bg-gray-50 dark:hover:bg-gray-800 rounded flex items-center gap-1 min-w-fit"
+					on:click={() => {
+						selectedText = window.getSelection().toString();
+						floatingInput = true;
+					}}
+				>
+					<ChatBubble className="size-3 shrink-0" />
+
+					<div class="shrink-0">Ask</div>
+				</button>
+				<button
+					class="px-1 hover:bg-gray-50 dark:hover:bg-gray-800 rounded flex items-center gap-1 min-w-fit"
+					on:click={() => {
+						const selection = window.getSelection();
+						dispatch('select', {
+							type: 'explain',
+							content: selection.toString()
+						});
+
+						// Clear selection
+						selection.removeAllRanges();
+						buttonsContainerElement.style.display = 'none';
+					}}
+				>
+					<LightBlub className="size-3 shrink-0" />
+
+					<div class="shrink-0">Explain</div>
+				</button>
+			</div>
+		{:else}
+			<div
+				class="py-1 flex dark:text-gray-100 bg-gray-50 dark:bg-gray-800 border dark:border-gray-800 w-72 rounded-full shadow-xl"
+			>
+				<input
+					type="text"
+					class="ml-5 bg-transparent outline-none w-full flex-1 text-sm"
+					placeholder={$i18n.t('Ask a question')}
+					bind:value={floatingInputValue}
+					on:keydown={(e) => {
+						if (e.key === 'Enter') {
+							selectAskHandler();
+						}
+					}}
+				/>
+
+				<div class="ml-1 mr-2">
+					<button
+						class="{floatingInputValue !== ''
+							? 'bg-black text-white hover:bg-gray-900 dark:bg-white dark:text-black dark:hover:bg-gray-100 '
+							: 'text-white bg-gray-200 dark:text-gray-900 dark:bg-gray-700 disabled'} transition rounded-full p-1.5 m-0.5 self-center"
+						on:click={() => {
+							selectAskHandler();
+						}}
+					>
+						<svg
+							xmlns="http://www.w3.org/2000/svg"
+							viewBox="0 0 16 16"
+							fill="currentColor"
+							class="size-4"
+						>
+							<path
+								fill-rule="evenodd"
+								d="M8 14a.75.75 0 0 1-.75-.75V4.56L4.03 7.78a.75.75 0 0 1-1.06-1.06l4.5-4.5a.75.75 0 0 1 1.06 0l4.5 4.5a.75.75 0 0 1-1.06 1.06L8.75 4.56v8.69A.75.75 0 0 1 8 14Z"
+								clip-rule="evenodd"
+							/>
+						</svg>
+					</button>
+				</div>
+			</div>
+		{/if}
+	</div>
+{/if}

+ 8 - 19
src/lib/components/chat/Messages/Error.svelte

@@ -1,26 +1,15 @@
 <script lang="ts">
+	import Info from '$lib/components/icons/Info.svelte';
+
 	export let content = '';
 </script>
 
-<div
-	class="flex mt-2 mb-4 space-x-2 border px-4 py-3 border-red-800 bg-red-800/30 font-medium rounded-lg"
->
-	<svg
-		xmlns="http://www.w3.org/2000/svg"
-		fill="none"
-		viewBox="0 0 24 24"
-		stroke-width="1.5"
-		stroke="currentColor"
-		class="w-5 h-5 self-center"
-	>
-		<path
-			stroke-linecap="round"
-			stroke-linejoin="round"
-			d="M12 9v3.75m9-.75a9 9 0 11-18 0 9 9 0 0118 0zm-9 3.75h.008v.008H12v-.008z"
-		/>
-	</svg>
+<div class="flex my-2 gap-2.5 border px-4 py-3 border-red-800 bg-red-800/30 rounded-lg">
+	<div class=" self-start mt-0.5">
+		<Info className="size-5" />
+	</div>
 
-	<div class=" self-center">
-		{content}
+	<div class=" self-center text-sm">
+		{typeof content === 'string' ? content : JSON.stringify(content)}
 	</div>
 </div>

+ 20 - 3
src/lib/components/chat/Messages/Markdown.svelte

@@ -1,14 +1,20 @@
 <script>
 	import { marked } from 'marked';
-	import markedKatex from '$lib/utils/marked/katex-extension';
 	import { replaceTokens, processResponseContent } from '$lib/utils';
 	import { user } from '$lib/stores';
 
+	import markedExtension from '$lib/utils/marked/extension';
+	import markedKatexExtension from '$lib/utils/marked/katex-extension';
+
 	import MarkdownTokens from './Markdown/MarkdownTokens.svelte';
+	import { createEventDispatcher } from 'svelte';
+
+	const dispatch = createEventDispatcher();
 
 	export let id;
 	export let content;
 	export let model = null;
+	export let save = false;
 
 	let tokens = [];
 
@@ -16,7 +22,8 @@
 		throwOnError: false
 	};
 
-	marked.use(markedKatex(options));
+	marked.use(markedKatexExtension(options));
+	marked.use(markedExtension(options));
 
 	$: (async () => {
 		if (content) {
@@ -28,5 +35,15 @@
 </script>
 
 {#key id}
-	<MarkdownTokens {tokens} {id} />
+	<MarkdownTokens
+		{tokens}
+		{id}
+		{save}
+		on:update={(e) => {
+			dispatch('update', e.detail);
+		}}
+		on:code={(e) => {
+			dispatch('code', e.detail);
+		}}
+	/>
 {/key}

+ 35 - 8
src/lib/components/chat/Messages/Markdown/MarkdownTokens.svelte

@@ -1,18 +1,24 @@
 <script lang="ts">
 	import DOMPurify from 'dompurify';
-	import { onMount } from 'svelte';
+	import { createEventDispatcher, onMount } from 'svelte';
 	import { marked, type Token } from 'marked';
 	import { revertSanitizedResponseContent, unescapeHtml } from '$lib/utils';
 
+	import { WEBUI_BASE_URL } from '$lib/constants';
+
 	import CodeBlock from '$lib/components/chat/Messages/CodeBlock.svelte';
 	import MarkdownInlineTokens from '$lib/components/chat/Messages/Markdown/MarkdownInlineTokens.svelte';
 	import KatexRenderer from './KatexRenderer.svelte';
-	import { WEBUI_BASE_URL } from '$lib/constants';
+	import Collapsible from '$lib/components/common/Collapsible.svelte';
+
+	const dispatch = createEventDispatcher();
 
 	export let id: string;
 	export let tokens: Token[];
 	export let top = true;
 
+	export let save = false;
+
 	const headerComponent = (depth: number) => {
 		return 'h' + depth;
 	};
@@ -27,12 +33,27 @@
 			<MarkdownInlineTokens id={`${id}-${tokenIdx}-h`} tokens={token.tokens} />
 		</svelte:element>
 	{:else if token.type === 'code'}
-		<CodeBlock
-			id={`${id}-${tokenIdx}`}
-			{token}
-			lang={token?.lang ?? ''}
-			code={revertSanitizedResponseContent(token?.text ?? '')}
-		/>
+		{#if token.raw.includes('```')}
+			<CodeBlock
+				id={`${id}-${tokenIdx}`}
+				{token}
+				lang={token?.lang ?? ''}
+				code={revertSanitizedResponseContent(token?.text ?? '')}
+				{save}
+				on:code={(e) => {
+					dispatch('code', e.detail);
+				}}
+				on:save={(e) => {
+					dispatch('update', {
+						raw: token.raw,
+						oldContent: token.text,
+						newContent: e.detail
+					});
+				}}
+			/>
+		{:else}
+			{token.text}
+		{/if}
 	{:else if token.type === 'table'}
 		<div class="scrollbar-hidden relative whitespace-nowrap overflow-x-auto max-w-full">
 			<table class="w-full">
@@ -94,6 +115,12 @@
 				{/each}
 			</ul>
 		{/if}
+	{:else if token.type === 'details'}
+		<Collapsible title={token.summary} className="w-fit space-y-1">
+			<div class=" mb-1.5" slot="content">
+				<svelte:self id={`${id}-${tokenIdx}-d`} tokens={marked.lexer(token.text)} />
+			</div>
+		</Collapsible>
 	{:else if token.type === 'html'}
 		{@const html = DOMPurify.sanitize(token.text)}
 		{#if html && html.includes('<video')}

+ 6 - 0
src/lib/components/chat/Messages/Message.svelte

@@ -76,6 +76,9 @@
 				{rateMessage}
 				{continueResponse}
 				{regenerateResponse}
+				on:submit={async (e) => {
+					dispatch('submit', e.detail);
+				}}
 				on:action={async (e) => {
 					dispatch('action', e.detail);
 				}}
@@ -106,6 +109,9 @@
 				{continueResponse}
 				{regenerateResponse}
 				{mergeResponses}
+				on:submit={async (e) => {
+					dispatch('submit', e.detail);
+				}}
 				on:action={async (e) => {
 					dispatch('action', e.detail);
 				}}

+ 8 - 4
src/lib/components/chat/Messages/MultiResponseMessages.svelte

@@ -186,12 +186,13 @@
 								}`} transition-all p-5 rounded-2xl"
 						on:click={() => {
 							if (messageId != _messageId) {
-								let messageChildrenIds = history.messages[_messageId].childrenIds;
+								let currentMessageId = _messageId;
+								let messageChildrenIds = history.messages[currentMessageId].childrenIds;
 								while (messageChildrenIds.length !== 0) {
-									messageId = messageChildrenIds.at(-1);
-									messageChildrenIds = history.messages[_messageId].childrenIds;
+									currentMessageId = messageChildrenIds.at(-1);
+									messageChildrenIds = history.messages[currentMessageId].childrenIds;
 								}
-								history.currentId = _messageId;
+								history.currentId = currentMessageId;
 								dispatch('change');
 							}
 						}}
@@ -214,6 +215,9 @@
 										groupedMessageIdsIdx[modelIdx] =
 											groupedMessageIds[modelIdx].messageIds.length - 1;
 									}}
+									on:submit={async (e) => {
+										dispatch('submit', e.detail);
+									}}
 									on:action={async (e) => {
 										dispatch('action', e.detail);
 									}}

+ 78 - 27
src/lib/components/chat/Messages/ResponseMessage.svelte

@@ -18,7 +18,8 @@
 		extractParagraphsForAudio,
 		extractSentencesForAudio,
 		cleanText,
-		getMessageContentParts
+		getMessageContentParts,
+		sanitizeResponseContent
 	} from '$lib/utils';
 	import { WEBUI_BASE_URL } from '$lib/constants';
 
@@ -37,6 +38,7 @@
 
 	import type { Writable } from 'svelte/store';
 	import type { i18n as i18nType } from 'i18next';
+	import ContentRenderer from './ContentRenderer.svelte';
 
 	interface MessageType {
 		id: string;
@@ -73,6 +75,7 @@
 			prompt_eval_duration?: number;
 			total_duration?: number;
 			load_duration?: number;
+			usage?: unknown;
 		};
 		annotation?: { type: string; rating: number };
 	}
@@ -339,7 +342,7 @@
 				($i18n.language === 'dg-DG' ? `/doge.png` : `${WEBUI_BASE_URL}/static/favicon.png`)}
 		/>
 
-		<div class="w-full overflow-hidden pl-1">
+		<div class="flex-auto w-0 pl-1">
 			<Name>
 				{model?.name ?? message.model}
 
@@ -466,13 +469,44 @@
 								</div>
 							</div>
 						{:else}
-							<div class="w-full flex flex-col">
+							<div class="w-full flex flex-col relative" id="response-content-container">
 								{#if message.content === '' && !message.error}
 									<Skeleton />
 								{:else if message.content && message.error !== true}
 									<!-- always show message contents even if there's an error -->
 									<!-- unless message.error === true which is legacy error handling, where the error message is stored in message.content -->
-									<Markdown id={message.id} content={message.content} {model} />
+									<ContentRenderer
+										id={message.id}
+										content={message.content}
+										floatingButtons={message?.done}
+										save={true}
+										{model}
+										on:update={(e) => {
+											const { raw, oldContent, newContent } = e.detail;
+
+											history.messages[message.id].content = history.messages[
+												message.id
+											].content.replace(raw, raw.replace(oldContent, newContent));
+
+											dispatch('update');
+										}}
+										on:select={(e) => {
+											const { type, content } = e.detail;
+
+											if (type === 'explain') {
+												dispatch('submit', {
+													parentId: message.id,
+													prompt: `Explain this section to me in more detail\n\n\`\`\`\n${content}\n\`\`\``
+												});
+											} else if (type === 'ask') {
+												const input = e.detail?.input ?? '';
+												dispatch('submit', {
+													parentId: message.id,
+													prompt: `\`\`\`\n${content}\n\`\`\`\n${input}`
+												});
+											}
+										}}
+									/>
 								{/if}
 
 								{#if message.error}
@@ -621,30 +655,32 @@
 												fill="currentColor"
 												viewBox="0 0 24 24"
 												xmlns="http://www.w3.org/2000/svg"
-												><style>
+											>
+												<style>
 													.spinner_S1WN {
 														animation: spinner_MGfb 0.8s linear infinite;
 														animation-delay: -0.8s;
 													}
+
 													.spinner_Km9P {
 														animation-delay: -0.65s;
 													}
+
 													.spinner_JApP {
 														animation-delay: -0.5s;
 													}
+
 													@keyframes spinner_MGfb {
 														93.75%,
 														100% {
 															opacity: 0.2;
 														}
 													}
-												</style><circle class="spinner_S1WN" cx="4" cy="12" r="3" /><circle
-													class="spinner_S1WN spinner_Km9P"
-													cx="12"
-													cy="12"
-													r="3"
-												/><circle class="spinner_S1WN spinner_JApP" cx="20" cy="12" r="3" /></svg
-											>
+												</style>
+												<circle class="spinner_S1WN" cx="4" cy="12" r="3" />
+												<circle class="spinner_S1WN spinner_Km9P" cx="12" cy="12" r="3" />
+												<circle class="spinner_S1WN spinner_JApP" cx="20" cy="12" r="3" />
+											</svg>
 										{:else if speaking}
 											<svg
 												xmlns="http://www.w3.org/2000/svg"
@@ -697,30 +733,32 @@
 													fill="currentColor"
 													viewBox="0 0 24 24"
 													xmlns="http://www.w3.org/2000/svg"
-													><style>
+												>
+													<style>
 														.spinner_S1WN {
 															animation: spinner_MGfb 0.8s linear infinite;
 															animation-delay: -0.8s;
 														}
+
 														.spinner_Km9P {
 															animation-delay: -0.65s;
 														}
+
 														.spinner_JApP {
 															animation-delay: -0.5s;
 														}
+
 														@keyframes spinner_MGfb {
 															93.75%,
 															100% {
 																opacity: 0.2;
 															}
 														}
-													</style><circle class="spinner_S1WN" cx="4" cy="12" r="3" /><circle
-														class="spinner_S1WN spinner_Km9P"
-														cx="12"
-														cy="12"
-														r="3"
-													/><circle class="spinner_S1WN spinner_JApP" cx="20" cy="12" r="3" /></svg
-												>
+													</style>
+													<circle class="spinner_S1WN" cx="4" cy="12" r="3" />
+													<circle class="spinner_S1WN spinner_Km9P" cx="12" cy="12" r="3" />
+													<circle class="spinner_S1WN spinner_JApP" cx="20" cy="12" r="3" />
+												</svg>
 											{:else}
 												<svg
 													xmlns="http://www.w3.org/2000/svg"
@@ -744,7 +782,17 @@
 								{#if message.info}
 									<Tooltip
 										content={message.info.openai
-											? `prompt_tokens: ${message.info.prompt_tokens ?? 'N/A'}<br/>
+											? message.info.usage
+												? `<pre>${sanitizeResponseContent(
+														JSON.stringify(message.info.usage, null, 2)
+															.replace(/"([^(")"]+)":/g, '$1:')
+															.slice(1, -1)
+															.split('\n')
+															.map((line) => line.slice(2))
+															.map((line) => (line.endsWith(',') ? line.slice(0, -1) : line))
+															.join('\n')
+													)}</pre>`
+												: `prompt_tokens: ${message.info.prompt_tokens ?? 'N/A'}<br/>
 													completion_tokens: ${message.info.completion_tokens ?? 'N/A'}<br/>
 													total_tokens: ${message.info.total_tokens ?? 'N/A'}`
 											: `response_token/s: ${
@@ -854,10 +902,11 @@
 													stroke-linejoin="round"
 													class="w-4 h-4"
 													xmlns="http://www.w3.org/2000/svg"
-													><path
-														d="M14 9V5a3 3 0 0 0-3-3l-4 9v11h11.28a2 2 0 0 0 2-1.7l1.38-9a2 2 0 0 0-2-2.3zM7 22H4a2 2 0 0 1-2-2v-7a2 2 0 0 1 2-2h3"
-													/></svg
 												>
+													<path
+														d="M14 9V5a3 3 0 0 0-3-3l-4 9v11h11.28a2 2 0 0 0 2-1.7l1.38-9a2 2 0 0 0-2-2.3zM7 22H4a2 2 0 0 1-2-2v-7a2 2 0 0 1 2-2h3"
+													/>
+												</svg>
 											</button>
 										</Tooltip>
 
@@ -903,10 +952,11 @@
 													stroke-linejoin="round"
 													class="w-4 h-4"
 													xmlns="http://www.w3.org/2000/svg"
-													><path
-														d="M10 15v4a3 3 0 0 0 3 3l4-9V2H5.72a2 2 0 0 0-2 1.7l-1.38 9a2 2 0 0 0 2 2.3zm7-13h2.67A2.31 2.31 0 0 1 22 4v7a2.31 2.31 0 0 1-2.33 2H17"
-													/></svg
 												>
+													<path
+														d="M10 15v4a3 3 0 0 0 3 3l4-9V2H5.72a2 2 0 0 0-2 1.7l-1.38 9a2 2 0 0 0 2 2.3zm7-13h2.67A2.31 2.31 0 0 1 22 4v7a2.31 2.31 0 0 1-2.33 2H17"
+													/>
+												</svg>
 											</button>
 										</Tooltip>
 									{/if}
@@ -1079,6 +1129,7 @@
 		-ms-overflow-style: none; /* IE and Edge */
 		scrollbar-width: none; /* Firefox */
 	}
+
 	@keyframes shimmer {
 		0% {
 			background-position: 200% 0;

+ 2 - 1
src/lib/components/chat/Messages/UserMessage.svelte

@@ -94,7 +94,7 @@
 				: (user?.profile_image_url ?? '/user.png')}
 		/>
 	{/if}
-	<div class="w-full overflow-hidden pl-1">
+	<div class="w-full w-0 pl-1">
 		{#if !($settings?.chatBubble ?? true)}
 			<div>
 				<Name>
@@ -127,6 +127,7 @@
 								<img src={file.url} alt="input" class=" max-h-96 rounded-lg" draggable="false" />
 							{:else}
 								<FileItem
+									item={file}
 									url={file.url}
 									name={file.name}
 									type={file.type}

+ 4 - 1
src/lib/components/chat/Overview.svelte

@@ -38,13 +38,16 @@
 
 	$: if (history && history.currentId) {
 		focusNode();
-		selectedMessageId = null;
 	}
 
 	const focusNode = async () => {
 		if (selectedMessageId === null) {
 			await fitView({ nodes: [{ id: history.currentId }] });
+		} else {
+			await fitView({ nodes: [{ id: selectedMessageId }] });
 		}
+
+		selectedMessageId = null;
 	};
 
 	const drawFlow = async () => {

+ 227 - 0
src/lib/components/chat/Placeholder.svelte

@@ -0,0 +1,227 @@
+<script lang="ts">
+	import { toast } from 'svelte-sonner';
+	import { marked } from 'marked';
+
+	import { onMount, getContext, tick, createEventDispatcher } from 'svelte';
+	import { blur, fade } from 'svelte/transition';
+
+	const dispatch = createEventDispatcher();
+
+	import { config, user, models as _models, temporaryChatEnabled } from '$lib/stores';
+	import { sanitizeResponseContent, findWordIndices } from '$lib/utils';
+	import { WEBUI_BASE_URL } from '$lib/constants';
+
+	import Suggestions from './Suggestions.svelte';
+	import Tooltip from '$lib/components/common/Tooltip.svelte';
+	import EyeSlash from '$lib/components/icons/EyeSlash.svelte';
+	import MessageInput from './MessageInput.svelte';
+
+	const i18n = getContext('i18n');
+
+	export let transparentBackground = false;
+
+	export let createMessagePair: Function;
+	export let stopResponse: Function;
+
+	export let autoScroll = false;
+
+	export let atSelectedModel: Model | undefined;
+	export let selectedModels: [''];
+
+	export let history;
+
+	export let prompt = '';
+	export let files = [];
+	export let availableToolIds = [];
+	export let selectedToolIds = [];
+	export let webSearchEnabled = false;
+
+	let models = [];
+
+	const selectSuggestionPrompt = async (p) => {
+		let text = p;
+
+		if (p.includes('{{CLIPBOARD}}')) {
+			const clipboardText = await navigator.clipboard.readText().catch((err) => {
+				toast.error($i18n.t('Failed to read clipboard contents'));
+				return '{{CLIPBOARD}}';
+			});
+
+			text = p.replaceAll('{{CLIPBOARD}}', clipboardText);
+
+			console.log('Clipboard text:', clipboardText, text);
+		}
+
+		prompt = text;
+
+		console.log(prompt);
+		await tick();
+
+		const chatInputElement = document.getElementById('chat-textarea');
+		if (chatInputElement) {
+			chatInputElement.style.height = '';
+			chatInputElement.style.height = Math.min(chatInputElement.scrollHeight, 200) + 'px';
+			chatInputElement.focus();
+
+			const words = findWordIndices(prompt);
+
+			if (words.length > 0) {
+				const word = words.at(0);
+				chatInputElement.setSelectionRange(word?.startIndex, word.endIndex + 1);
+			}
+		}
+
+		await tick();
+	};
+
+	let mounted = false;
+	let selectedModelIdx = 0;
+
+	$: if (selectedModels.length > 0) {
+		selectedModelIdx = models.length - 1;
+	}
+
+	$: models = selectedModels.map((id) => $_models.find((m) => m.id === id));
+
+	onMount(() => {
+		mounted = true;
+	});
+</script>
+
+{#key mounted}
+	<div class="m-auto w-full max-w-6xl px-2 xl:px-20 translate-y-6 text-center">
+		{#if $temporaryChatEnabled}
+			<Tooltip
+				content="This chat won't appear in history and your messages will not be saved."
+				className="w-full flex justify-center mb-0.5"
+				placement="top"
+			>
+				<div class="flex items-center gap-2 text-gray-500 font-medium text-lg my-2 w-fit">
+					<EyeSlash strokeWidth="2.5" className="size-5" /> Temporary Chat
+				</div>
+			</Tooltip>
+		{/if}
+
+		<div
+			class="w-full text-3xl text-gray-800 dark:text-gray-100 font-medium text-center flex items-center gap-4 font-primary"
+		>
+			<div class="w-full flex flex-col justify-center items-center">
+				<div class="flex flex-col md:flex-row justify-center gap-2 md:gap-3.5 w-fit">
+					<div class="flex flex-shrink-0 justify-center">
+						<div class="flex -space-x-4 mb-0.5" in:fade={{ duration: 100 }}>
+							{#each models as model, modelIdx}
+								<Tooltip
+									content={(models[modelIdx]?.info?.meta?.tags ?? [])
+										.map((tag) => tag.name.toUpperCase())
+										.join(', ')}
+									placement="top"
+								>
+									<button
+										on:click={() => {
+											selectedModelIdx = modelIdx;
+										}}
+									>
+										<img
+											crossorigin="anonymous"
+											src={model?.info?.meta?.profile_image_url ??
+												($i18n.language === 'dg-DG'
+													? `/doge.png`
+													: `${WEBUI_BASE_URL}/static/favicon.png`)}
+											class=" size-[2.5rem] rounded-full border-[1px] border-gray-200 dark:border-none"
+											alt="logo"
+											draggable="false"
+										/>
+									</button>
+								</Tooltip>
+							{/each}
+						</div>
+					</div>
+
+					<div class=" capitalize line-clamp-1 text-3xl md:text-4xl" in:fade={{ duration: 100 }}>
+						{#if models[selectedModelIdx]?.info}
+							{models[selectedModelIdx]?.info?.name}
+						{:else}
+							{$i18n.t('Hello, {{name}}', { name: $user.name })}
+						{/if}
+					</div>
+				</div>
+
+				<div class="flex mt-1 mb-2">
+					<div in:fade={{ duration: 100, delay: 50 }}>
+						{#if models[selectedModelIdx]?.info?.meta?.description ?? null}
+							<Tooltip
+								className=" w-fit"
+								content={marked.parse(
+									sanitizeResponseContent(models[selectedModelIdx]?.info?.meta?.description ?? '')
+								)}
+								placement="top"
+							>
+								<div
+									class="mt-0.5 px-2 text-sm font-normal text-gray-500 dark:text-gray-400 line-clamp-2 max-w-xl markdown"
+								>
+									{@html marked.parse(
+										sanitizeResponseContent(models[selectedModelIdx]?.info?.meta?.description)
+									)}
+								</div>
+							</Tooltip>
+
+							{#if models[selectedModelIdx]?.info?.meta?.user}
+								<div class="mt-0.5 text-sm font-normal text-gray-400 dark:text-gray-500">
+									By
+									{#if models[selectedModelIdx]?.info?.meta?.user.community}
+										<a
+											href="https://openwebui.com/m/{models[selectedModelIdx]?.info?.meta?.user
+												.username}"
+											>{models[selectedModelIdx]?.info?.meta?.user.name
+												? models[selectedModelIdx]?.info?.meta?.user.name
+												: `@${models[selectedModelIdx]?.info?.meta?.user.username}`}</a
+										>
+									{:else}
+										{models[selectedModelIdx]?.info?.meta?.user.name}
+									{/if}
+								</div>
+							{/if}
+						{/if}
+					</div>
+				</div>
+
+				<div
+					class="text-base font-normal xl:translate-x-6 lg:max-w-3xl w-full py-3 {atSelectedModel
+						? 'mt-2'
+						: ''}"
+				>
+					<MessageInput
+						{history}
+						{selectedModels}
+						bind:files
+						bind:prompt
+						bind:autoScroll
+						bind:selectedToolIds
+						bind:webSearchEnabled
+						bind:atSelectedModel
+						{availableToolIds}
+						{transparentBackground}
+						{stopResponse}
+						{createMessagePair}
+						placeholder={$i18n.t('How can I help you today?')}
+						on:submit={(e) => {
+							dispatch('submit', e.detail);
+						}}
+					/>
+				</div>
+			</div>
+		</div>
+		<div class="mx-auto max-w-2xl font-primary" in:fade={{ duration: 200, delay: 200 }}>
+			<div class="mx-5">
+				<Suggestions
+					suggestionPrompts={models[selectedModelIdx]?.info?.meta?.suggestion_prompts ??
+						$config?.default_prompt_suggestions ??
+						[]}
+					on:select={(e) => {
+						selectSuggestionPrompt(e.detail);
+					}}
+				/>
+			</div>
+		</div>
+	</div>
+{/key}

+ 27 - 0
src/lib/components/chat/Settings/Interface.svelte

@@ -29,6 +29,7 @@
 	let defaultModelId = '';
 	let showUsername = false;
 
+	let landingPageMode = '';
 	let chatBubble = true;
 	let chatDirection: 'LTR' | 'RTL' = 'LTR';
 
@@ -56,6 +57,11 @@
 		saveSettings({ chatBubble: chatBubble });
 	};
 
+	const toggleLandingPageMode = async () => {
+		landingPageMode = landingPageMode === '' ? 'chat' : '';
+		saveSettings({ landingPageMode: landingPageMode });
+	};
+
 	const toggleShowUsername = async () => {
 		showUsername = !showUsername;
 		saveSettings({ showUsername: showUsername });
@@ -150,6 +156,7 @@
 		showEmojiInCall = $settings.showEmojiInCall ?? false;
 		voiceInterruption = $settings.voiceInterruption ?? false;
 
+		landingPageMode = $settings.landingPageMode ?? '';
 		chatBubble = $settings.chatBubble ?? true;
 		widescreenMode = $settings.widescreenMode ?? false;
 		splitLargeChunks = $settings.splitLargeChunks ?? false;
@@ -229,6 +236,26 @@
 		<div>
 			<div class=" mb-1.5 text-sm font-medium">{$i18n.t('UI')}</div>
 
+			<div>
+				<div class=" py-0.5 flex w-full justify-between">
+					<div class=" self-center text-xs">{$i18n.t('Landing Page Mode')}</div>
+
+					<button
+						class="p-1 px-3 text-xs flex rounded transition"
+						on:click={() => {
+							toggleLandingPageMode();
+						}}
+						type="button"
+					>
+						{#if landingPageMode === ''}
+							<span class="ml-2 self-center">{$i18n.t('Default')}</span>
+						{:else}
+							<span class="ml-2 self-center">{$i18n.t('Chat')}</span>
+						{/if}
+					</button>
+				</div>
+			</div>
+
 			<div>
 				<div class=" py-0.5 flex w-full justify-between">
 					<div class=" self-center text-xs">{$i18n.t('Chat Bubble UI')}</div>

+ 53 - 0
src/lib/components/chat/Suggestions.svelte

@@ -0,0 +1,53 @@
+<script lang="ts">
+	import Bolt from '$lib/components/icons/Bolt.svelte';
+	import { onMount, getContext, createEventDispatcher } from 'svelte';
+
+	const i18n = getContext('i18n');
+	const dispatch = createEventDispatcher();
+
+	export let suggestionPrompts = [];
+	export let className = '';
+
+	let prompts = [];
+
+	$: prompts = (suggestionPrompts ?? [])
+		.reduce((acc, current) => [...acc, ...[current]], [])
+		.sort(() => Math.random() - 0.5);
+</script>
+
+{#if prompts.length > 0}
+	<div class="mb-1 flex gap-1 text-sm font-medium items-center text-gray-400 dark:text-gray-600">
+		<Bolt />
+		{$i18n.t('Suggested')}
+	</div>
+{/if}
+
+<div class=" h-40 max-h-full overflow-auto scrollbar-none {className}">
+	{#each prompts as prompt, promptIdx}
+		<button
+			class="flex flex-col flex-1 shrink-0 w-full justify-between px-3 py-2 rounded-xl bg-transparent hover:bg-black/5 dark:hover:bg-white/5 transition group"
+			on:click={() => {
+				dispatch('select', prompt.content);
+			}}
+		>
+			<div class="flex flex-col text-left">
+				{#if prompt.title && prompt.title[0] !== ''}
+					<div
+						class="  font-medium dark:text-gray-300 dark:group-hover:text-gray-200 transition line-clamp-1"
+					>
+						{prompt.title[0]}
+					</div>
+					<div class="text-xs text-gray-500 font-normal line-clamp-1">{prompt.title[1]}</div>
+				{:else}
+					<div
+						class="  font-medium dark:text-gray-300 dark:group-hover:text-gray-200 transition line-clamp-1"
+					>
+						{prompt.content}
+					</div>
+
+					<div class="text-xs text-gray-500 font-normal line-clamp-1">Prompt</div>
+				{/if}
+			</div>
+		</button>
+	{/each}
+</div>

+ 18 - 0
src/lib/components/common/Badge.svelte

@@ -0,0 +1,18 @@
+<script lang="ts">
+	export let type = 'info';
+	export let content = '';
+
+	const classNames: Record<string, string> = {
+		info: 'bg-blue-500/20 text-blue-700 dark:text-blue-200 ',
+		success: 'bg-green-500/20 text-green-700 dark:text-green-200',
+		warning: 'bg-yellow-500/20 text-yellow-700 dark:text-yellow-200',
+		error: 'bg-red-500/20 text-red-700 dark:text-red-200'
+	};
+</script>
+
+<div
+	class=" text-xs font-bold {classNames[type] ??
+		classNames['info']}  w-fit px-2 rounded uppercase line-clamp-1 mr-0.5"
+>
+	{content}
+</div>

+ 59 - 9
src/lib/components/common/CodeEditor.svelte

@@ -7,10 +7,15 @@
 	import { indentWithTab } from '@codemirror/commands';
 
 	import { indentUnit } from '@codemirror/language';
-	import { python } from '@codemirror/lang-python';
+	import { languages } from '@codemirror/language-data';
+
+	// import { python } from '@codemirror/lang-python';
+	// import { javascript } from '@codemirror/lang-javascript';
+
 	import { oneDark } from '@codemirror/theme-one-dark';
 
-	import { onMount, createEventDispatcher, getContext } from 'svelte';
+	import { onMount, createEventDispatcher, getContext, tick } from 'svelte';
+
 	import { formatPythonCode } from '$lib/apis/utils';
 	import { toast } from 'svelte-sonner';
 
@@ -19,15 +24,40 @@
 
 	export let boilerplate = '';
 	export let value = '';
+	let _value = '';
+
+	$: if (value) {
+		updateValue();
+	}
+
+	const updateValue = () => {
+		if (_value !== value) {
+			_value = value;
+			if (codeEditor) {
+				codeEditor.dispatch({
+					changes: [{ from: 0, to: codeEditor.state.doc.length, insert: _value }]
+				});
+			}
+		}
+	};
+
+	export let id = '';
+	export let lang = '';
 
 	let codeEditor;
 
 	let isDarkMode = false;
 	let editorTheme = new Compartment();
+	let editorLanguage = new Compartment();
+
+	const getLang = async () => {
+		const language = languages.find((l) => l.alias.includes(lang));
+		return await language?.load();
+	};
 
 	export const formatPythonCodeHandler = async () => {
 		if (codeEditor) {
-			const res = await formatPythonCode(value).catch((error) => {
+			const res = await formatPythonCode(_value).catch((error) => {
 				toast.error(error);
 				return null;
 			});
@@ -38,6 +68,10 @@
 					changes: [{ from: 0, to: codeEditor.state.doc.length, insert: formattedCode }]
 				});
 
+				_value = formattedCode;
+				dispatch('change', { value: _value });
+				await tick();
+
 				toast.success($i18n.t('Code formatted successfully'));
 				return true;
 			}
@@ -49,33 +83,49 @@
 	let extensions = [
 		basicSetup,
 		keymap.of([{ key: 'Tab', run: acceptCompletion }, indentWithTab]),
-		python(),
 		indentUnit.of('    '),
 		placeholder('Enter your code here...'),
 		EditorView.updateListener.of((e) => {
 			if (e.docChanged) {
-				value = e.state.doc.toString();
+				_value = e.state.doc.toString();
+				dispatch('change', { value: _value });
 			}
 		}),
-		editorTheme.of([])
+		editorTheme.of([]),
+		editorLanguage.of([])
 	];
 
+	$: if (lang) {
+		setLanguage();
+	}
+
+	const setLanguage = async () => {
+		const language = await getLang();
+		if (language) {
+			codeEditor.dispatch({
+				effects: editorLanguage.reconfigure(language)
+			});
+		}
+	};
+
 	onMount(() => {
 		console.log(value);
 		if (value === '') {
 			value = boilerplate;
 		}
 
+		_value = value;
+
 		// Check if html class has dark mode
 		isDarkMode = document.documentElement.classList.contains('dark');
 
 		// python code editor, highlight python code
 		codeEditor = new EditorView({
 			state: EditorState.create({
-				doc: value,
+				doc: _value,
 				extensions: extensions
 			}),
-			parent: document.getElementById('code-textarea')
+			parent: document.getElementById(`code-textarea-${id}`)
 		});
 
 		if (isDarkMode) {
@@ -133,4 +183,4 @@
 	});
 </script>
 
-<div id="code-textarea" class="h-full w-full" />
+<div id="code-textarea-{id}" class="h-full w-full" />

Algunos archivos no se mostraron porque demasiados archivos cambiaron en este cambio