Browse code

Merge pull request #1144 from open-webui/dev

0.1.112
Timothy Jaeryang Baek, 1 year ago
Parent
commit
5ce421e7fa

+ 8 - 0
CHANGELOG.md

@@ -5,6 +5,14 @@ All notable changes to this project will be documented in this file.
 The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/),
 and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
 
+## [0.1.112] - 2024-03-15
+
+### Fixed
+
+- 🗨️ Resolved chat malfunction after image generation.
+- 🎨 Fixed various RAG issues.
+- 🧪 Rectified experimental broken GGUF upload logic.
+
 ## [0.1.111] - 2024-03-10
 
 ### Added

+ 8 - 5
backend/apps/images/main.py

@@ -293,6 +293,7 @@ def generate_image(
                 "size": form_data.size if form_data.size else app.state.IMAGE_SIZE,
                 "response_format": "b64_json",
             }
+
             r = requests.post(
                 url=f"https://api.openai.com/v1/images/generations",
                 json=data,
@@ -300,7 +301,6 @@ def generate_image(
             )
 
             r.raise_for_status()
-
             res = r.json()
 
             images = []
@@ -356,7 +356,10 @@ def generate_image(
             return images
 
     except Exception as e:
-        print(e)
-        if r:
-            print(r.json())
-        raise HTTPException(status_code=400, detail=ERROR_MESSAGES.DEFAULT(e))
+        error = e
+
+        if r != None:
+            data = r.json()
+            if "error" in data:
+                error = data["error"]["message"]
+        raise HTTPException(status_code=400, detail=ERROR_MESSAGES.DEFAULT(error))

+ 11 - 4
backend/apps/ollama/main.py

@@ -123,6 +123,7 @@ async def get_all_models():
             map(lambda response: response["models"], responses)
         )
     }
+
     app.state.MODELS = {model["model"]: model for model in models["models"]}
 
     return models
@@ -181,11 +182,17 @@ async def get_ollama_versions(url_idx: Optional[int] = None):
         responses = await asyncio.gather(*tasks)
         responses = list(filter(lambda x: x is not None, responses))
 
-        lowest_version = min(
-            responses, key=lambda x: tuple(map(int, x["version"].split(".")))
-        )
+        if len(responses) > 0:
+            lowest_version = min(
+                responses, key=lambda x: tuple(map(int, x["version"].split(".")))
+            )
 
-        return {"version": lowest_version["version"]}
+            return {"version": lowest_version["version"]}
+        else:
+            raise HTTPException(
+                status_code=500,
+                detail=ERROR_MESSAGES.OLLAMA_NOT_FOUND,
+            )
     else:
         url = app.state.OLLAMA_BASE_URLS[url_idx]
         try:

+ 2 - 3
backend/apps/rag/utils.py

@@ -91,9 +91,8 @@ def query_collection(
 
 
 def rag_template(template: str, context: str, query: str):
-    template = re.sub(r"\[context\]", context, template)
-    template = re.sub(r"\[query\]", query, template)
-
+    template = template.replace("[context]", context)
+    template = template.replace("[query]", query)
     return template
 
 

+ 1 - 1
backend/apps/web/routers/utils.py

@@ -75,7 +75,7 @@ async def download_file_stream(url, file_path, file_name, chunk_size=1024 * 1024
                     hashed = calculate_sha256(file)
                     file.seek(0)
 
-                    url = f"{OLLAMA_BASE_URLS[0]}/blobs/sha256:{hashed}"
+                    url = f"{OLLAMA_BASE_URLS[0]}/api/blobs/sha256:{hashed}"
                     response = requests.post(url, data=file)
 
                     if response.ok:

+ 1 - 0
backend/constants.py

@@ -52,3 +52,4 @@ class ERROR_MESSAGES(str, Enum):
 
     MODEL_NOT_FOUND = lambda name="": f"Model '{name}' was not found"
     OPENAI_NOT_FOUND = lambda name="": f"OpenAI API was not found"
+    OLLAMA_NOT_FOUND = "WebUI could not connect to Ollama"

+ 1 - 1
package.json

@@ -1,6 +1,6 @@
 {
 	"name": "open-webui",
-	"version": "0.1.111",
+	"version": "0.1.112",
 	"private": true,
 	"scripts": {
 		"dev": "vite dev --host",

+ 4 - 2
src/lib/components/chat/Settings/Images.svelte

@@ -116,11 +116,13 @@
 	class="flex flex-col h-full justify-between space-y-3 text-sm"
 	on:submit|preventDefault={async () => {
 		loading = true;
-		await updateOpenAIKey(localStorage.token, OPENAI_API_KEY);
 
-		await updateDefaultImageGenerationModel(localStorage.token, selectedModel);
+		if (imageGenerationEngine === 'openai') {
+			await updateOpenAIKey(localStorage.token, OPENAI_API_KEY);
+		}
 
 		await updateDefaultImageGenerationModel(localStorage.token, selectedModel);
+
 		await updateImageSize(localStorage.token, imageSize).catch((error) => {
 			toast.error(error);
 			return null;

+ 6 - 3
src/routes/(app)/+page.svelte

@@ -140,7 +140,9 @@
 	};
 
 	const scrollToBottom = () => {
-		messagesContainerElement.scrollTop = messagesContainerElement.scrollHeight;
+		if (messagesContainerElement) {
+			messagesContainerElement.scrollTop = messagesContainerElement.scrollHeight;
+		}
 	};
 
 	//////////////////////////
@@ -308,7 +310,7 @@
 					.map((file) => file.url.slice(file.url.indexOf(',') + 1));
 
 				// Add images array only if it contains elements
-				if (imageUrls && imageUrls.length > 0) {
+				if (imageUrls && imageUrls.length > 0 && message.role === 'user') {
 					baseMessage.images = imageUrls;
 				}
 
@@ -532,7 +534,8 @@
 					.filter((message) => message)
 					.map((message, idx, arr) => ({
 						role: message.role,
-						...(message.files?.filter((file) => file.type === 'image').length > 0 ?? false
+						...((message.files?.filter((file) => file.type === 'image').length > 0 ?? false) &&
+						message.role === 'user'
 							? {
 									content: [
 										{

+ 12 - 4
src/routes/(app)/c/[id]/+page.svelte

@@ -160,7 +160,9 @@
 	};
 
 	const scrollToBottom = () => {
-		messagesContainerElement.scrollTop = messagesContainerElement.scrollHeight;
+		if (messagesContainerElement) {
+			messagesContainerElement.scrollTop = messagesContainerElement.scrollHeight;
+		}
 	};
 
 	//////////////////////////
@@ -321,7 +323,7 @@
 					.map((file) => file.url.slice(file.url.indexOf(',') + 1));
 
 				// Add images array only if it contains elements
-				if (imageUrls && imageUrls.length > 0) {
+				if (imageUrls && imageUrls.length > 0 && message.role === 'user') {
 					baseMessage.images = imageUrls;
 				}
 
@@ -545,7 +547,8 @@
 					.filter((message) => message)
 					.map((message, idx, arr) => ({
 						role: message.role,
-						...(message.files?.filter((file) => file.type === 'image').length > 0 ?? false
+						...((message.files?.filter((file) => file.type === 'image').length > 0 ?? false) &&
+						message.role === 'user'
 							? {
 									content: [
 										{
@@ -688,7 +691,12 @@
 
 		if (messages.length == 2) {
 			window.history.replaceState(history.state, '', `/c/${_chatId}`);
-			await setChatTitle(_chatId, userPrompt);
+
+			if ($settings?.titleAutoGenerateModel) {
+				await generateChatTitle(_chatId, userPrompt);
+			} else {
+				await setChatTitle(_chatId, userPrompt);
+			}
 		}
 	};
 

+ 4 - 7
src/tailwind.css

@@ -3,16 +3,13 @@
 @tailwind utilities;
 
 @layer base {
-	html {
+	html, pre {
 		font-family: -apple-system, 'Arimo', ui-sans-serif, system-ui, 'Segoe UI', Roboto, Ubuntu,
 			Cantarell, 'Noto Sans', sans-serif, 'Helvetica Neue', Arial, 'Apple Color Emoji',
 			'Segoe UI Emoji', 'Segoe UI Symbol', 'Noto Color Emoji';
 	}
 
-	pre {
-		font-family: -apple-system, 'Arimo', ui-sans-serif, system-ui, 'Segoe UI', Roboto, Ubuntu,
-			Cantarell, 'Noto Sans', sans-serif, 'Helvetica Neue', Arial, 'Apple Color Emoji',
-			'Segoe UI Emoji', 'Segoe UI Symbol', 'Noto Color Emoji';
-		white-space: pre-wrap;
-	}
+  pre {
+    white-space: pre-wrap;
+  }
 }