Browse Source

fix: pip dependency — replace litellm[proxy] with litellm in backend/requirements.txt

Timothy J. Baek 1 year ago
parent
commit
bfba72d486

+ 1 - 1
backend/requirements.txt

@@ -16,7 +16,7 @@ aiohttp
 peewee
 bcrypt
 
-litellm[proxy]
+litellm
 
 langchain
 langchain-community

+ 7 - 4
src/lib/components/chat/Settings/Connections.svelte

@@ -3,7 +3,7 @@
 	import { createEventDispatcher, onMount } from 'svelte';
 	const dispatch = createEventDispatcher();
 
-	import { getOllamaAPIUrl, updateOllamaAPIUrl } from '$lib/apis/ollama';
+	import { getOllamaAPIUrl, getOllamaVersion, updateOllamaAPIUrl } from '$lib/apis/ollama';
 	import { getOpenAIKey, getOpenAIUrl, updateOpenAIKey, updateOpenAIUrl } from '$lib/apis/openai';
 	import toast from 'svelte-french-toast';
 
@@ -24,11 +24,14 @@
 
 	const updateOllamaAPIUrlHandler = async () => {
 		API_BASE_URL = await updateOllamaAPIUrl(localStorage.token, API_BASE_URL);
-		const _models = await getModels('ollama');
 
-		if (_models.length > 0) {
+		const ollamaVersion = await getOllamaVersion(localStorage.token).catch((error) => {
+			return null;
+		});
+
+		if (ollamaVersion) {
 			toast.success('Server connection verified');
-			await models.set(_models);
+			await models.set(await getModels());
 		}
 	};
 

+ 17 - 19
src/lib/components/chat/SettingsModal.svelte

@@ -28,31 +28,29 @@
 
 	let selectedTab = 'general';
 
-	const getModels = async (type = 'all') => {
-		const models = [];
-		models.push(
-			...(await getOllamaModels(localStorage.token).catch((error) => {
-				toast.error(error);
-				return [];
-			}))
-		);
-
-		if (type === 'all') {
-			const openAIModels = await getOpenAIModels(localStorage.token).catch((error) => {
+	const getModels = async () => {
+		let models = await Promise.all([
+			await getOllamaModels(localStorage.token).catch((error) => {
 				console.log(error);
 				return null;
-			});
-
-			models.push(...(openAIModels ? [{ name: 'hr' }, ...openAIModels] : []));
-
-			const liteLLMModels = await getLiteLLMModels(localStorage.token).catch((error) => {
+			}),
+			await getOpenAIModels(localStorage.token).catch((error) => {
+				console.log(error);
+				return null;
+			}),
+			await getLiteLLMModels(localStorage.token).catch((error) => {
 				console.log(error);
 				return null;
-			});
+			})
+		]);
 
-			models.push(...(liteLLMModels ? [{ name: 'hr' }, ...liteLLMModels] : []));
-		}
+		models = models
+			.filter((models) => models)
+			.reduce((a, e, i, arr) => a.concat(e, ...(i < arr.length - 1 ? [{ name: 'hr' }] : [])), []);
 
+		// models.push(...(ollamaModels ? [{ name: 'hr' }, ...ollamaModels] : []));
+		// models.push(...(openAIModels ? [{ name: 'hr' }, ...openAIModels] : []));
+		// models.push(...(liteLLMModels ? [{ name: 'hr' }, ...liteLLMModels] : []));
 		return models;
 	};
 </script>