Browse Source

feat: abort openai text completion when stopping responses

Jun Siang Cheah 1 năm trước
mục cha
commit
c095a7c291

+ 4 - 2
src/lib/apis/openai/index.ts

@@ -211,10 +211,12 @@ export const generateOpenAIChatCompletion = async (
 	token: string = '',
 	body: object,
 	url: string = OPENAI_API_BASE_URL
-) => {
+): Promise<[Response | null, AbortController]> => {
+	const controller = new AbortController();
 	let error = null;
 
 	const res = await fetch(`${url}/chat/completions`, {
+		signal: controller.signal,
 		method: 'POST',
 		headers: {
 			Authorization: `Bearer ${token}`,
@@ -231,7 +233,7 @@ export const generateOpenAIChatCompletion = async (
 		throw error;
 	}
 
-	return res;
+	return [res, controller];
 };
 
 export const synthesizeOpenAISpeech = async (

+ 6 - 1
src/routes/(app)/+page.svelte

@@ -532,7 +532,7 @@
 
 		console.log(model);
 
-		const res = await generateOpenAIChatCompletion(
+		const [res, controller] = await generateOpenAIChatCompletion(
 			localStorage.token,
 			{
 				model: model.id,
@@ -608,6 +608,11 @@
 				if (done || stopResponseFlag || _chatId !== $chatId) {
 					responseMessage.done = true;
 					messages = messages;
+
+					if (stopResponseFlag) {
+						controller.abort('User: Stop Response');
+					}
+
 					break;
 				}
 

+ 6 - 1
src/routes/(app)/c/[id]/+page.svelte

@@ -544,7 +544,7 @@
 
 		console.log(docs);
 
-		const res = await generateOpenAIChatCompletion(
+		const [res, controller] = await generateOpenAIChatCompletion(
 			localStorage.token,
 			{
 				model: model.id,
@@ -620,6 +620,11 @@
 				if (done || stopResponseFlag || _chatId !== $chatId) {
 					responseMessage.done = true;
 					messages = messages;
+
+					if (stopResponseFlag) {
+						controller.abort('User: Stop Response');
+					}
+
 					break;
 				}
 

+ 4 - 4
src/routes/(app)/playground/+page.svelte

@@ -67,7 +67,7 @@
 	const textCompletionHandler = async () => {
 		const model = $models.find((model) => model.id === selectedModelId);
 
-		const res = await generateOpenAIChatCompletion(
+		const [res, controller] = await generateOpenAIChatCompletion(
 			localStorage.token,
 			{
 				model: model.id,
@@ -96,7 +96,7 @@
 				const { value, done } = await reader.read();
 				if (done || stopResponseFlag) {
 					if (stopResponseFlag) {
-						await cancelOllamaRequest(localStorage.token, currentRequestId);
+						controller.abort('User: Stop Response');
 					}
 
 					currentRequestId = null;
@@ -135,7 +135,7 @@
 	const chatCompletionHandler = async () => {
 		const model = $models.find((model) => model.id === selectedModelId);
 
-		const res = await generateOpenAIChatCompletion(
+		const [res, controller] = await generateOpenAIChatCompletion(
 			localStorage.token,
 			{
 				model: model.id,
@@ -182,7 +182,7 @@
 				const { value, done } = await reader.read();
 				if (done || stopResponseFlag) {
 					if (stopResponseFlag) {
-						await cancelOllamaRequest(localStorage.token, currentRequestId);
+						controller.abort('User: Stop Response');
 					}
 
 					currentRequestId = null;