|
@@ -82,6 +82,152 @@ class Filter:
|
|
|
|
|
|
return {"messages": messages}
|
|
|
|
|
|
+`;
|
|
|
+
|
|
|
+ const _boilerplate = `from pydantic import BaseModel
|
|
|
+from typing import Optional, Union, Generator, Iterator
|
|
|
+from utils.misc import get_last_user_message
|
|
|
+
|
|
|
+import os
|
|
|
+import requests
|
|
|
+
|
|
|
+
|
|
|
+# Filter Class: This class is designed to serve as a pre-processor and post-processor
|
|
|
+# for request and response modifications. It checks and transforms requests and responses
|
|
|
+# to ensure they meet specific criteria before further processing or returning to the user.
|
|
|
+class Filter:
|
|
|
+ class Valves(BaseModel):
|
|
|
+ max_turns: int = 4
|
|
|
+ pass
|
|
|
+
|
|
|
+ def __init__(self):
|
|
|
+ # Indicates custom file handling logic. This flag helps disengage default routines in favor of custom
|
|
|
+ # implementations, informing the WebUI to defer file-related operations to designated methods within this class.
|
|
|
+        # Alternatively, you can remove the files directly from the body in the inlet hook
|
|
|
+ self.file_handler = True
|
|
|
+
|
|
|
+ # Initialize 'valves' with specific configurations. Using 'Valves' instance helps encapsulate settings,
|
|
|
+ # which ensures settings are managed cohesively and not confused with operational flags like 'file_handler'.
|
|
|
+ self.valves = self.Valves(**{"max_turns": 2})
|
|
|
+ pass
|
|
|
+
|
|
|
+ def inlet(self, body: dict, user: Optional[dict] = None) -> dict:
|
|
|
+ # Modify the request body or validate it before processing by the chat completion API.
|
|
|
+ # This function is the pre-processor for the API where various checks on the input can be performed.
|
|
|
+ # It can also modify the request before sending it to the API.
|
|
|
+ print(f"inlet:{__name__}")
|
|
|
+ print(f"inlet:body:{body}")
|
|
|
+ print(f"inlet:user:{user}")
|
|
|
+
|
|
|
+ if user.get("role", "admin") in ["user", "admin"]:
|
|
|
+ messages = body.get("messages", [])
|
|
|
+ if len(messages) > self.valves.max_turns:
|
|
|
+ raise Exception(
|
|
|
+ f"Conversation turn limit exceeded. Max turns: {self.valves.max_turns}"
|
|
|
+ )
|
|
|
+
|
|
|
+ return body
|
|
|
+
|
|
|
+ def outlet(self, body: dict, user: Optional[dict] = None) -> dict:
|
|
|
+ # Modify or analyze the response body after processing by the API.
|
|
|
+ # This function is the post-processor for the API, which can be used to modify the response
|
|
|
+ # or perform additional checks and analytics.
|
|
|
+ print(f"outlet:{__name__}")
|
|
|
+ print(f"outlet:body:{body}")
|
|
|
+ print(f"outlet:user:{user}")
|
|
|
+
|
|
|
+ messages = [
|
|
|
+ {
|
|
|
+ **message,
|
|
|
+ "content": f"{message['content']} - @@Modified from Filter Outlet",
|
|
|
+ }
|
|
|
+ for message in body.get("messages", [])
|
|
|
+ ]
|
|
|
+
|
|
|
+ return {"messages": messages}
|
|
|
+
|
|
|
+
|
|
|
+
|
|
|
+# Pipe Class: This class functions as a customizable pipeline.
|
|
|
+# It can be adapted to work with any external or internal models,
|
|
|
+# making it versatile for various use cases outside of just OpenAI models.
|
|
|
+class Pipe:
|
|
|
+ class Valves(BaseModel):
|
|
|
+ OPENAI_API_BASE_URL: str = "https://api.openai.com/v1"
|
|
|
+ OPENAI_API_KEY: str = "your-key"
|
|
|
+ pass
|
|
|
+
|
|
|
+ def __init__(self):
|
|
|
+ self.type = "manifold"
|
|
|
+ self.valves = self.Valves()
|
|
|
+ self.pipes = self.get_openai_models()
|
|
|
+ pass
|
|
|
+
|
|
|
+ def get_openai_models(self):
|
|
|
+ if self.valves.OPENAI_API_KEY:
|
|
|
+ try:
|
|
|
+ headers = {}
|
|
|
+ headers["Authorization"] = f"Bearer {self.valves.OPENAI_API_KEY}"
|
|
|
+ headers["Content-Type"] = "application/json"
|
|
|
+
|
|
|
+ r = requests.get(
|
|
|
+ f"{self.valves.OPENAI_API_BASE_URL}/models", headers=headers
|
|
|
+ )
|
|
|
+
|
|
|
+ models = r.json()
|
|
|
+ return [
|
|
|
+ {
|
|
|
+ "id": model["id"],
|
|
|
+ "name": model["name"] if "name" in model else model["id"],
|
|
|
+ }
|
|
|
+ for model in models["data"]
|
|
|
+ if "gpt" in model["id"]
|
|
|
+ ]
|
|
|
+
|
|
|
+ except Exception as e:
|
|
|
+
|
|
|
+ print(f"Error: {e}")
|
|
|
+ return [
|
|
|
+ {
|
|
|
+ "id": "error",
|
|
|
+ "name": "Could not fetch models from OpenAI, please update the API Key in the valves.",
|
|
|
+ },
|
|
|
+ ]
|
|
|
+ else:
|
|
|
+ return []
|
|
|
+
|
|
|
+ def pipe(self, body: dict) -> Union[str, Generator, Iterator]:
|
|
|
+ # This is where you can add your custom pipelines like RAG.
|
|
|
+ print(f"pipe:{__name__}")
|
|
|
+
|
|
|
+ if "user" in body:
|
|
|
+ print(body["user"])
|
|
|
+ del body["user"]
|
|
|
+
|
|
|
+ headers = {}
|
|
|
+ headers["Authorization"] = f"Bearer {self.valves.OPENAI_API_KEY}"
|
|
|
+ headers["Content-Type"] = "application/json"
|
|
|
+
|
|
|
+ model_id = body["model"][body["model"].find(".") + 1 :]
|
|
|
+ payload = {**body, "model": model_id}
|
|
|
+ print(payload)
|
|
|
+
|
|
|
+ try:
|
|
|
+ r = requests.post(
|
|
|
+ url=f"{self.valves.OPENAI_API_BASE_URL}/chat/completions",
|
|
|
+ json=payload,
|
|
|
+ headers=headers,
|
|
|
+ stream=True,
|
|
|
+ )
|
|
|
+
|
|
|
+ r.raise_for_status()
|
|
|
+
|
|
|
+ if body["stream"]:
|
|
|
+ return r.iter_lines()
|
|
|
+ else:
|
|
|
+ return r.json()
|
|
|
+ except Exception as e:
|
|
|
+ return f"Error: {e}"
|
|
|
`;
|
|
|
|
|
|
const saveHandler = async () => {
|