@@ -15,8 +15,9 @@ from langchain_core.documents import Document
 from open_webui.config import VECTOR_DB
 from open_webui.retrieval.vector.connector import VECTOR_DB_CLIENT
 from open_webui.utils.misc import get_last_user_message
+from open_webui.models.users import UserModel

-from open_webui.env import SRC_LOG_LEVELS, OFFLINE_MODE
+from open_webui.env import SRC_LOG_LEVELS, OFFLINE_MODE, ENABLE_FORWARD_USER_INFO_HEADERS

 log = logging.getLogger(__name__)
 log.setLevel(SRC_LOG_LEVELS["RAG"])
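
For context, `ENABLE_FORWARD_USER_INFO_HEADERS` is the switch that gates all of the header forwarding below. A minimal sketch of how such an environment-driven boolean might be defined, assuming the usual `os.environ` pattern (the actual definition in `open_webui/env.py` may differ):

```python
import os

# Hypothetical sketch, not part of this diff: an env-driven boolean flag.
ENABLE_FORWARD_USER_INFO_HEADERS = (
    os.environ.get("ENABLE_FORWARD_USER_INFO_HEADERS", "False").lower() == "true"
)
```
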
@@ -64,6 +65,7 @@ def query_doc(
     collection_name: str,
     query_embedding: list[float],
     k: int,
+    user: UserModel = None,
 ):
     try:
         result = VECTOR_DB_CLIENT.search(
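
`query_doc` now accepts the requesting user, so callers can thread identity through the search path. A hypothetical call, assuming `query_embedding` was computed earlier and `user` is a `UserModel`:

```python
# Hypothetical usage; the collection name and k are placeholders.
results = query_doc(
    collection_name="file-abc123",
    query_embedding=query_embedding,  # precomputed list[float]
    k=5,
    user=user,  # optional; defaults to None
)
```
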
@@ -256,29 +258,32 @@ def get_embedding_function(
     embedding_function,
     url,
     key,
-    embedding_batch_size,
+    embedding_batch_size
 ):
     if embedding_engine == "":
-        return lambda query: embedding_function.encode(query).tolist()
+        return lambda query, user=None: embedding_function.encode(query).tolist()
     elif embedding_engine in ["ollama", "openai"]:
-        func = lambda query: generate_embeddings(
+        func = lambda query, user=None: generate_embeddings(
             engine=embedding_engine,
             model=embedding_model,
             text=query,
             url=url,
             key=key,
+            user=user,
         )

-        def generate_multiple(query, func):
+        def generate_multiple(query, user, func):
             if isinstance(query, list):
                 embeddings = []
                 for i in range(0, len(query), embedding_batch_size):
-                    embeddings.extend(func(query[i : i + embedding_batch_size]))
+                    embeddings.extend(func(query[i : i + embedding_batch_size], user=user))
                 return embeddings
             else:
-                return func(query)
+                return func(query, user)

-        return lambda query: generate_multiple(query, func)
+        return lambda query, user=None: generate_multiple(query, user, func)
+    else:
+        raise ValueError(f"Unknown embedding engine: {embedding_engine}")


 def get_sources_from_files(
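
The factory now returns a callable that takes an optional `user` and, for list inputs, issues requests in `embedding_batch_size` chunks via `generate_multiple`. A hypothetical usage sketch, assuming the leading parameters are `embedding_engine` and `embedding_model` as the function body suggests (all argument values are placeholders):

```python
# Hypothetical usage; every argument value below is a placeholder.
embedding_fn = get_embedding_function(
    embedding_engine="openai",
    embedding_model="text-embedding-3-small",
    embedding_function=None,  # only consulted by the local ("") engine
    url="https://api.openai.com/v1",
    key="sk-placeholder",
    embedding_batch_size=16,
)

vector = embedding_fn("hello world", user=user)            # one vector
vectors = embedding_fn(["chunk a", "chunk b"], user=user)  # batched, user forwarded
```
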
@@ -423,7 +428,7 @@ def get_model_path(model: str, update_model: bool = False):


 def generate_openai_batch_embeddings(
-    model: str, texts: list[str], url: str = "https://api.openai.com/v1", key: str = ""
+    model: str, texts: list[str], url: str = "https://api.openai.com/v1", key: str = "", user: UserModel = None
 ) -> Optional[list[list[float]]]:
     try:
         r = requests.post(
@@ -431,6 +436,16 @@ def generate_openai_batch_embeddings(
             headers={
                 "Content-Type": "application/json",
                 "Authorization": f"Bearer {key}",
+                **(
+                    {
+                        "X-OpenWebUI-User-Name": user.name,
+                        "X-OpenWebUI-User-Id": user.id,
+                        "X-OpenWebUI-User-Email": user.email,
+                        "X-OpenWebUI-User-Role": user.role,
+                    }
+                    if ENABLE_FORWARD_USER_INFO_HEADERS and user
+                    else {}
+                ),
             },
             json={"input": texts, "model": model},
         )
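
The `**( ... if ... else {})` spread keeps `headers` a flat dict: the `X-OpenWebUI-*` fields are merged in only when the flag is set and a user object is present; otherwise nothing is added. The same pattern in isolation, with hypothetical values:

```python
from types import SimpleNamespace

# Standalone illustration of the conditional dict-spread used above.
enable_forward = True
user = SimpleNamespace(name="Ada", id="u-1", email="ada@example.com", role="admin")

headers = {
    "Content-Type": "application/json",
    **(
        {"X-OpenWebUI-User-Name": user.name, "X-OpenWebUI-User-Id": user.id}
        if enable_forward and user
        else {}
    ),
}
# -> {'Content-Type': 'application/json',
#     'X-OpenWebUI-User-Name': 'Ada', 'X-OpenWebUI-User-Id': 'u-1'}
```
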
@@ -446,7 +461,7 @@ def generate_openai_batch_embeddings(


 def generate_ollama_batch_embeddings(
-    model: str, texts: list[str], url: str, key: str = ""
+    model: str, texts: list[str], url: str, key: str = "", user: UserModel = None
 ) -> Optional[list[list[float]]]:
     try:
         r = requests.post(
@@ -454,6 +469,16 @@ def generate_ollama_batch_embeddings(
             headers={
                 "Content-Type": "application/json",
                 "Authorization": f"Bearer {key}",
+                **(
+                    {
+                        "X-OpenWebUI-User-Name": user.name,
+                        "X-OpenWebUI-User-Id": user.id,
+                        "X-OpenWebUI-User-Email": user.email,
+                        "X-OpenWebUI-User-Role": user.role,
+                    }
+                    if ENABLE_FORWARD_USER_INFO_HEADERS and user
+                    else {}
+                ),
             },
             json={"input": texts, "model": model},
         )
@@ -472,22 +497,23 @@ def generate_ollama_batch_embeddings(
 def generate_embeddings(engine: str, model: str, text: Union[str, list[str]], **kwargs):
     url = kwargs.get("url", "")
     key = kwargs.get("key", "")
+    user = kwargs.get("user")

     if engine == "ollama":
         if isinstance(text, list):
             embeddings = generate_ollama_batch_embeddings(
-                **{"model": model, "texts": text, "url": url, "key": key}
+                **{"model": model, "texts": text, "url": url, "key": key, "user": user}
             )
         else:
             embeddings = generate_ollama_batch_embeddings(
-                **{"model": model, "texts": [text], "url": url, "key": key}
+                **{"model": model, "texts": [text], "url": url, "key": key, "user": user}
             )
         return embeddings[0] if isinstance(text, str) else embeddings
     elif engine == "openai":
         if isinstance(text, list):
-            embeddings = generate_openai_batch_embeddings(model, text, url, key)
+            embeddings = generate_openai_batch_embeddings(model, text, url, key, user)
         else:
-            embeddings = generate_openai_batch_embeddings(model, [text], url, key)
+            embeddings = generate_openai_batch_embeddings(model, [text], url, key, user)

         return embeddings[0] if isinstance(text, str) else embeddings
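
`generate_embeddings` pulls `user` out of `**kwargs` and threads it into both backends; a plain string is wrapped into a one-element batch and the single vector is unwrapped on return. A hypothetical call (URL, model, and user are placeholders):

```python
# Hypothetical usage; all values are placeholders.
embedding = generate_embeddings(
    engine="ollama",
    model="nomic-embed-text",
    text="a single chunk of text",
    url="http://localhost:11434",
    key="",
    user=user,  # picked up via kwargs.get("user") and forwarded as headers
)
```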