# milvus.py — Milvus vector-database client adapter for Open WebUI retrieval.
import json
import logging
from typing import Optional

from pymilvus import MilvusClient as Client
from pymilvus import FieldSchema, DataType

from open_webui.retrieval.vector.main import VectorItem, SearchResult, GetResult
from open_webui.config import (
    MILVUS_URI,
    MILVUS_DB,
    MILVUS_TOKEN,
)
from open_webui.env import SRC_LOG_LEVELS
  13. log = logging.getLogger(__name__)
  14. log.setLevel(SRC_LOG_LEVELS["RAG"])
  15. class MilvusClient:
  16. def __init__(self):
  17. self.collection_prefix = "open_webui"
  18. if MILVUS_TOKEN is None:
  19. self.client = Client(uri=MILVUS_URI, db_name=MILVUS_DB)
  20. else:
  21. self.client = Client(uri=MILVUS_URI, db_name=MILVUS_DB, token=MILVUS_TOKEN)
  22. def _result_to_get_result(self, result) -> GetResult:
  23. ids = []
  24. documents = []
  25. metadatas = []
  26. for match in result:
  27. _ids = []
  28. _documents = []
  29. _metadatas = []
  30. for item in match:
  31. _ids.append(item.get("id"))
  32. _documents.append(item.get("data", {}).get("text"))
  33. _metadatas.append(item.get("metadata"))
  34. ids.append(_ids)
  35. documents.append(_documents)
  36. metadatas.append(_metadatas)
  37. return GetResult(
  38. **{
  39. "ids": ids,
  40. "documents": documents,
  41. "metadatas": metadatas,
  42. }
  43. )
  44. def _result_to_search_result(self, result) -> SearchResult:
  45. ids = []
  46. distances = []
  47. documents = []
  48. metadatas = []
  49. for match in result:
  50. _ids = []
  51. _distances = []
  52. _documents = []
  53. _metadatas = []
  54. for item in match:
  55. _ids.append(item.get("id"))
  56. _distances.append(item.get("distance"))
  57. _documents.append(item.get("entity", {}).get("data", {}).get("text"))
  58. _metadatas.append(item.get("entity", {}).get("metadata"))
  59. ids.append(_ids)
  60. distances.append(_distances)
  61. documents.append(_documents)
  62. metadatas.append(_metadatas)
  63. return SearchResult(
  64. **{
  65. "ids": ids,
  66. "distances": distances,
  67. "documents": documents,
  68. "metadatas": metadatas,
  69. }
  70. )
  71. def _create_collection(self, collection_name: str, dimension: int):
  72. schema = self.client.create_schema(
  73. auto_id=False,
  74. enable_dynamic_field=True,
  75. )
  76. schema.add_field(
  77. field_name="id",
  78. datatype=DataType.VARCHAR,
  79. is_primary=True,
  80. max_length=65535,
  81. )
  82. schema.add_field(
  83. field_name="vector",
  84. datatype=DataType.FLOAT_VECTOR,
  85. dim=dimension,
  86. description="vector",
  87. )
  88. schema.add_field(field_name="data", datatype=DataType.JSON, description="data")
  89. schema.add_field(
  90. field_name="metadata", datatype=DataType.JSON, description="metadata"
  91. )
  92. index_params = self.client.prepare_index_params()
  93. index_params.add_index(
  94. field_name="vector",
  95. index_type="HNSW",
  96. metric_type="COSINE",
  97. params={"M": 16, "efConstruction": 100},
  98. )
  99. self.client.create_collection(
  100. collection_name=f"{self.collection_prefix}_{collection_name}",
  101. schema=schema,
  102. index_params=index_params,
  103. )
  104. def has_collection(self, collection_name: str) -> bool:
  105. # Check if the collection exists based on the collection name.
  106. collection_name = collection_name.replace("-", "_")
  107. return self.client.has_collection(
  108. collection_name=f"{self.collection_prefix}_{collection_name}"
  109. )
  110. def delete_collection(self, collection_name: str):
  111. # Delete the collection based on the collection name.
  112. collection_name = collection_name.replace("-", "_")
  113. return self.client.drop_collection(
  114. collection_name=f"{self.collection_prefix}_{collection_name}"
  115. )
  116. def search(
  117. self, collection_name: str, vectors: list[list[float | int]], limit: int
  118. ) -> Optional[SearchResult]:
  119. # Search for the nearest neighbor items based on the vectors and return 'limit' number of results.
  120. collection_name = collection_name.replace("-", "_")
  121. result = self.client.search(
  122. collection_name=f"{self.collection_prefix}_{collection_name}",
  123. data=vectors,
  124. limit=limit,
  125. output_fields=["data", "metadata"],
  126. )
  127. return self._result_to_search_result(result)
  128. def query(self, collection_name: str, filter: dict, limit: Optional[int] = None):
  129. # Construct the filter string for querying
  130. collection_name = collection_name.replace("-", "_")
  131. if not self.has_collection(collection_name):
  132. return None
  133. filter_string = " && ".join(
  134. [
  135. f'metadata["{key}"] == {json.dumps(value)}'
  136. for key, value in filter.items()
  137. ]
  138. )
  139. max_limit = 16383 # The maximum number of records per request
  140. all_results = []
  141. if limit is None:
  142. limit = float("inf") # Use infinity as a placeholder for no limit
  143. # Initialize offset and remaining to handle pagination
  144. offset = 0
  145. remaining = limit
  146. try:
  147. # Loop until there are no more items to fetch or the desired limit is reached
  148. while remaining > 0:
  149. log.info(f"remaining: {remaining}")
  150. current_fetch = min(
  151. max_limit, remaining
  152. ) # Determine how many items to fetch in this iteration
  153. results = self.client.query(
  154. collection_name=f"{self.collection_prefix}_{collection_name}",
  155. filter=filter_string,
  156. output_fields=["*"],
  157. limit=current_fetch,
  158. offset=offset,
  159. )
  160. if not results:
  161. break
  162. all_results.extend(results)
  163. results_count = len(results)
  164. remaining -= (
  165. results_count # Decrease remaining by the number of items fetched
  166. )
  167. offset += results_count
  168. # Break the loop if the results returned are less than the requested fetch count
  169. if results_count < current_fetch:
  170. break
  171. log.debug(all_results)
  172. return self._result_to_get_result([all_results])
  173. except Exception as e:
  174. log.exception(
  175. f"Error querying collection {collection_name} with limit {limit}: {e}"
  176. )
  177. return None
  178. def get(self, collection_name: str) -> Optional[GetResult]:
  179. # Get all the items in the collection.
  180. collection_name = collection_name.replace("-", "_")
  181. result = self.client.query(
  182. collection_name=f"{self.collection_prefix}_{collection_name}",
  183. filter='id != ""',
  184. )
  185. return self._result_to_get_result([result])
  186. def insert(self, collection_name: str, items: list[VectorItem]):
  187. # Insert the items into the collection, if the collection does not exist, it will be created.
  188. collection_name = collection_name.replace("-", "_")
  189. if not self.client.has_collection(
  190. collection_name=f"{self.collection_prefix}_{collection_name}"
  191. ):
  192. self._create_collection(
  193. collection_name=collection_name, dimension=len(items[0]["vector"])
  194. )
  195. return self.client.insert(
  196. collection_name=f"{self.collection_prefix}_{collection_name}",
  197. data=[
  198. {
  199. "id": item["id"],
  200. "vector": item["vector"],
  201. "data": {"text": item["text"]},
  202. "metadata": item["metadata"],
  203. }
  204. for item in items
  205. ],
  206. )
  207. def upsert(self, collection_name: str, items: list[VectorItem]):
  208. # Update the items in the collection, if the items are not present, insert them. If the collection does not exist, it will be created.
  209. collection_name = collection_name.replace("-", "_")
  210. if not self.client.has_collection(
  211. collection_name=f"{self.collection_prefix}_{collection_name}"
  212. ):
  213. self._create_collection(
  214. collection_name=collection_name, dimension=len(items[0]["vector"])
  215. )
  216. return self.client.upsert(
  217. collection_name=f"{self.collection_prefix}_{collection_name}",
  218. data=[
  219. {
  220. "id": item["id"],
  221. "vector": item["vector"],
  222. "data": {"text": item["text"]},
  223. "metadata": item["metadata"],
  224. }
  225. for item in items
  226. ],
  227. )
  228. def delete(
  229. self,
  230. collection_name: str,
  231. ids: Optional[list[str]] = None,
  232. filter: Optional[dict] = None,
  233. ):
  234. # Delete the items from the collection based on the ids.
  235. collection_name = collection_name.replace("-", "_")
  236. if ids:
  237. return self.client.delete(
  238. collection_name=f"{self.collection_prefix}_{collection_name}",
  239. ids=ids,
  240. )
  241. elif filter:
  242. # Convert the filter dictionary to a string using JSON_CONTAINS.
  243. filter_string = " && ".join(
  244. [
  245. f'metadata["{key}"] == {json.dumps(value)}'
  246. for key, value in filter.items()
  247. ]
  248. )
  249. return self.client.delete(
  250. collection_name=f"{self.collection_prefix}_{collection_name}",
  251. filter=filter_string,
  252. )
  253. def reset(self):
  254. # Resets the database. This will delete all collections and item entries.
  255. collection_names = self.client.list_collections()
  256. for collection_name in collection_names:
  257. if collection_name.startswith(self.collection_prefix):
  258. self.client.drop_collection(collection_name=collection_name)