vectordb-bench 0.0.17__py3-none-any.whl → 0.0.19__py3-none-any.whl

This diff shows the published contents of two package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the versions as they appear in their public registries.
Files changed (28)
  1. vectordb_bench/backend/cases.py +1 -1
  2. vectordb_bench/backend/clients/__init__.py +39 -0
  3. vectordb_bench/backend/clients/aliyun_elasticsearch/aliyun_elasticsearch.py +27 -0
  4. vectordb_bench/backend/clients/aliyun_elasticsearch/config.py +19 -0
  5. vectordb_bench/backend/clients/aliyun_opensearch/aliyun_opensearch.py +304 -0
  6. vectordb_bench/backend/clients/aliyun_opensearch/config.py +48 -0
  7. vectordb_bench/backend/clients/alloydb/alloydb.py +372 -0
  8. vectordb_bench/backend/clients/alloydb/cli.py +147 -0
  9. vectordb_bench/backend/clients/alloydb/config.py +168 -0
  10. vectordb_bench/backend/clients/api.py +5 -0
  11. vectordb_bench/backend/clients/milvus/cli.py +25 -1
  12. vectordb_bench/backend/clients/milvus/config.py +16 -2
  13. vectordb_bench/backend/clients/milvus/milvus.py +4 -6
  14. vectordb_bench/backend/runner/rate_runner.py +32 -15
  15. vectordb_bench/backend/runner/read_write_runner.py +102 -36
  16. vectordb_bench/backend/runner/serial_runner.py +8 -2
  17. vectordb_bench/backend/runner/util.py +0 -16
  18. vectordb_bench/backend/task_runner.py +4 -3
  19. vectordb_bench/backend/utils.py +1 -0
  20. vectordb_bench/cli/vectordbbench.py +2 -0
  21. vectordb_bench/frontend/config/dbCaseConfigs.py +224 -0
  22. vectordb_bench/models.py +9 -0
  23. {vectordb_bench-0.0.17.dist-info → vectordb_bench-0.0.19.dist-info}/METADATA +13 -23
  24. {vectordb_bench-0.0.17.dist-info → vectordb_bench-0.0.19.dist-info}/RECORD +28 -21
  25. {vectordb_bench-0.0.17.dist-info → vectordb_bench-0.0.19.dist-info}/LICENSE +0 -0
  26. {vectordb_bench-0.0.17.dist-info → vectordb_bench-0.0.19.dist-info}/WHEEL +0 -0
  27. {vectordb_bench-0.0.17.dist-info → vectordb_bench-0.0.19.dist-info}/entry_points.txt +0 -0
  28. {vectordb_bench-0.0.17.dist-info → vectordb_bench-0.0.19.dist-info}/top_level.txt +0 -0
vectordb_bench/backend/cases.py
@@ -289,7 +289,7 @@ class Performance1536D50K(PerformanceCase):
     description: str = """This case tests the search performance of a vector database with a medium 50K dataset (<b>OpenAI 50K vectors</b>, 1536 dimensions), at varying parallel levels.
 Results will show index building time, recall, and maximum QPS."""
     load_timeout: float | int = 3600
-    optimize_timeout: float | int | None = 15 * 60
+    optimize_timeout: float | int | None = config.OPTIMIZE_TIMEOUT_DEFAULT
 
 
 def metric_type_map(s: str) -> MetricType:
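Note: the only change to cases.py replaces a hard-coded 15-minute optimize timeout with a shared default, config.OPTIMIZE_TIMEOUT_DEFAULT. A minimal sketch of the pattern, assuming cases.py imports the package-level config module; the constant's name comes from the diff, while sourcing it from an environment variable is an illustrative assumption:

    # hypothetical sketch of the config module; only the constant's
    # name is taken from the diff, the env-var override is assumed
    import os

    OPTIMIZE_TIMEOUT_DEFAULT: float = float(os.environ.get("OPTIMIZE_TIMEOUT", 15 * 60))

Centralizing the default lets every PerformanceCase share one tunable value instead of scattered literals.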
vectordb_bench/backend/clients/__init__.py
@@ -32,11 +32,14 @@ class DB(Enum):
     PgVectoRS = "PgVectoRS"
     PgVectorScale = "PgVectorScale"
     PgDiskANN = "PgDiskANN"
+    AlloyDB = "AlloyDB"
     Redis = "Redis"
     MemoryDB = "MemoryDB"
     Chroma = "Chroma"
     AWSOpenSearch = "OpenSearch"
+    AliyunElasticsearch = "AliyunElasticsearch"
     Test = "test"
+    AliyunOpenSearch = "AliyunOpenSearch"
 
 
     @property
@@ -97,6 +100,18 @@ class DB(Enum):
         if self == DB.AWSOpenSearch:
             from .aws_opensearch.aws_opensearch import AWSOpenSearch
             return AWSOpenSearch
+
+        if self == DB.AlloyDB:
+            from .alloydb.alloydb import AlloyDB
+            return AlloyDB
+
+        if self == DB.AliyunElasticsearch:
+            from .aliyun_elasticsearch.aliyun_elasticsearch import AliyunElasticsearch
+            return AliyunElasticsearch
+
+        if self == DB.AliyunOpenSearch:
+            from .aliyun_opensearch.aliyun_opensearch import AliyunOpenSearch
+            return AliyunOpenSearch
 
     @property
     def config_cls(self) -> Type[DBConfig]:
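Note: this hunk and the next extend the enum's lazy dispatch: each branch imports its client (or config) class only when that member is actually used, keeping the AlloyDB and Alibaba Cloud SDKs optional dependencies. A hedged usage sketch, assuming the enum is importable under the package path shown in the file list above:

    # sketch: resolving the new backends through the DB enum
    from vectordb_bench.backend.clients import DB

    db = DB("AliyunOpenSearch")   # members are looked up by string value
    client_cls = db.init_cls      # first access triggers the import below
    config_cls = db.config_cls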
@@ -156,6 +171,18 @@ class DB(Enum):
         if self == DB.AWSOpenSearch:
             from .aws_opensearch.config import AWSOpenSearchConfig
             return AWSOpenSearchConfig
+
+        if self == DB.AlloyDB:
+            from .alloydb.config import AlloyDBConfig
+            return AlloyDBConfig
+
+        if self == DB.AliyunElasticsearch:
+            from .aliyun_elasticsearch.config import AliyunElasticsearchConfig
+            return AliyunElasticsearchConfig
+
+        if self == DB.AliyunOpenSearch:
+            from .aliyun_opensearch.config import AliyunOpenSearchConfig
+            return AliyunOpenSearchConfig
 
     def case_config_cls(self, index_type: IndexType | None = None) -> Type[DBCaseConfig]:
         if self == DB.Milvus:
@@ -197,6 +224,18 @@ class DB(Enum):
         if self == DB.PgDiskANN:
             from .pgdiskann.config import _pgdiskann_case_config
             return _pgdiskann_case_config.get(index_type)
+
+        if self == DB.AlloyDB:
+            from .alloydb.config import _alloydb_case_config
+            return _alloydb_case_config.get(index_type)
+
+        if self == DB.AliyunElasticsearch:
+            from .elastic_cloud.config import ElasticCloudIndexConfig
+            return ElasticCloudIndexConfig
+
+        if self == DB.AliyunOpenSearch:
+            from .aliyun_opensearch.config import AliyunOpenSearchIndexConfig
+            return AliyunOpenSearchIndexConfig
 
         # DB.Pinecone, DB.Chroma, DB.Redis
         return EmptyDBCaseConfig
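Note: AliyunElasticsearch reuses the existing ElasticCloudIndexConfig rather than defining its own case config, matching the thin wrapper client in the next file. A quick sanity check, assuming the imports resolve as the diff shows:

    from vectordb_bench.backend.clients import DB

    assert DB.AliyunElasticsearch.case_config_cls().__name__ == "ElasticCloudIndexConfig"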
vectordb_bench/backend/clients/aliyun_elasticsearch/aliyun_elasticsearch.py (new file)
@@ -0,0 +1,27 @@
+from ..elastic_cloud.elastic_cloud import ElasticCloud
+from ..elastic_cloud.config import ElasticCloudIndexConfig
+
+
+class AliyunElasticsearch(ElasticCloud):
+    def __init__(
+        self,
+        dim: int,
+        db_config: dict,
+        db_case_config: ElasticCloudIndexConfig,
+        indice: str = "vdb_bench_indice",  # must be lowercase
+        id_col_name: str = "id",
+        vector_col_name: str = "vector",
+        drop_old: bool = False,
+        **kwargs,
+    ):
+        super().__init__(
+            dim=dim,
+            db_config=db_config,
+            db_case_config=db_case_config,
+            indice=indice,
+            id_col_name=id_col_name,
+            vector_col_name=vector_col_name,
+            drop_old=drop_old,
+            **kwargs,
+        )
+
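Note: the new client is a pure pass-through; every constructor argument is forwarded to the existing ElasticCloud implementation, so only the connection settings (config.py, next) differ. A hypothetical construction, with placeholder credentials and assuming ElasticCloudIndexConfig instantiates with defaults:

    # hypothetical usage; host and password values are placeholders
    from pydantic import SecretStr
    from vectordb_bench.backend.clients.aliyun_elasticsearch.aliyun_elasticsearch import AliyunElasticsearch
    from vectordb_bench.backend.clients.aliyun_elasticsearch.config import AliyunElasticsearchConfig
    from vectordb_bench.backend.clients.elastic_cloud.config import ElasticCloudIndexConfig

    db_config = AliyunElasticsearchConfig(host="es.example.internal", password=SecretStr("secret")).to_dict()
    client = AliyunElasticsearch(dim=1536, db_config=db_config,
                                 db_case_config=ElasticCloudIndexConfig(), drop_old=False)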
vectordb_bench/backend/clients/aliyun_elasticsearch/config.py (new file)
@@ -0,0 +1,19 @@
+from enum import Enum
+from pydantic import SecretStr, BaseModel
+
+from ..api import DBConfig, DBCaseConfig, MetricType, IndexType
+
+
+class AliyunElasticsearchConfig(DBConfig, BaseModel):
+    #: Protocol in use to connect to the node
+    scheme: str = "http"
+    host: str = ""
+    port: int = 9200
+    user: str = "elastic"
+    password: SecretStr
+
+    def to_dict(self) -> dict:
+        return {
+            "hosts": [{'scheme': self.scheme, 'host': self.host, 'port': self.port}],
+            "basic_auth": (self.user, self.password.get_secret_value()),
+        }
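Note: to_dict() emits exactly the keyword arguments that elasticsearch-py's Elasticsearch constructor accepts (hosts plus basic_auth), which is presumably how the inherited ElasticCloud client opens its connection. A sketch under that assumption:

    # sketch; assumes the parent client does roughly Elasticsearch(**db_config)
    from elasticsearch import Elasticsearch

    es = Elasticsearch(hosts=[{"scheme": "http", "host": "es.example.internal", "port": 9200}],
                       basic_auth=("elastic", "secret"))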
vectordb_bench/backend/clients/aliyun_opensearch/aliyun_opensearch.py (new file)
@@ -0,0 +1,304 @@
+import json
+import logging
+from contextlib import contextmanager
+import time
+
+from alibabacloud_ha3engine_vector.models import QueryRequest
+
+from ..api import VectorDB, MetricType
+from .config import AliyunOpenSearchIndexConfig
+
+from alibabacloud_searchengine20211025.client import Client as searchengineClient
+from alibabacloud_searchengine20211025 import models as searchengine_models
+from alibabacloud_tea_openapi import models as open_api_models
+from alibabacloud_ha3engine_vector import models, client
+
+log = logging.getLogger(__name__)
+
+ALIYUN_OPENSEARCH_MAX_SIZE_PER_BATCH = 2 * 1024 * 1024  # 2MB
+ALIYUN_OPENSEARCH_MAX_NUM_PER_BATCH = 100
+
+class AliyunOpenSearch(VectorDB):
+    def __init__(
+        self,
+        dim: int,
+        db_config: dict,
+        db_case_config: AliyunOpenSearchIndexConfig,
+        collection_name: str = "VectorDBBenchCollection",
+        drop_old: bool = False,
+        **kwargs,
+    ):
+        self.control_client = None
+        self.dim = dim
+        self.db_config = db_config
+        self.case_config = db_case_config
+        self.collection_name = collection_name
+        self.instance_id = db_config["host"].split(".")[0].replace("http://", "").replace("https://", "")
+
+        self._primary_field = "id"
+        self._scalar_field = "int_id"
+        self._vector_field = "vector"
+        self._index_name = "vector_idx"
+
+        self.batch_size = int(
+            min(ALIYUN_OPENSEARCH_MAX_SIZE_PER_BATCH / (dim * 25), ALIYUN_OPENSEARCH_MAX_NUM_PER_BATCH)
+        )
+
+        log.info(f"Aliyun_OpenSearch client config: {self.db_config}")
+        control_config = open_api_models.Config(
+            access_key_id=self.db_config["ak"],
+            access_key_secret=self.db_config["sk"],
+            endpoint=self.db_config["control_host"]
+        )
+        self.control_client = searchengineClient(control_config)
+
+        if drop_old:
+            log.info(f"aliyun_OpenSearch client drop old index: {self.collection_name}")
+            if self._index_exists(self.control_client):
+                self._modify_index(self.control_client)
+            else:
+                self._create_index(self.control_client)
+
+    def _create_index(self, client: searchengineClient):
+        create_table_request = searchengine_models.CreateTableRequest()
+        create_table_request.name = self.collection_name
+        create_table_request.primary_key = self._primary_field
+        create_table_request.partition_count = 1
+        create_table_request.field_schema = {
+            self._primary_field: "INT64",
+            self._vector_field: "MULTI_FLOAT",
+            self._scalar_field: "INT64"
+        }
+        vector_index = searchengine_models.ModifyTableRequestVectorIndex()
+        vector_index.index_name = self._index_name
+        vector_index.dimension = self.dim
+        vector_index.distance_type = self.case_config.distance_type()
+        vector_index.vector_field = self._vector_field
+        vector_index.vector_index_type = "HNSW"
+
+        advance_params = searchengine_models.ModifyTableRequestVectorIndexAdvanceParams()
+        advance_params.build_index_params = "{\"proxima.hnsw.builder.max_neighbor_count\":" + str(self.case_config.M) + ",\"proxima.hnsw.builder.efconstruction\":" + str(self.case_config.efConstruction) + ",\"proxima.hnsw.builder.enable_adsampling\":true,\"proxima.hnsw.builder.slack_pruning_factor\":1.1,\"proxima.hnsw.builder.thread_count\":16}"
+        advance_params.search_index_params = "{\"proxima.hnsw.searcher.ef\":400,\"proxima.hnsw.searcher.dynamic_termination.prob_threshold\":0.7}"
+        vector_index.advance_params = advance_params
+        create_table_request.vector_index = [vector_index]
+
+        try:
+            response = client.create_table(self.instance_id, create_table_request)
+            log.info(f"create table success: {response.body}")
+        except Exception as error:
+            log.info(error.message)
+            log.info(error.data.get("Recommend"))
+            log.info(f"Failed to create index: error: {str(error)}")
+            raise error from None
+
+        # wait until the new table becomes active
+        self._active_index(client)
+
+    # poll until the table reports IN_USE
+    def _active_index(self, client: searchengineClient) -> None:
+        retry_times = 0
+        while True:
+            time.sleep(10)
+            log.info(f"get table, attempt {retry_times}")
+            retry_times += 1
+            response = client.get_table(self.instance_id, self.collection_name)
+            if response.body.result.status == 'IN_USE':
+                log.info(f"table {self.collection_name} is now in use.")
+                return
+
+    def _index_exists(self, client: searchengineClient) -> bool:
+        try:
+            client.get_table(self.instance_id, self.collection_name)
+            return True
+        except Exception as error:
+            log.info("get table from searchengine error")
+            log.info(error.message)
+            return False
+
+    # wait until the index build finishes; embeddings are inserted only after the build succeeds
+    def _index_build_success(self, client: searchengineClient) -> None:
+        log.info("begin to check whether the table build succeeded.")
+        time.sleep(50)
+
+        retry_times = 0
+        while True:
+            time.sleep(10)
+            log.info(f"get table fsm, attempt {retry_times}")
+            retry_times += 1
+            request = searchengine_models.ListTasksRequest()
+            request.start = (int(time.time()) - 3600) * 1000
+            request.end = int(time.time()) * 1000
+            response = client.list_tasks(self.instance_id, request)
+            fsms = response.body.result
+            cur_fsm = None
+            for fsm in fsms:
+                if fsm["type"] != "datasource_flow_fsm":
+                    continue
+                if self.collection_name not in fsm["fsmId"]:
+                    continue
+                cur_fsm = fsm
+                break
+            if cur_fsm is None:
+                log.info("no build-index fsm found")
+                return
+            if cur_fsm["status"] == "success":
+                return
+
+    def _modify_index(self, client: searchengineClient) -> None:
+        # wait until the table is active before modifying it
+        self._active_index(client)
+
+        modify_table_request = searchengine_models.ModifyTableRequest()
+        modify_table_request.partition_count = 1
+        modify_table_request.primary_key = self._primary_field
+        modify_table_request.field_schema = {
+            self._primary_field: "INT64",
+            self._vector_field: "MULTI_FLOAT",
+            self._scalar_field: "INT64"
+        }
+        vector_index = searchengine_models.ModifyTableRequestVectorIndex()
+        vector_index.index_name = self._index_name
+        vector_index.dimension = self.dim
+        vector_index.distance_type = self.case_config.distance_type()
+        vector_index.vector_field = self._vector_field
+        vector_index.vector_index_type = "HNSW"
+        advance_params = searchengine_models.ModifyTableRequestVectorIndexAdvanceParams()
+        advance_params.build_index_params = "{\"proxima.hnsw.builder.max_neighbor_count\":" + str(self.case_config.M) + ",\"proxima.hnsw.builder.efconstruction\":" + str(self.case_config.efConstruction) + ",\"proxima.hnsw.builder.enable_adsampling\":true,\"proxima.hnsw.builder.slack_pruning_factor\":1.1,\"proxima.hnsw.builder.thread_count\":16}"
+        advance_params.search_index_params = "{\"proxima.hnsw.searcher.ef\":400,\"proxima.hnsw.searcher.dynamic_termination.prob_threshold\":0.7}"
+        vector_index.advance_params = advance_params
+
+        modify_table_request.vector_index = [vector_index]
+
+        try:
+            response = client.modify_table(self.instance_id, self.collection_name, modify_table_request)
+            log.info(f"modify table success: {response.body}")
+        except Exception as error:
+            log.info(error.message)
+            log.info(error.data.get("Recommend"))
+            log.info(f"Failed to modify index: error: {str(error)}")
+            raise error from None
+
+        # wait for the modify-index / delete-data fsm to succeed
+        self._index_build_success(client)
+
+    # total number of records currently in the collection
+    def _get_total_count(self):
+        try:
+            response = self.client.stats(self.collection_name)
+            body = json.loads(response.body)
+            log.info(f"stats info: {response.body}")
+
+            if "result" in body and "totalDocCount" in body.get("result"):
+                return body.get("result").get("totalDocCount")
+            else:
+                return 0
+        except Exception as e:
+            log.warning(f"Error querying index: {e}")
+            return 0
+
+    @contextmanager
+    def init(self) -> None:
+        """connect to aliyun opensearch"""
+        config = models.Config(
+            endpoint=self.db_config["host"],
+            protocol="http",
+            access_user_name=self.db_config["user"],
+            access_pass_word=self.db_config["password"]
+        )
+
+        self.client = client.Client(config)
+
+        yield
+        # self.client.transport.close()
+        # release the handle but keep the attribute, so later asserts fail cleanly
+        self.client = None
+
+    def insert_embeddings(
+        self,
+        embeddings: list[list[float]],
+        metadata: list[int],
+        **kwargs,
+    ) -> tuple[int, Exception]:
+        """Insert the embeddings to the opensearch."""
+        assert self.client is not None, "should self.init() first"
+        assert len(embeddings) == len(metadata)
+        insert_count = 0
+
+        try:
+            for batch_start_offset in range(0, len(embeddings), self.batch_size):
+                batch_end_offset = min(
+                    batch_start_offset + self.batch_size, len(embeddings)
+                )
+                documents = []
+                for i in range(batch_start_offset, batch_end_offset):
+                    documentFields = {
+                        self._primary_field: metadata[i],
+                        self._vector_field: embeddings[i],
+                        self._scalar_field: metadata[i],
+                        "ops_build_channel": "inc"
+                    }
+                    document = {
+                        "fields": documentFields,
+                        "cmd": "add"
+                    }
+                    documents.append(document)
+
+                pushDocumentsRequest = models.PushDocumentsRequest({}, documents)
+                self.client.push_documents(self.collection_name, self._primary_field, pushDocumentsRequest)
+                insert_count += batch_end_offset - batch_start_offset
+        except Exception as e:
+            log.info(f"Failed to insert data: {e}")
+            return (insert_count, e)
+        return (insert_count, None)
+
+    def search_embedding(
+        self,
+        query: list[float],
+        k: int = 100,
+        filters: dict | None = None,
+    ) -> list[int]:
+        assert self.client is not None, "should self.init() first"
+        search_params = "{\"proxima.hnsw.searcher.ef\":" + str(self.case_config.ef_search) + "}"
+
+        os_filter = f"{self._scalar_field} {filters.get('metadata')}" if filters else ""
+
+        try:
+            request = QueryRequest(table_name=self.collection_name,
+                                   vector=query,
+                                   top_k=k,
+                                   search_params=search_params, filter=os_filter)
+            result = self.client.query(request)
+        except Exception as e:
+            log.info(f"Error querying index: {e}")
+            raise e
+        res = json.loads(result.body)
+        id_res = [one_res["id"] for one_res in res["result"]]
+        return id_res
+
+    def need_normalize_cosine(self) -> bool:
+        """Whether this database needs the dataset normalized to support COSINE."""
+        if self.case_config.metric_type == MetricType.COSINE:
+            log.info("cosine dataset needs normalization.")
+            return True
+
+        return False
+
+    def optimize(self):
+        pass
+
+    def optimize_with_size(self, data_size: int):
+        log.info(f"optimize count: {data_size}")
+        retry_times = 0
+        while True:
+            time.sleep(10)
+            log.info(f"check insert progress, attempt {retry_times}")
+            retry_times += 1
+            total_count = self._get_total_count()
+            # check whether all the data has been inserted
+            if total_count == data_size:
+                log.info("optimize table finished.")
+                return
+
+    def ready_to_load(self):
+        """ready_to_load will be called before load in load cases."""
+        pass
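Two details of the client above are worth unpacking. First, the batch size in __init__ balances the 2 MB per-request cap against the 100-document cap, budgeting roughly 25 bytes of JSON per float dimension; for the 1536-dimensional datasets this works out to 54 documents per batch:

    # worked example of the batch-size heuristic from __init__
    dim = 1536
    max_size = 2 * 1024 * 1024                    # ALIYUN_OPENSEARCH_MAX_SIZE_PER_BATCH
    batch = int(min(max_size / (dim * 25), 100))  # min(54.61..., 100)
    assert batch == 54

Second, the hand-concatenated proxima parameter strings in _create_index and _modify_index are easy to get wrong; an equivalent built with json.dumps (an alternative sketch, not what the package ships) yields the same payload with less quoting risk:

    import json

    build_index_params = json.dumps({
        "proxima.hnsw.builder.max_neighbor_count": 100,  # case_config.M
        "proxima.hnsw.builder.efconstruction": 500,      # case_config.efConstruction
        "proxima.hnsw.builder.enable_adsampling": True,
        "proxima.hnsw.builder.slack_pruning_factor": 1.1,
        "proxima.hnsw.builder.thread_count": 16,
    })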
vectordb_bench/backend/clients/aliyun_opensearch/config.py (new file)
@@ -0,0 +1,48 @@
+import logging
+from enum import Enum
+from pydantic import SecretStr, BaseModel
+
+from ..api import DBConfig, DBCaseConfig, MetricType, IndexType
+
+log = logging.getLogger(__name__)
+
+
+class AliyunOpenSearchConfig(DBConfig, BaseModel):
+    host: str = ""
+    user: str = ""
+    password: SecretStr = ""
+
+    ak: str = ""
+    sk: SecretStr = ""
+    control_host: str = "searchengine.cn-hangzhou.aliyuncs.com"
+
+    def to_dict(self) -> dict:
+        return {
+            "host": self.host,
+            "user": self.user,
+            "password": self.password.get_secret_value(),
+            "ak": self.ak,
+            "sk": self.sk.get_secret_value(),
+            "control_host": self.control_host,
+        }
+
+
+class AliyunOpenSearchIndexConfig(BaseModel, DBCaseConfig):
+    metric_type: MetricType = MetricType.L2
+    efConstruction: int = 500
+    M: int = 100
+    ef_search: int = 40
+
+    def distance_type(self) -> str:
+        if self.metric_type == MetricType.L2:
+            return "SquaredEuclidean"
+        elif self.metric_type == MetricType.IP:
+            return "InnerProduct"
+        elif self.metric_type == MetricType.COSINE:
+            return "InnerProduct"
+        return "SquaredEuclidean"
+
+    def index_param(self) -> dict:
+        return {}
+
+    def search_param(self) -> dict:
+        return {}
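A closing note on metrics: AliyunOpenSearchIndexConfig maps COSINE to "InnerProduct", which is only equivalent for unit-length vectors; that is why need_normalize_cosine() in the client returns True for COSINE, asking the benchmark runner to normalize the dataset first. A minimal sketch of that normalization (assumed to happen in the runner, not in this client):

    import numpy as np

    def normalize(vectors: np.ndarray) -> np.ndarray:
        # for unit vectors the inner product equals cosine similarity,
        # so an InnerProduct index can serve COSINE queries
        return vectors / np.linalg.norm(vectors, axis=1, keepdims=True)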