vectordb-bench 0.0.20__tar.gz → 0.0.21__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (165)
  1. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/PKG-INFO +1 -1
  2. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/pyproject.toml +1 -0
  3. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/vectordb_bench/backend/assembler.py +2 -2
  4. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/vectordb_bench/backend/clients/__init__.py +12 -2
  5. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/vectordb_bench/backend/clients/aliyun_opensearch/aliyun_opensearch.py +1 -7
  6. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/vectordb_bench/backend/clients/alloydb/alloydb.py +1 -4
  7. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/vectordb_bench/backend/clients/api.py +8 -15
  8. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/vectordb_bench/backend/clients/aws_opensearch/aws_opensearch.py +4 -7
  9. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/vectordb_bench/backend/clients/chroma/chroma.py +1 -4
  10. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/vectordb_bench/backend/clients/elastic_cloud/elastic_cloud.py +1 -4
  11. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/vectordb_bench/backend/clients/memorydb/cli.py +2 -2
  12. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/vectordb_bench/backend/clients/memorydb/memorydb.py +2 -5
  13. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/vectordb_bench/backend/clients/milvus/milvus.py +1 -20
  14. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/vectordb_bench/backend/clients/pgdiskann/pgdiskann.py +1 -4
  15. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/vectordb_bench/backend/clients/pgvecto_rs/pgvecto_rs.py +3 -11
  16. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/vectordb_bench/backend/clients/pgvector/pgvector.py +2 -7
  17. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/vectordb_bench/backend/clients/pgvectorscale/pgvectorscale.py +2 -7
  18. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/vectordb_bench/backend/clients/pinecone/pinecone.py +1 -4
  19. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/vectordb_bench/backend/clients/qdrant_cloud/qdrant_cloud.py +3 -6
  20. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/vectordb_bench/backend/clients/redis/redis.py +1 -4
  21. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/vectordb_bench/backend/clients/test/cli.py +1 -1
  22. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/vectordb_bench/backend/clients/test/test.py +1 -4
  23. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/vectordb_bench/backend/clients/weaviate_cloud/weaviate_cloud.py +1 -4
  24. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/vectordb_bench/backend/data_source.py +4 -12
  25. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/vectordb_bench/backend/runner/mp_runner.py +16 -34
  26. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/vectordb_bench/backend/runner/rate_runner.py +4 -4
  27. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/vectordb_bench/backend/runner/read_write_runner.py +11 -15
  28. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/vectordb_bench/backend/runner/serial_runner.py +20 -28
  29. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/vectordb_bench/backend/task_runner.py +6 -26
  30. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/vectordb_bench/interface.py +10 -19
  31. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/vectordb_bench.egg-info/PKG-INFO +1 -1
  32. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/.devcontainer/Dockerfile +0 -0
  33. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/.devcontainer/devcontainer.json +0 -0
  34. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/.env.example +0 -0
  35. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/.github/workflows/publish_package_on_release.yml +0 -0
  36. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/.github/workflows/pull_request.yml +0 -0
  37. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/.gitignore +0 -0
  38. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/Dockerfile +0 -0
  39. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/LICENSE +0 -0
  40. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/Makefile +0 -0
  41. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/OWNERS +0 -0
  42. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/README.md +0 -0
  43. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/fig/custom_case_run_test.png +0 -0
  44. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/fig/custom_dataset.png +0 -0
  45. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/install/requirements_py3.11.txt +0 -0
  46. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/install.py +0 -0
  47. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/setup.cfg +0 -0
  48. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/tests/conftest.py +0 -0
  49. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/tests/pytest.ini +0 -0
  50. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/tests/test_bench_runner.py +0 -0
  51. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/tests/test_chroma.py +0 -0
  52. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/tests/test_data_source.py +0 -0
  53. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/tests/test_dataset.py +0 -0
  54. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/tests/test_elasticsearch_cloud.py +0 -0
  55. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/tests/test_models.py +0 -0
  56. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/tests/test_rate_runner.py +0 -0
  57. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/tests/test_redis.py +0 -0
  58. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/tests/test_utils.py +0 -0
  59. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/tests/ut_cases.py +0 -0
  60. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/vectordb_bench/__init__.py +0 -0
  61. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/vectordb_bench/__main__.py +0 -0
  62. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/vectordb_bench/backend/__init__.py +0 -0
  63. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/vectordb_bench/backend/cases.py +0 -0
  64. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/vectordb_bench/backend/clients/aliyun_elasticsearch/aliyun_elasticsearch.py +0 -0
  65. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/vectordb_bench/backend/clients/aliyun_elasticsearch/config.py +0 -0
  66. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/vectordb_bench/backend/clients/aliyun_opensearch/config.py +0 -0
  67. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/vectordb_bench/backend/clients/alloydb/cli.py +0 -0
  68. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/vectordb_bench/backend/clients/alloydb/config.py +0 -0
  69. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/vectordb_bench/backend/clients/aws_opensearch/cli.py +0 -0
  70. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/vectordb_bench/backend/clients/aws_opensearch/config.py +0 -0
  71. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/vectordb_bench/backend/clients/aws_opensearch/run.py +0 -0
  72. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/vectordb_bench/backend/clients/chroma/config.py +0 -0
  73. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/vectordb_bench/backend/clients/elastic_cloud/config.py +0 -0
  74. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/vectordb_bench/backend/clients/memorydb/config.py +0 -0
  75. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/vectordb_bench/backend/clients/milvus/cli.py +0 -0
  76. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/vectordb_bench/backend/clients/milvus/config.py +0 -0
  77. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/vectordb_bench/backend/clients/pgdiskann/cli.py +0 -0
  78. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/vectordb_bench/backend/clients/pgdiskann/config.py +0 -0
  79. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/vectordb_bench/backend/clients/pgvecto_rs/cli.py +0 -0
  80. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/vectordb_bench/backend/clients/pgvecto_rs/config.py +0 -0
  81. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/vectordb_bench/backend/clients/pgvector/cli.py +0 -0
  82. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/vectordb_bench/backend/clients/pgvector/config.py +0 -0
  83. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/vectordb_bench/backend/clients/pgvectorscale/cli.py +0 -0
  84. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/vectordb_bench/backend/clients/pgvectorscale/config.py +0 -0
  85. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/vectordb_bench/backend/clients/pinecone/config.py +0 -0
  86. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/vectordb_bench/backend/clients/qdrant_cloud/config.py +0 -0
  87. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/vectordb_bench/backend/clients/redis/cli.py +0 -0
  88. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/vectordb_bench/backend/clients/redis/config.py +0 -0
  89. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/vectordb_bench/backend/clients/test/config.py +0 -0
  90. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/vectordb_bench/backend/clients/weaviate_cloud/cli.py +0 -0
  91. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/vectordb_bench/backend/clients/weaviate_cloud/config.py +0 -0
  92. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/vectordb_bench/backend/clients/zilliz_cloud/cli.py +0 -0
  93. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/vectordb_bench/backend/clients/zilliz_cloud/config.py +0 -0
  94. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/vectordb_bench/backend/clients/zilliz_cloud/zilliz_cloud.py +0 -0
  95. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/vectordb_bench/backend/dataset.py +0 -0
  96. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/vectordb_bench/backend/result_collector.py +0 -0
  97. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/vectordb_bench/backend/runner/__init__.py +0 -0
  98. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/vectordb_bench/backend/runner/util.py +0 -0
  99. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/vectordb_bench/backend/utils.py +0 -0
  100. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/vectordb_bench/base.py +0 -0
  101. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/vectordb_bench/cli/__init__.py +0 -0
  102. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/vectordb_bench/cli/cli.py +0 -0
  103. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/vectordb_bench/cli/vectordbbench.py +0 -0
  104. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/vectordb_bench/config-files/sample_config.yml +0 -0
  105. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/vectordb_bench/custom/custom_case.json +0 -0
  106. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/vectordb_bench/frontend/components/check_results/charts.py +0 -0
  107. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/vectordb_bench/frontend/components/check_results/data.py +0 -0
  108. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/vectordb_bench/frontend/components/check_results/expanderStyle.py +0 -0
  109. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/vectordb_bench/frontend/components/check_results/filters.py +0 -0
  110. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/vectordb_bench/frontend/components/check_results/footer.py +0 -0
  111. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/vectordb_bench/frontend/components/check_results/headerIcon.py +0 -0
  112. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/vectordb_bench/frontend/components/check_results/nav.py +0 -0
  113. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/vectordb_bench/frontend/components/check_results/priceTable.py +0 -0
  114. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/vectordb_bench/frontend/components/check_results/stPageConfig.py +0 -0
  115. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/vectordb_bench/frontend/components/concurrent/charts.py +0 -0
  116. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/vectordb_bench/frontend/components/custom/displayCustomCase.py +0 -0
  117. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/vectordb_bench/frontend/components/custom/displaypPrams.py +0 -0
  118. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/vectordb_bench/frontend/components/custom/getCustomConfig.py +0 -0
  119. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/vectordb_bench/frontend/components/custom/initStyle.py +0 -0
  120. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/vectordb_bench/frontend/components/get_results/saveAsImage.py +0 -0
  121. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/vectordb_bench/frontend/components/run_test/autoRefresh.py +0 -0
  122. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/vectordb_bench/frontend/components/run_test/caseSelector.py +0 -0
  123. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/vectordb_bench/frontend/components/run_test/dbConfigSetting.py +0 -0
  124. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/vectordb_bench/frontend/components/run_test/dbSelector.py +0 -0
  125. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/vectordb_bench/frontend/components/run_test/generateTasks.py +0 -0
  126. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/vectordb_bench/frontend/components/run_test/hideSidebar.py +0 -0
  127. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/vectordb_bench/frontend/components/run_test/initStyle.py +0 -0
  128. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/vectordb_bench/frontend/components/run_test/submitTask.py +0 -0
  129. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/vectordb_bench/frontend/components/tables/data.py +0 -0
  130. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/vectordb_bench/frontend/config/dbCaseConfigs.py +0 -0
  131. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/vectordb_bench/frontend/config/dbPrices.py +0 -0
  132. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/vectordb_bench/frontend/config/styles.py +0 -0
  133. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/vectordb_bench/frontend/pages/concurrent.py +0 -0
  134. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/vectordb_bench/frontend/pages/custom.py +0 -0
  135. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/vectordb_bench/frontend/pages/quries_per_dollar.py +0 -0
  136. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/vectordb_bench/frontend/pages/run_test.py +0 -0
  137. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/vectordb_bench/frontend/pages/tables.py +0 -0
  138. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/vectordb_bench/frontend/utils.py +0 -0
  139. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/vectordb_bench/frontend/vdb_benchmark.py +0 -0
  140. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/vectordb_bench/log_util.py +0 -0
  141. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/vectordb_bench/metric.py +0 -0
  142. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/vectordb_bench/models.py +0 -0
  143. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/vectordb_bench/results/ElasticCloud/result_20230727_standard_elasticcloud.json +0 -0
  144. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/vectordb_bench/results/ElasticCloud/result_20230808_standard_elasticcloud.json +0 -0
  145. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/vectordb_bench/results/Milvus/result_20230727_standard_milvus.json +0 -0
  146. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/vectordb_bench/results/Milvus/result_20230808_standard_milvus.json +0 -0
  147. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/vectordb_bench/results/PgVector/result_20230727_standard_pgvector.json +0 -0
  148. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/vectordb_bench/results/PgVector/result_20230808_standard_pgvector.json +0 -0
  149. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/vectordb_bench/results/Pinecone/result_20230727_standard_pinecone.json +0 -0
  150. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/vectordb_bench/results/Pinecone/result_20230808_standard_pinecone.json +0 -0
  151. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/vectordb_bench/results/QdrantCloud/result_20230727_standard_qdrantcloud.json +0 -0
  152. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/vectordb_bench/results/QdrantCloud/result_20230808_standard_qdrantcloud.json +0 -0
  153. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/vectordb_bench/results/WeaviateCloud/result_20230727_standard_weaviatecloud.json +0 -0
  154. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/vectordb_bench/results/WeaviateCloud/result_20230808_standard_weaviatecloud.json +0 -0
  155. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/vectordb_bench/results/ZillizCloud/result_20230727_standard_zillizcloud.json +0 -0
  156. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/vectordb_bench/results/ZillizCloud/result_20230808_standard_zillizcloud.json +0 -0
  157. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/vectordb_bench/results/ZillizCloud/result_20240105_standard_202401_zillizcloud.json +0 -0
  158. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/vectordb_bench/results/dbPrices.json +0 -0
  159. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/vectordb_bench/results/getLeaderboardData.py +0 -0
  160. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/vectordb_bench/results/leaderboard.json +0 -0
  161. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/vectordb_bench.egg-info/SOURCES.txt +0 -0
  162. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/vectordb_bench.egg-info/dependency_links.txt +0 -0
  163. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/vectordb_bench.egg-info/entry_points.txt +0 -0
  164. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/vectordb_bench.egg-info/requires.txt +0 -0
  165. {vectordb_bench-0.0.20 → vectordb_bench-0.0.21}/vectordb_bench.egg-info/top_level.txt +0 -0

PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.2
 Name: vectordb-bench
-Version: 0.0.20
+Version: 0.0.21
 Summary: VectorDBBench is not just an offering of benchmark results for mainstream vector databases and cloud services, it's your go-to tool for the ultimate performance and cost-effectiveness comparison. Designed with ease-of-use in mind, VectorDBBench is devised to help users, even non-professionals, reproduce results or test new systems, making the hunt for the optimal choice amongst a plethora of cloud services and open-source vector databases a breeze.
 Author-email: XuanYang-cn <xuan.yang@zilliz.com>
 Project-URL: repository, https://github.com/zilliztech/VectorDBBench

pyproject.toml
@@ -133,6 +133,7 @@ lint.ignore = [
     "RUF017",
     "C416",
     "PLW0603",
+    "COM812",
 ]
 
 # Allow autofix for all enabled rules (when `--fix`) is provided.

vectordb_bench/backend/assembler.py
@@ -53,8 +53,8 @@ class Assembler:
             _ = k.init_cls
 
         # sort by dataset size
-        for k, _ in db2runner:
-            db2runner[k].sort(key=lambda x: x.ca.dataset.data.size)
+        for _, runner in db2runner.items():
+            runner.sort(key=lambda x: x.ca.dataset.data.size)
 
         all_runners = []
         all_runners.extend(load_runners)
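
The assembler fix above replaces a broken dict iteration: looping over a dict directly yields only its keys, so `for k, _ in db2runner:` tries to tuple-unpack each key instead of receiving (key, value) pairs. A standalone sketch of the pitfall, with a made-up mapping standing in for the real runner lists:

```python
# Illustrative stand-in for the assembler's db2runner mapping; the real values
# are CaseRunner lists sorted by dataset size, not ints.
db2runner = {"Milvus": [3, 1, 2], "PgVector": [9, 7]}

# Old form: iterating the dict yields keys only, so each key gets tuple-unpacked
# into (k, _). String keys raise ValueError; enum keys (as in the package) raise
# TypeError. Either way the sort never runs.
try:
    for k, _ in db2runner:
        db2runner[k].sort()
except ValueError as e:
    print(f"buggy iteration failed: {e}")

# New form, matching the hunk above: .items() yields (key, value) pairs and the
# value can be sorted directly.
for _, runner in db2runner.items():
    runner.sort()

print(db2runner)  # {'Milvus': [1, 2, 3], 'PgVector': [7, 9]}
```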

vectordb_bench/backend/clients/__init__.py
@@ -42,7 +42,7 @@ class DB(Enum):
     AliyunOpenSearch = "AliyunOpenSearch"
 
     @property
-    def init_cls(self) -> type[VectorDB]:  # noqa: PLR0911, PLR0912
+    def init_cls(self) -> type[VectorDB]:  # noqa: PLR0911, PLR0912, C901
         """Import while in use"""
         if self == DB.Milvus:
             from .milvus.milvus import Milvus
@@ -129,11 +129,16 @@ class DB(Enum):
 
             return AliyunOpenSearch
 
+        if self == DB.Test:
+            from .test.test import Test
+
+            return Test
+
         msg = f"Unknown DB: {self.name}"
         raise ValueError(msg)
 
     @property
-    def config_cls(self) -> type[DBConfig]:  # noqa: PLR0911, PLR0912
+    def config_cls(self) -> type[DBConfig]:  # noqa: PLR0911, PLR0912, C901
         """Import while in use"""
         if self == DB.Milvus:
             from .milvus.config import MilvusConfig
@@ -220,6 +225,11 @@ class DB(Enum):
 
             return AliyunOpenSearchConfig
 
+        if self == DB.Test:
+            from .test.config import TestConfig
+
+            return TestConfig
+
         msg = f"Unknown DB: {self.name}"
         raise ValueError(msg)
 
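
The three `clients/__init__.py` hunks register the new `Test` client in the DB enum's lazy-import properties; without these branches, `DB.Test` would fall through to the "Unknown DB" error. The "import while in use" pattern keeps each client's optional SDK from being imported until that member is actually selected. A minimal sketch of the same pattern with made-up names (stdlib classes play the role of heavy client SDKs):

```python
from enum import Enum


class ClientKind(Enum):
    """Hypothetical mirror of the DB enum; member and class names are illustrative."""

    Heavy = "Heavy"
    Test = "Test"

    @property
    def init_cls(self) -> type:
        """Import while in use, so optional dependencies load only when selected."""
        if self == ClientKind.Heavy:
            # Stand-in for a deferred heavy import, e.g. `from .milvus.milvus import Milvus`.
            from decimal import Decimal

            return Decimal
        if self == ClientKind.Test:
            # Mirrors the branch added in the hunk above:
            # `from .test.test import Test; return Test`.
            from fractions import Fraction

            return Fraction
        msg = f"Unknown client: {self.name}"
        raise ValueError(msg)


# The import inside the property runs only when the member is actually used.
print(ClientKind.Test.init_cls)  # <class 'fractions.Fraction'>
```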

vectordb_bench/backend/clients/aliyun_opensearch/aliyun_opensearch.py
@@ -325,10 +325,7 @@ class AliyunOpenSearch(VectorDB):
 
         return False
 
-    def optimize(self):
-        pass
-
-    def optimize_with_size(self, data_size: int):
+    def optimize(self, data_size: int):
         log.info(f"optimize count: {data_size}")
         retry_times = 0
         while True:
@@ -340,6 +337,3 @@ class AliyunOpenSearch(VectorDB):
             if total_count == data_size:
                 log.info("optimize table finish.")
                 return
-
-    def ready_to_load(self):
-        """ready_to_load will be called before load in load cases."""

vectordb_bench/backend/clients/alloydb/alloydb.py
@@ -149,10 +149,7 @@ class AlloyDB(VectorDB):
         )
         self.conn.commit()
 
-    def ready_to_load(self):
-        pass
-
-    def optimize(self):
+    def optimize(self, data_size: int | None = None):
         self._post_insert()
 
     def _post_insert(self):

vectordb_bench/backend/clients/api.py
@@ -137,6 +137,13 @@ class VectorDB(ABC):
     @contextmanager
     def init(self) -> None:
         """create and destory connections to database.
+        Why contextmanager:
+
+        In multiprocessing search tasks, vectordbbench might init
+        totally hundreds of thousands of connections with DB server.
+
+        Too many connections may drain local FDs or server connection resources.
+        If the DB client doesn't have `close()` method, just set the object to None.
 
         Examples:
             >>> with self.init():
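
The expanded `init()` docstring explains the contract: each worker process opens its own client inside the context manager and releases it on exit, since the multiprocessing search runners can otherwise accumulate enormous numbers of connections. A minimal sketch of a client honoring that contract (the connection class here is a hypothetical stand-in, not part of the package):

```python
from contextlib import contextmanager


class _FakeConnection:
    """Stand-in for a vendor SDK connection object."""

    def close(self) -> None:
        print("connection closed")


class ExampleClient:
    """Hypothetical VectorDB-style client illustrating the documented init() contract."""

    def __init__(self, url: str = "localhost:12345"):
        self.url = url
        self.conn = None

    @contextmanager
    def init(self):
        # Create the connection on entry; each worker process does this once.
        self.conn = _FakeConnection()
        try:
            yield
        finally:
            # Release on exit so file descriptors / server connection slots are freed.
            self.conn.close()
            # If the SDK had no close() method, dropping the reference would be
            # enough, as the updated docstring suggests.
            self.conn = None


with ExampleClient().init():
    pass  # insert / search calls would run here
```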

@@ -187,9 +194,8 @@ class VectorDB(ABC):
         """
         raise NotImplementedError
 
-    # TODO: remove
     @abstractmethod
-    def optimize(self):
+    def optimize(self, data_size: int | None = None):
         """optimize will be called between insertion and search in performance cases.
 
         Should be blocked until the vectorDB is ready to be tested on
@@ -199,16 +205,3 @@ class VectorDB(ABC):
         Optimize's execution time is limited, the limited time is based on cases.
         """
         raise NotImplementedError
-
-    def optimize_with_size(self, data_size: int):
-        self.optimize()
-
-    # TODO: remove
-    @abstractmethod
-    def ready_to_load(self):
-        """ready_to_load will be called before load in load cases.
-
-        Should be blocked until the vectorDB is ready to be tested on
-        heavy load cases.
-        """
-        raise NotImplementedError
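
From here on, the client hunks apply the same interface change: the old `ready_to_load()` and `optimize_with_size(data_size)` hooks are removed, and `optimize()` gains an optional `data_size` parameter on the abstract base class. A hedged before/after sketch (class and helper names are illustrative, not taken from any one client):

```python
class OldStyleClient:
    """Pre-0.0.21 shape: three separate hooks."""

    def ready_to_load(self):
        """Called before load cases; most clients left this as a no-op."""

    def optimize(self):
        self._post_insert()

    def optimize_with_size(self, data_size: int):
        # The old ABC's default simply delegated to optimize().
        self.optimize()

    def _post_insert(self):
        print("building index / compacting")


class NewStyleClient:
    """0.0.21 shape: a single hook, with the row count passed in when known."""

    def optimize(self, data_size: int | None = None):
        # Clients that need the row count (e.g. to poll until everything is
        # indexed, as the AliyunOpenSearch hunk above does) can use data_size;
        # the rest simply ignore it.
        if data_size is not None:
            print(f"optimizing for {data_size} rows")
        self._post_insert()

    def _post_insert(self):
        print("building index / compacting")


NewStyleClient().optimize(data_size=1_000_000)
```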

vectordb_bench/backend/clients/aws_opensearch/aws_opensearch.py
@@ -145,15 +145,15 @@ class AWSOpenSearch(VectorDB):
                 docvalue_fields=[self.id_col_name],
                 stored_fields="_none_",
             )
-            log.info(f'Search took: {resp["took"]}')
-            log.info(f'Search shards: {resp["_shards"]}')
-            log.info(f'Search hits total: {resp["hits"]["total"]}')
+            log.info(f"Search took: {resp['took']}")
+            log.info(f"Search shards: {resp['_shards']}")
+            log.info(f"Search hits total: {resp['hits']['total']}")
             return [int(h["fields"][self.id_col_name][0]) for h in resp["hits"]["hits"]]
         except Exception as e:
             log.warning(f"Failed to search: {self.index_name} error: {e!s}")
             raise e from None
 
-    def optimize(self):
+    def optimize(self, data_size: int | None = None):
         """optimize will be called between insertion and search in performance cases."""
         # Call refresh first to ensure that all segments are created
         self._refresh_index()
@@ -194,6 +194,3 @@ class AWSOpenSearch(VectorDB):
         log.info("Calling warmup API to load graphs into memory")
         warmup_endpoint = f"/_plugins/_knn/warmup/{self.index_name}"
         self.client.transport.perform_request("GET", warmup_endpoint)
-
-    def ready_to_load(self):
-        """ready_to_load will be called before load in load cases."""

vectordb_bench/backend/clients/chroma/chroma.py
@@ -57,10 +57,7 @@ class ChromaClient(VectorDB):
     def ready_to_search(self) -> bool:
         pass
 
-    def ready_to_load(self) -> bool:
-        pass
-
-    def optimize(self) -> None:
+    def optimize(self, data_size: int | None = None):
         pass
 
     def insert_embeddings(

vectordb_bench/backend/clients/elastic_cloud/elastic_cloud.py
@@ -143,7 +143,7 @@ class ElasticCloud(VectorDB):
             log.warning(f"Failed to search: {self.indice} error: {e!s}")
             raise e from None
 
-    def optimize(self):
+    def optimize(self, data_size: int | None = None):
         """optimize will be called between insertion and search in performance cases."""
         assert self.client is not None, "should self.init() first"
         self.client.indices.refresh(index=self.indice)
@@ -158,6 +158,3 @@ class ElasticCloud(VectorDB):
             task_status = self.client.tasks.get(task_id=force_merge_task_id)
             if task_status["completed"]:
                 return
-
-    def ready_to_load(self):
-        """ready_to_load will be called before load in load cases."""

vectordb_bench/backend/clients/memorydb/cli.py
@@ -43,8 +43,8 @@ class MemoryDBTypedDict(TypedDict):
             show_default=True,
             default=False,
             help=(
-                "Cluster Mode Disabled (CMD), use this flag when testing locally on a single node instance.",
-                " In production, MemoryDB only supports cluster mode (CME)",
+                "Cluster Mode Disabled (CMD), use this flag when testing locally on a single node instance."
+                " In production, MemoryDB only supports cluster mode (CME)"
             ),
         ),
     ]

vectordb_bench/backend/clients/memorydb/memorydb.py
@@ -157,17 +157,14 @@ class MemoryDB(VectorDB):
         self.conn = self.get_client()
         search_param = self.case_config.search_param()
         if search_param["ef_runtime"]:
-            self.ef_runtime_str = f'EF_RUNTIME {search_param["ef_runtime"]}'
+            self.ef_runtime_str = f"EF_RUNTIME {search_param['ef_runtime']}"
         else:
             self.ef_runtime_str = ""
         yield
         self.conn.close()
         self.conn = None
 
-    def ready_to_load(self) -> bool:
-        pass
-
-    def optimize(self) -> None:
+    def optimize(self, data_size: int | None = None):
         self._post_insert()
 
     def insert_embeddings(

vectordb_bench/backend/clients/milvus/milvus.py
@@ -138,26 +138,7 @@ class Milvus(VectorDB):
             log.warning(f"{self.name} optimize error: {e}")
             raise e from None
 
-    def ready_to_load(self):
-        assert self.col, "Please call self.init() before"
-        self._pre_load(self.col)
-
-    def _pre_load(self, coll: Collection):
-        try:
-            if not coll.has_index(index_name=self._index_name):
-                log.info(f"{self.name} create index")
-                coll.create_index(
-                    self._vector_field,
-                    self.case_config.index_param(),
-                    index_name=self._index_name,
-                )
-            coll.load()
-            log.info(f"{self.name} load")
-        except Exception as e:
-            log.warning(f"{self.name} pre load error: {e}")
-            raise e from None
-
-    def optimize(self):
+    def optimize(self, data_size: int | None = None):
         assert self.col, "Please call self.init() before"
         self._optimize()
 

vectordb_bench/backend/clients/pgdiskann/pgdiskann.py
@@ -143,10 +143,7 @@ class PgDiskANN(VectorDB):
         )
         self.conn.commit()
 
-    def ready_to_load(self):
-        pass
-
-    def optimize(self):
+    def optimize(self, data_size: int | None = None):
         self._post_insert()
 
     def _post_insert(self):

vectordb_bench/backend/clients/pgvecto_rs/pgvecto_rs.py
@@ -153,10 +153,7 @@ class PgVectoRS(VectorDB):
         )
         self.conn.commit()
 
-    def ready_to_load(self):
-        pass
-
-    def optimize(self):
+    def optimize(self, data_size: int | None = None):
         self._post_insert()
 
     def _post_insert(self):
@@ -200,10 +197,7 @@ class PgVectoRS(VectorDB):
             self.cursor.execute(index_create_sql)
             self.conn.commit()
         except Exception as e:
-            log.warning(
-                f"Failed to create pgvecto.rs index {self._index_name} \
-                at table {self.table_name} error: {e}",
-            )
+            log.warning(f"Failed to create pgvecto.rs index {self._index_name} at table {self.table_name} error: {e}")
             raise e from None
 
     def _create_table(self, dim: int):
@@ -258,9 +252,7 @@ class PgVectoRS(VectorDB):
 
             return len(metadata), None
         except Exception as e:
-            log.warning(
-                f"Failed to insert data into pgvecto.rs table ({self.table_name}), error: {e}",
-            )
+            log.warning(f"Failed to insert data into pgvecto.rs table ({self.table_name}), error: {e}")
             return 0, e
 
     def search_embedding(

vectordb_bench/backend/clients/pgvector/pgvector.py
@@ -228,10 +228,7 @@ class PgVector(VectorDB):
         )
         self.conn.commit()
 
-    def ready_to_load(self):
-        pass
-
-    def optimize(self):
+    def optimize(self, data_size: int | None = None):
         self._post_insert()
 
     def _post_insert(self):
@@ -415,9 +412,7 @@ class PgVector(VectorDB):
 
             return len(metadata), None
         except Exception as e:
-            log.warning(
-                f"Failed to insert data into pgvector table ({self.table_name}), error: {e}",
-            )
+            log.warning(f"Failed to insert data into pgvector table ({self.table_name}), error: {e}")
             return 0, e
 
     def search_embedding(

vectordb_bench/backend/clients/pgvectorscale/pgvectorscale.py
@@ -143,10 +143,7 @@ class PgVectorScale(VectorDB):
         )
         self.conn.commit()
 
-    def ready_to_load(self):
-        pass
-
-    def optimize(self):
+    def optimize(self, data_size: int | None = None):
         self._post_insert()
 
     def _post_insert(self):
@@ -255,9 +252,7 @@ class PgVectorScale(VectorDB):
 
             return len(metadata), None
         except Exception as e:
-            log.warning(
-                f"Failed to insert data into pgvector table ({self.table_name}), error: {e}",
-            )
+            log.warning(f"Failed to insert data into pgvector table ({self.table_name}), error: {e}")
             return 0, e
 
     def search_embedding(

vectordb_bench/backend/clients/pinecone/pinecone.py
@@ -59,10 +59,7 @@ class Pinecone(VectorDB):
         self.index = pc.Index(self.index_name)
         yield
 
-    def ready_to_load(self):
-        pass
-
-    def optimize(self):
+    def optimize(self, data_size: int | None = None):
         pass
 
     def insert_embeddings(

vectordb_bench/backend/clients/qdrant_cloud/qdrant_cloud.py
@@ -62,10 +62,7 @@ class QdrantCloud(VectorDB):
         self.qdrant_client = None
         del self.qdrant_client
 
-    def ready_to_load(self):
-        pass
-
-    def optimize(self):
+    def optimize(self, data_size: int | None = None):
         assert self.qdrant_client, "Please call self.init() before"
         # wait for vectors to be fully indexed
         try:
@@ -76,8 +73,8 @@ class QdrantCloud(VectorDB):
                 continue
             if info.status == CollectionStatus.GREEN:
                 msg = (
-                    f"Stored vectors: {info.vectors_count}, Indexed vectors: {info.indexed_vectors_count}, ",
-                    f"Collection status: {info.indexed_vectors_count}",
+                    f"Stored vectors: {info.vectors_count}, Indexed vectors: {info.indexed_vectors_count}, "
+                    f"Collection status: {info.indexed_vectors_count}"
                 )
                 log.info(msg)
                 return

vectordb_bench/backend/clients/redis/redis.py
@@ -95,10 +95,7 @@ class Redis(VectorDB):
     def ready_to_search(self) -> bool:
         """Check if the database is ready to search."""
 
-    def ready_to_load(self) -> bool:
-        pass
-
-    def optimize(self) -> None:
+    def optimize(self, data_size: int | None = None):
         pass
 
     def insert_embeddings(

vectordb_bench/backend/clients/test/cli.py
@@ -17,7 +17,7 @@ class TestTypedDict(CommonTypedDict): ...
 @click_parameter_decorators_from_typed_dict(TestTypedDict)
 def Test(**parameters: Unpack[TestTypedDict]):
     run(
-        db=DB.NewClient,
+        db=DB.Test,
         db_config=TestConfig(db_label=parameters["db_label"]),
         db_case_config=TestIndexConfig(),
         **parameters,

vectordb_bench/backend/clients/test/test.py
@@ -33,10 +33,7 @@ class Test(VectorDB):
 
         yield
 
-    def ready_to_load(self) -> bool:
-        return True
-
-    def optimize(self) -> None:
+    def optimize(self, data_size: int | None = None):
         pass
 
     def insert_embeddings(

vectordb_bench/backend/clients/weaviate_cloud/weaviate_cloud.py
@@ -67,10 +67,7 @@ class WeaviateCloud(VectorDB):
         self.client = None
         del self.client
 
-    def ready_to_load(self):
-        """Should call insert first, do nothing"""
-
-    def optimize(self):
+    def optimize(self, data_size: int | None = None):
         assert self.client.schema.exists(self.collection_name)
         self.client.schema.update_config(
             self.collection_name,

vectordb_bench/backend/data_source.py
@@ -63,9 +63,7 @@ class AliyunOSSReader(DatasetReader):
         # check size equal
         remote_size, local_size = info.content_length, local.stat().st_size
         if remote_size != local_size:
-            log.info(
-                f"local file: {local} size[{local_size}] not match with remote size[{remote_size}]",
-            )
+            log.info(f"local file: {local} size[{local_size}] not match with remote size[{remote_size}]")
             return False
 
         return True
@@ -89,9 +87,7 @@ class AliyunOSSReader(DatasetReader):
             local_file = local_ds_root.joinpath(file)
 
             if (not local_file.exists()) or (not self.validate_file(remote_file, local_file)):
-                log.info(
-                    f"local file: {local_file} not match with remote: {remote_file}; add to downloading list",
-                )
+                log.info(f"local file: {local_file} not match with remote: {remote_file}; add to downloading list")
                 downloads.append((remote_file, local_file))
 
         if len(downloads) == 0:
@@ -135,9 +131,7 @@ class AwsS3Reader(DatasetReader):
             local_file = local_ds_root.joinpath(file)
 
             if (not local_file.exists()) or (not self.validate_file(remote_file, local_file)):
-                log.info(
-                    f"local file: {local_file} not match with remote: {remote_file}; add to downloading list",
-                )
+                log.info(f"local file: {local_file} not match with remote: {remote_file}; add to downloading list")
                 downloads.append(remote_file)
 
         if len(downloads) == 0:
@@ -157,9 +151,7 @@ class AwsS3Reader(DatasetReader):
         # check size equal
         remote_size, local_size = info.get("size"), local.stat().st_size
         if remote_size != local_size:
-            log.info(
-                f"local file: {local} size[{local_size}] not match with remote size[{remote_size}]",
-            )
+            log.info(f"local file: {local} size[{local_size}] not match with remote size[{remote_size}]")
             return False
 
         return True

vectordb_bench/backend/runner/mp_runner.py
@@ -79,14 +79,14 @@ class MultiProcessingSearchRunner:
 
                 if count % 500 == 0:
                     log.debug(
-                        f"({mp.current_process().name:16}) ",
-                        f"search_count: {count}, latest_latency={time.perf_counter()-s}",
+                        f"({mp.current_process().name:16}) "
+                        f"search_count: {count}, latest_latency={time.perf_counter()-s}"
                     )
 
         total_dur = round(time.perf_counter() - start_time, 4)
         log.info(
             f"{mp.current_process().name:16} search {self.duration}s: "
-            f"actual_dur={total_dur}s, count={count}, qps in this process: {round(count / total_dur, 4):3}",
+            f"actual_dur={total_dur}s, count={count}, qps in this process: {round(count / total_dur, 4):3}"
         )
 
         return (count, total_dur, latencies)
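
This hunk and the remaining runner hunks all fix the same logging pitfall (the earlier memorydb/cli.py and qdrant_cloud hunks fix its tuple-building sibling): a trailing comma between adjacent f-strings turns what should be one message into two positional arguments, which the standard `logging` module then treats as %-format arguments. A minimal reproduction with made-up values:

```python
import logging

logging.basicConfig(level=logging.DEBUG)
log = logging.getLogger("demo")

count, latency = 500, 0.0123  # made-up values

# Buggy pattern (before): the trailing comma makes the second f-string a separate
# positional argument. Because the message contains no % placeholders, formatting
# the record fails and logging prints "--- Logging error ---" to stderr instead
# of the intended line.
log.debug(
    f"search_count: {count}, ",
    f"latest_latency={latency}",
)

# Fixed pattern (after): adjacent f-strings are concatenated into a single
# message argument, so the line is logged as intended.
log.debug(
    f"search_count: {count}, "
    f"latest_latency={latency}"
)
```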

@@ -94,9 +94,7 @@ class MultiProcessingSearchRunner:
     @staticmethod
     def get_mp_context():
         mp_start_method = "spawn"
-        log.debug(
-            f"MultiProcessingSearchRunner get multiprocessing start method: {mp_start_method}",
-        )
+        log.debug(f"MultiProcessingSearchRunner get multiprocessing start method: {mp_start_method}")
         return mp.get_context(mp_start_method)
 
     def _run_all_concurrencies_mem_efficient(self):
@@ -113,9 +111,7 @@ class MultiProcessingSearchRunner:
                     mp_context=self.get_mp_context(),
                     max_workers=conc,
                 ) as executor:
-                    log.info(
-                        f"Start search {self.duration}s in concurrency {conc}, filters: {self.filters}",
-                    )
+                    log.info(f"Start search {self.duration}s in concurrency {conc}, filters: {self.filters}")
                     future_iter = [executor.submit(self.search, self.test_data, q, cond) for i in range(conc)]
                     # Sync all processes
                     while q.qsize() < conc:
@@ -124,9 +120,7 @@ class MultiProcessingSearchRunner:
 
                     with cond:
                         cond.notify_all()
-                        log.info(
-                            f"Syncing all process and start concurrency search, concurrency={conc}",
-                        )
+                        log.info(f"Syncing all process and start concurrency search, concurrency={conc}")
 
                     start = time.perf_counter()
                     all_count = sum([r.result()[0] for r in future_iter])
@@ -140,18 +134,14 @@ class MultiProcessingSearchRunner:
                 conc_qps_list.append(qps)
                 conc_latency_p99_list.append(latency_p99)
                 conc_latency_avg_list.append(latency_avg)
-                log.info(
-                    f"End search in concurrency {conc}: dur={cost}s, total_count={all_count}, qps={qps}",
-                )
+                log.info(f"End search in concurrency {conc}: dur={cost}s, total_count={all_count}, qps={qps}")
 
                 if qps > max_qps:
                     max_qps = qps
-                    log.info(
-                        f"Update largest qps with concurrency {conc}: current max_qps={max_qps}",
-                    )
+                    log.info(f"Update largest qps with concurrency {conc}: current max_qps={max_qps}")
         except Exception as e:
             log.warning(
-                f"Fail to search all concurrencies: {self.concurrencies}, max_qps before failure={max_qps}, reason={e}",
+                f"Fail to search, concurrencies: {self.concurrencies}, max_qps before failure={max_qps}, reason={e}"
             )
             traceback.print_exc()
 
@@ -193,9 +183,7 @@ class MultiProcessingSearchRunner:
                     mp_context=self.get_mp_context(),
                     max_workers=conc,
                 ) as executor:
-                    log.info(
-                        f"Start search_by_dur {duration}s in concurrency {conc}, filters: {self.filters}",
-                    )
+                    log.info(f"Start search_by_dur {duration}s in concurrency {conc}, filters: {self.filters}")
                     future_iter = [
                         executor.submit(self.search_by_dur, duration, self.test_data, q, cond) for i in range(conc)
                     ]
@@ -206,24 +194,18 @@ class MultiProcessingSearchRunner:
 
                     with cond:
                         cond.notify_all()
-                        log.info(
-                            f"Syncing all process and start concurrency search, concurrency={conc}",
-                        )
+                        log.info(f"Syncing all process and start concurrency search, concurrency={conc}")
 
                     start = time.perf_counter()
                     all_count = sum([r.result() for r in future_iter])
                     cost = time.perf_counter() - start
 
                     qps = round(all_count / cost, 4)
-                    log.info(
-                        f"End search in concurrency {conc}: dur={cost}s, total_count={all_count}, qps={qps}",
-                    )
+                    log.info(f"End search in concurrency {conc}: dur={cost}s, total_count={all_count}, qps={qps}")
 
                     if qps > max_qps:
                         max_qps = qps
-                        log.info(
-                            f"Update largest qps with concurrency {conc}: current max_qps={max_qps}",
-                        )
+                        log.info(f"Update largest qps with concurrency {conc}: current max_qps={max_qps}")
         except Exception as e:
             log.warning(
                 f"Fail to search all concurrencies: {self.concurrencies}, max_qps before failure={max_qps}, reason={e}",
@@ -275,14 +257,14 @@ class MultiProcessingSearchRunner:
 
             if count % 500 == 0:
                 log.debug(
-                    f"({mp.current_process().name:16}) search_count: {count}, ",
-                    f"latest_latency={time.perf_counter()-s}",
+                    f"({mp.current_process().name:16}) search_count: {count}, "
+                    f"latest_latency={time.perf_counter()-s}"
                 )
 
         total_dur = round(time.perf_counter() - start_time, 4)
         log.debug(
             f"{mp.current_process().name:16} search {self.duration}s: "
-            f"actual_dur={total_dur}s, count={count}, qps in this process: {round(count / total_dur, 4):3}",
+            f"actual_dur={total_dur}s, count={count}, qps in this process: {round(count / total_dur, 4):3}"
        )
 
         return count

vectordb_bench/backend/runner/rate_runner.py
@@ -73,14 +73,14 @@ class RatedMultiThreadingInsertRunner:
 
                 if len(not_done) > 0:
                     log.warning(
-                        f"Failed to finish all tasks in 1s, [{len(not_done)}/{len(executing_futures)}] ",
-                        f"tasks are not done, waited={wait_interval:.2f}, trying to wait in the next round",
+                        f"Failed to finish all tasks in 1s, [{len(not_done)}/{len(executing_futures)}] "
+                        f"tasks are not done, waited={wait_interval:.2f}, trying to wait in the next round"
                     )
                     executing_futures = list(not_done)
                 else:
                     log.debug(
-                        f"Finished {len(executing_futures)} insert-{config.NUM_PER_BATCH} ",
-                        f"task in 1s, wait_interval={wait_interval:.2f}",
+                        f"Finished {len(executing_futures)} insert-{config.NUM_PER_BATCH} "
+                        f"task in 1s, wait_interval={wait_interval:.2f}"
                     )
                     executing_futures = []
             except Exception as e: