redis-benchmarks-specification 0.1.274__py3-none-any.whl → 0.1.275__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of redis-benchmarks-specification was flagged as potentially problematic.
- redis_benchmarks_specification/__common__/timeseries.py +28 -6
- redis_benchmarks_specification/__runner__/args.py +36 -0
- redis_benchmarks_specification/__runner__/remote_profiling.py +329 -0
- redis_benchmarks_specification/__runner__/runner.py +496 -53
- redis_benchmarks_specification/test-suites/defaults.yml +3 -0
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-hash-hscan-50-fields-10B-values.yml +1 -1
- {redis_benchmarks_specification-0.1.274.dist-info → redis_benchmarks_specification-0.1.275.dist-info}/METADATA +1 -1
- {redis_benchmarks_specification-0.1.274.dist-info → redis_benchmarks_specification-0.1.275.dist-info}/RECORD +11 -16
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-hash-hscan-1K-fields-100B-values-cursor-count-1000.yml +0 -34
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-hash-hscan-1K-fields-10B-values-cursor-count-100.yml +0 -34
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-hash-hscan-1K-fields-10B-values.yml +0 -34
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-set-1K-elements-sscan-cursor-count-100.yml +0 -32
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-set-1K-elements-sscan.yml +0 -32
- redis_benchmarks_specification/test-suites/memtier_benchmark-1key-zset-1K-elements-zscan.yml +0 -32
- {redis_benchmarks_specification-0.1.274.dist-info → redis_benchmarks_specification-0.1.275.dist-info}/LICENSE +0 -0
- {redis_benchmarks_specification-0.1.274.dist-info → redis_benchmarks_specification-0.1.275.dist-info}/WHEEL +0 -0
- {redis_benchmarks_specification-0.1.274.dist-info → redis_benchmarks_specification-0.1.275.dist-info}/entry_points.txt +0 -0
@@ -62,6 +62,7 @@ from redis_benchmarks_specification.__common__.spec import (
     extract_client_tools,
 )
 from redis_benchmarks_specification.__runner__.args import create_client_runner_args
+from redis_benchmarks_specification.__runner__.remote_profiling import RemoteProfiler


 def parse_size(size):
@@ -91,6 +92,31 @@ def parse_size(size):
     return int(number * units[unit])


+def extract_expected_benchmark_duration(benchmark_command_str, override_memtier_test_time):
+    """
+    Extract expected benchmark duration from command string or override.
+
+    Args:
+        benchmark_command_str: The benchmark command string
+        override_memtier_test_time: Override test time value
+
+    Returns:
+        Expected duration in seconds, or 30 as default
+    """
+    if override_memtier_test_time > 0:
+        return override_memtier_test_time
+
+    # Try to extract test-time from command string
+    if "test-time" in benchmark_command_str:
+        # Handle both --test-time (memtier) and -test-time (pubsub-sub-bench)
+        test_time_match = re.search(r"--?test-time[=\s]+(\d+)", benchmark_command_str)
+        if test_time_match:
+            return int(test_time_match.group(1))
+
+    # Default duration if not found
+    return 30
+
+
 def run_multiple_clients(
     benchmark_config,
     docker_client,
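A quick illustration (not part of the diff) of the regex the new helper relies on; the command strings below are made up, and the helper prefers an explicit override, then the flag, then a 30-second default:

```python
import re

# memtier-style "--test-time" and pubsub-sub-bench-style "-test-time" are both matched
cmd = "memtier_benchmark --test-time 180 --pipeline 10"   # hypothetical command string
m = re.search(r"--?test-time[=\s]+(\d+)", cmd)
assert m and int(m.group(1)) == 180

cmd = "pubsub-sub-bench -test-time 120 -channels 100"     # hypothetical command string
m = re.search(r"--?test-time[=\s]+(\d+)", cmd)
assert m and int(m.group(1)) == 120
```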
@@ -181,6 +207,30 @@ def run_multiple_clients(
                 unix_socket,
                 None,  # username
             )
+        elif "vector-db-benchmark" in client_tool:
+            (
+                _,
+                benchmark_command_str,
+                arbitrary_command,
+                client_env_vars,
+            ) = prepare_vector_db_benchmark_parameters(
+                client_config,
+                client_tool,
+                port,
+                host,
+                password,
+                local_benchmark_output_filename,
+                oss_cluster_api_enabled,
+                tls_enabled,
+                tls_skip_verify,
+                test_tls_cert,
+                test_tls_key,
+                test_tls_cacert,
+                resp_version,
+                override_memtier_test_time,
+                unix_socket,
+                None,  # username
+            )
         else:
             # Handle other benchmark tools
             (
@@ -224,23 +274,51 @@ def run_multiple_clients(
         # Start container (detached)
         import os

[7 lines removed here; their content is not shown in this diff view]
+        # Set working directory based on tool
+        working_dir = benchmark_tool_workdir
+        if "vector-db-benchmark" in client_tool:
+            working_dir = "/app"  # vector-db-benchmark needs to run from /app
+
+        # Prepare container arguments
+        volumes = {
+            temporary_dir_client: {
+                "bind": client_mnt_point,
+                "mode": "rw",
             },
[9 lines removed here; their content is not shown in this diff view]
+        }
+
+        # For vector-db-benchmark, also mount the results directory
+        if "vector-db-benchmark" in client_tool:
+            volumes[temporary_dir_client] = {
+                "bind": "/app/results",
+                "mode": "rw",
+            }
+
+        container_kwargs = {
+            "image": client_image,
+            "volumes": volumes,
+            "auto_remove": False,
+            "privileged": True,
+            "working_dir": working_dir,
+            "command": benchmark_command_str,
+            "network_mode": "host",
+            "detach": True,
+            "cpuset_cpus": client_cpuset_cpus,
+        }
+
+        # Only add user for non-vector-db-benchmark tools to avoid permission issues
+        if "vector-db-benchmark" not in client_tool:
+            container_kwargs["user"] = f"{os.getuid()}:{os.getgid()}"
+
+        # Add environment variables for vector-db-benchmark
+        if "vector-db-benchmark" in client_tool:
+            try:
+                container_kwargs["environment"] = client_env_vars
+            except NameError:
+                # client_env_vars not defined, skip environment variables
+                pass
+
+        container = docker_client.containers.run(**container_kwargs)

         containers.append(
             {
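For reference, a self-contained sketch of how a kwargs dict like the one above is consumed by docker-py's containers.run; the image, command, paths, and cpuset below are placeholders, and only parameters that appear in the diff are used:

```python
import os
import docker  # Docker SDK for Python (docker-py)

docker_client = docker.from_env()

container_kwargs = {
    "image": "redis/memtier_benchmark:latest",        # placeholder image
    "volumes": {"/tmp/bench-client": {"bind": "/mnt/datasets", "mode": "rw"}},
    "auto_remove": False,
    "privileged": True,
    "working_dir": "/mnt/datasets",
    "command": "memtier_benchmark --test-time 30",     # placeholder command
    "network_mode": "host",
    "detach": True,                                     # returns a Container object immediately
    "cpuset_cpus": "0-1",
    "user": f"{os.getuid()}:{os.getgid()}",
}

container = docker_client.containers.run(**container_kwargs)
result = container.wait()                               # block until the benchmark exits
stdout = container.logs(stdout=True, stderr=False).decode("utf-8", errors="replace")
container.remove(force=True)                            # auto_remove is False, so clean up explicitly
```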
@@ -334,6 +412,7 @@ def run_multiple_clients(
     aggregated_json = {}
     memtier_json = None
     pubsub_json = None
+    vector_json = None

     for result in successful_results:
         client_index = result["client_index"]
@@ -360,6 +439,19 @@ def run_multiple_clients(
                 logging.info(
                     f"Successfully read pubsub-sub-bench JSON output from client {client_index}"
                 )
+            elif "vector-db-benchmark" in tool:
+                # For vector-db-benchmark, look for summary JSON file
+                summary_files = [f for f in os.listdir(temporary_dir_client) if f.endswith("-summary.json")]
+                if summary_files:
+                    summary_filepath = os.path.join(temporary_dir_client, summary_files[0])
+                    try:
+                        with open(summary_filepath, 'r') as f:
+                            vector_json = json.load(f)
+                        logging.info(f"Successfully read vector-db-benchmark JSON output from {summary_files[0]}")
+                    except Exception as e:
+                        logging.warning(f"Failed to read vector-db-benchmark JSON from {summary_files[0]}: {e}")
+                else:
+                    logging.warning(f"No vector-db-benchmark summary JSON file found for client {client_index}")

             logging.info(
                 f"Successfully read JSON output from client {client_index} ({tool})"
@@ -376,16 +468,32 @@ def run_multiple_clients(
                 f"JSON output file not found for client {client_index}: {json_filepath}"
             )

-    # Merge JSON outputs from
-    if memtier_json and pubsub_json:
+    # Merge JSON outputs from all tools
+    if memtier_json and pubsub_json and vector_json:
+        # Use memtier as base and add other metrics
+        aggregated_json = memtier_json.copy()
+        aggregated_json.update(pubsub_json)
+        aggregated_json.update(vector_json)
+        aggregated_stdout = json.dumps(aggregated_json, indent=2)
+        logging.info("Using merged JSON results from memtier, pubsub-sub-bench, and vector-db-benchmark clients")
+    elif memtier_json and pubsub_json:
         # Use memtier as base and add pubsub metrics
         aggregated_json = memtier_json.copy()
-        # Add pubsub metrics to the aggregated result
         aggregated_json.update(pubsub_json)
         aggregated_stdout = json.dumps(aggregated_json, indent=2)
-        logging.info(
[2 lines removed here; their content is not shown in this diff view]
+        logging.info("Using merged JSON results from memtier and pubsub-sub-bench clients")
+    elif memtier_json and vector_json:
+        # Use memtier as base and add vector metrics
+        aggregated_json = memtier_json.copy()
+        aggregated_json.update(vector_json)
+        aggregated_stdout = json.dumps(aggregated_json, indent=2)
+        logging.info("Using merged JSON results from memtier and vector-db-benchmark clients")
+    elif pubsub_json and vector_json:
+        # Use pubsub as base and add vector metrics
+        aggregated_json = pubsub_json.copy()
+        aggregated_json.update(vector_json)
+        aggregated_stdout = json.dumps(aggregated_json, indent=2)
+        logging.info("Using merged JSON results from pubsub-sub-bench and vector-db-benchmark clients")
     elif memtier_json:
         # Only memtier available
         aggregated_json = memtier_json
@@ -396,12 +504,15 @@ def run_multiple_clients(
         aggregated_json = pubsub_json
         aggregated_stdout = json.dumps(aggregated_json, indent=2)
         logging.info("Using JSON results from pubsub-sub-bench client only")
+    elif vector_json:
+        # Only vector-db-benchmark available
+        aggregated_json = vector_json
+        aggregated_stdout = json.dumps(aggregated_json, indent=2)
+        logging.info("Using JSON results from vector-db-benchmark client only")
     else:
         # Fall back to concatenated stdout
         aggregated_stdout = "\n".join([r["stdout"] for r in successful_results])
-        logging.warning(
-            "No JSON results found, falling back to concatenated stdout"
-        )
+        logging.warning("No JSON results found, falling back to concatenated stdout")

     return aggregated_stdout, results

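Worth noting about the merge order above: dict.update overwrites keys that already exist, so when two tools report under the same top-level key the later update wins (pubsub over memtier, vector over both). A tiny illustration with made-up keys:

```python
memtier_json = {"ALL STATS": {"Totals": {"Ops/sec": 125000.0}}, "runtime_s": 30}
pubsub_json = {"receive_rate": 98000.0, "runtime_s": 31}
vector_json = {"precision_summary": {"1.0000": {"mean_precision": 0.98}}}

aggregated = memtier_json.copy()
aggregated.update(pubsub_json)   # overlapping "runtime_s" now comes from pubsub_json
aggregated.update(vector_json)

assert aggregated["runtime_s"] == 31
assert "ALL STATS" in aggregated and "precision_summary" in aggregated
```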
@@ -665,6 +776,71 @@ def prepare_memtier_benchmark_parameters(
     return None, benchmark_command_str, arbitrary_command


+def prepare_vector_db_benchmark_parameters(
+    clientconfig,
+    full_benchmark_path,
+    port,
+    server,
+    password,
+    local_benchmark_output_filename,
+    oss_cluster_api_enabled=False,
+    tls_enabled=False,
+    tls_skip_verify=False,
+    tls_cert=None,
+    tls_key=None,
+    tls_cacert=None,
+    resp_version=None,
+    override_test_time=0,
+    unix_socket="",
+    username=None,
+):
+    """
+    Prepare vector-db-benchmark command parameters
+    """
+    arbitrary_command = False
+
+    benchmark_command = [
+        "/app/run.py",
+        "--host",
+        f"{server}",
+    ]
+
+    # Add port as environment variable (vector-db-benchmark uses env vars)
+    env_vars = {}
+    if port is not None:
+        env_vars["REDIS_PORT"] = str(port)
+    if password is not None:
+        env_vars["REDIS_AUTH"] = password
+    if username is not None:
+        env_vars["REDIS_USER"] = username
+
+    # Add engines parameter
+    engines = clientconfig.get("engines", "vectorsets-fp32-default")
+    benchmark_command.extend(["--engines", engines])
+
+    # Add datasets parameter
+    datasets = clientconfig.get("datasets", "random-100")
+    benchmark_command.extend(["--datasets", datasets])
+
+    # Add other optional parameters
+    if "parallels" in clientconfig:
+        benchmark_command.extend(["--parallels", str(clientconfig["parallels"])])
+
+    if "queries" in clientconfig:
+        benchmark_command.extend(["--queries", str(clientconfig["queries"])])
+
+    if "timeout" in clientconfig:
+        benchmark_command.extend(["--timeout", str(clientconfig["timeout"])])
+
+    # Add custom arguments if specified
+    if "arguments" in clientconfig:
+        benchmark_command_str = " ".join(benchmark_command) + " " + clientconfig["arguments"]
+    else:
+        benchmark_command_str = " ".join(benchmark_command)
+
+    return benchmark_command, benchmark_command_str, arbitrary_command, env_vars
+
+
 def prepare_pubsub_sub_bench_parameters(
     clientconfig,
     full_benchmark_path,
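A usage sketch for the new helper, assuming only the code above; host, port, and client-config values are hypothetical:

```python
clientconfig = {
    "engines": "vectorsets-fp32-default",
    "datasets": "random-100",
    "parallels": 8,
    "queries": 1000,
}

_, command_str, arbitrary, env_vars = prepare_vector_db_benchmark_parameters(
    clientconfig,
    "vector-db-benchmark",         # full_benchmark_path (not used in the body shown above)
    6379,                          # port -> exported as REDIS_PORT
    "127.0.0.1",                   # server
    None,                          # password -> REDIS_AUTH only when set
    "benchmark-output.json",
)

# command_str == "/app/run.py --host 127.0.0.1 --engines vectorsets-fp32-default"
#                " --datasets random-100 --parallels 8 --queries 1000"
# env_vars == {"REDIS_PORT": "6379"}
```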
@@ -899,6 +1075,23 @@ def process_self_contained_coordinator_stream(
                     redis_pid = conn.info()["process_id"]
                     redis_pids.append(redis_pid)

+                # Check if all tested commands are supported by this Redis instance
+                supported_commands = get_supported_redis_commands(redis_conns)
+                commands_supported, unsupported_commands = check_test_command_support(
+                    benchmark_config, supported_commands
+                )
+
+                if not commands_supported:
+                    logging.warning(
+                        f"Skipping test {test_name} due to unsupported commands: {unsupported_commands}"
+                    )
+                    delete_temporary_files(
+                        temporary_dir_client=temporary_dir_client,
+                        full_result_path=None,
+                        benchmark_tool_global=benchmark_tool_global,
+                    )
+                    continue
+
                 github_actor = f"{tf_triggering_env}-{running_platform}"
                 dso = "redis-server"
                 profilers_artifacts_matrix = []
@@ -1208,6 +1401,30 @@ def process_self_contained_coordinator_stream(
                         unix_socket,
                         None,  # username
                     )
+                elif "vector-db-benchmark" in benchmark_tool:
+                    (
+                        _,
+                        benchmark_command_str,
+                        arbitrary_command,
+                        env_vars,
+                    ) = prepare_vector_db_benchmark_parameters(
+                        benchmark_config["clientconfig"],
+                        full_benchmark_path,
+                        port,
+                        host,
+                        password,
+                        local_benchmark_output_filename,
+                        oss_cluster_api_enabled,
+                        tls_enabled,
+                        tls_skip_verify,
+                        test_tls_cert,
+                        test_tls_key,
+                        test_tls_cacert,
+                        resp_version,
+                        override_memtier_test_time,
+                        unix_socket,
+                        None,  # username
+                    )
                 else:
                     # prepare the benchmark command for other tools
                     (
@@ -1241,6 +1458,40 @@ def process_self_contained_coordinator_stream(
                     profiler_call_graph_mode,
                 )

+                # start remote profiling if enabled
+                remote_profiler = None
+                if args.enable_remote_profiling:
+                    try:
+                        remote_profiler = RemoteProfiler(
+                            args.remote_profile_host,
+                            args.remote_profile_port,
+                            args.remote_profile_output_dir,
+                            args.remote_profile_username,
+                            args.remote_profile_password
+                        )
+
+                        # Extract expected benchmark duration
+                        expected_duration = extract_expected_benchmark_duration(
+                            benchmark_command_str, override_memtier_test_time
+                        )
+
+                        # Start remote profiling
+                        profiling_started = remote_profiler.start_profiling(
+                            redis_conns[0] if redis_conns else None,
+                            test_name,
+                            expected_duration
+                        )
+
+                        if profiling_started:
+                            logging.info(f"Started remote profiling for test: {test_name}")
+                        else:
+                            logging.warning(f"Failed to start remote profiling for test: {test_name}")
+                            remote_profiler = None
+
+                    except Exception as e:
+                        logging.error(f"Error starting remote profiling: {e}")
+                        remote_profiler = None
+
                 # run the benchmark
                 benchmark_start_time = datetime.datetime.now()

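The lifecycle the runner follows is: construct the profiler, start it with the expected benchmark duration, run the benchmark, then wait for completion (see the matching wait block later in this diff). A sketch against the RemoteProfiler surface visible here (constructor plus start_profiling and wait_for_completion); the host, port, credentials, and duration are placeholders:

```python
import logging
from redis_benchmarks_specification.__runner__.remote_profiling import RemoteProfiler

profiler = RemoteProfiler(
    "profiling-host.example",   # remote_profile_host (placeholder)
    9090,                       # remote_profile_port (placeholder)
    "./profiles",               # remote_profile_output_dir
    "bench-user",               # remote_profile_username (placeholder)
    None,                       # remote_profile_password
)

expected_duration = 60          # e.g. taken from extract_expected_benchmark_duration(...)
if profiler.start_profiling(None, "example-test", expected_duration):  # None: no redis conn in this sketch
    # ... run the benchmark container here ...
    if not profiler.wait_for_completion(timeout=60):
        logging.warning("remote profiling did not finish in time")
```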
@@ -1305,22 +1556,53 @@ def process_self_contained_coordinator_stream(
                     )

                     # Use explicit container management for single client
[7 lines removed here; their content is not shown in this diff view]
+                    import os
+
+                    # Set working directory based on tool
+                    working_dir = benchmark_tool_workdir
+                    if "vector-db-benchmark" in benchmark_tool:
+                        working_dir = "/app"  # vector-db-benchmark needs to run from /app
+
+                    # Prepare volumes
+                    volumes = {
+                        temporary_dir_client: {
+                            "bind": client_mnt_point,
+                            "mode": "rw",
                         },
[8 lines removed here; their content is not shown in this diff view]
+                    }
+
+                    # For vector-db-benchmark, also mount the results directory
+                    if "vector-db-benchmark" in benchmark_tool:
+                        volumes[temporary_dir_client] = {
+                            "bind": "/app/results",
+                            "mode": "rw",
+                        }
+
+                    container_kwargs = {
+                        "image": client_container_image,
+                        "volumes": volumes,
+                        "auto_remove": False,
+                        "privileged": True,
+                        "working_dir": working_dir,
+                        "command": benchmark_command_str,
+                        "network_mode": "host",
+                        "detach": True,
+                        "cpuset_cpus": client_cpuset_cpus,
+                    }
+
+                    # Only add user for non-vector-db-benchmark tools to avoid permission issues
+                    if "vector-db-benchmark" not in benchmark_tool:
+                        container_kwargs["user"] = f"{os.getuid()}:{os.getgid()}"
+
+                    # Add environment variables for vector-db-benchmark
+                    if "vector-db-benchmark" in benchmark_tool:
+                        try:
+                            container_kwargs["environment"] = env_vars
+                        except NameError:
+                            # env_vars not defined, skip environment variables
+                            pass
+
+                    container = docker_client.containers.run(**container_kwargs)

                     # Wait for container and get output
                     try:
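One detail in the block above that is easy to misread: the try/except NameError guards the case where env_vars was never bound (for example when a different tool branch prepared the command). Referencing an unbound name raises NameError, which is exactly what gets caught (UnboundLocalError is a subclass, so a partially-initialized local is covered too). A minimal, hypothetical illustration:

```python
def build_kwargs(tool):
    kwargs = {"image": "example:latest"}           # hypothetical base kwargs
    if tool == "vector-db-benchmark":
        try:
            kwargs["environment"] = env_vars        # NameError: env_vars was never assigned
        except NameError:
            pass                                    # fall back to running without extra env vars
    return kwargs

print(build_kwargs("vector-db-benchmark"))          # {'image': 'example:latest'}
```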
@@ -1370,7 +1652,25 @@ def process_self_contained_coordinator_stream(
                         test_name,
                     )

+                    # wait for remote profiling completion
+                    if remote_profiler is not None:
+                        try:
+                            logging.info("Waiting for remote profiling to complete...")
+                            profiling_success = remote_profiler.wait_for_completion(timeout=60)
+                            if profiling_success:
+                                logging.info("Remote profiling completed successfully")
+                            else:
+                                logging.warning("Remote profiling did not complete successfully")
+                        except Exception as e:
+                            logging.error(f"Error waiting for remote profiling completion: {e}")
+
                     logging.info("Printing client tool stdout output")
+                    if client_container_stdout:
+                        print("=== Container Output ===")
+                        print(client_container_stdout)
+                        print("=== End Container Output ===")
+                    else:
+                        logging.warning("No container output captured")

                     used_memory_check(
                         test_name,
@@ -1428,13 +1728,30 @@ def process_self_contained_coordinator_stream(
                        full_result_path = "{}/{}".format(
                            temporary_dir_client, local_benchmark_output_filename
                        )
+                    elif "vector-db-benchmark" in benchmark_tool:
+                        # For vector-db-benchmark, look for summary JSON file
+                        import os
+                        summary_files = [f for f in os.listdir(temporary_dir_client) if f.endswith("-summary.json")]
+                        if summary_files:
+                            full_result_path = os.path.join(temporary_dir_client, summary_files[0])
+                            logging.info(f"Found vector-db-benchmark summary file: {summary_files[0]}")
+                        else:
+                            logging.warning("No vector-db-benchmark summary JSON file found")
+                            # Create empty results dict to avoid crash
+                            results_dict = {}
+
                     logging.info(f"Reading results json from {full_result_path}")

[5 lines removed here; their content is not shown in this diff view]
+                    if "vector-db-benchmark" in benchmark_tool and not os.path.exists(full_result_path):
+                        # Handle case where vector-db-benchmark didn't produce results
+                        results_dict = {}
+                        logging.warning("Vector-db-benchmark did not produce results file")
+                    else:
+                        with open(
+                            full_result_path,
+                            "r",
+                        ) as json_file:
+                            results_dict = json.load(json_file)
                     print_results_table_stdout(
                         benchmark_config,
                         default_metrics,
@@ -1529,7 +1846,14 @@ def process_self_contained_coordinator_stream(
         # Get redis_conns from the first test context (we need to pass it somehow)
         # For now, try to get it from the current context if available
         try:
[1 line removed here; its content is not shown in this diff view]
+            # Try to get redis connection to display server info
+            import redis as redis_module
+
+            r = redis_module.StrictRedis(
+                host="localhost", port=6379, decode_responses=True
+            )
+            r.ping()  # Test connection
+            print_redis_info_section([r])
         except Exception as e:
             logging.info(f"Could not connect to Redis for server info: {e}")

@@ -1654,7 +1978,32 @@ def print_results_table_stdout(
     ]
     results_matrix = extract_results_table(metrics, results_dict)

[1 line removed here; its content is not shown in this diff view]
+    # Use resolved metric name for precision_summary metrics, otherwise use original path
+    def get_display_name(x):
+        # For precision_summary metrics with wildcards, construct the resolved path
+        if (len(x) > 1 and
+            isinstance(x[0], str) and
+            "precision_summary" in x[0] and
+            "*" in x[0]):
+
+            # Look for the precision level in the cleaned metrics logs
+            # We need to find the corresponding cleaned metric to get the precision level
+            # For now, let's extract it from the time series logs that we know are working
+            # The pattern is: replace "*" with the actual precision level
+
+            # Since we know from logs that the precision level is available,
+            # let's reconstruct it from the metric context path (x[1]) if available
+            if len(x) > 1 and isinstance(x[1], str) and x[1].startswith("'") and x[1].endswith("'"):
+                precision_level = x[1]  # This should be something like "'1.0000'"
+                resolved_path = x[0].replace("*", precision_level)
+                return resolved_path
+
+        return x[0]  # Use original path
+
+    results_matrix = [
+        [get_display_name(x), f"{x[3]:.3f}"]
+        for x in results_matrix
+    ]
     writer = MarkdownTableWriter(
         table_name=table_name,
         headers=results_matrix_headers,
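To make the wildcard resolution above concrete, a small sketch with a hypothetical results row: x[0] is a metric path containing precision_summary and a "*", x[1] carries the quoted precision level that replaces the wildcard, and x[3] is the value that gets formatted:

```python
def get_display_name(x):
    # condensed version of the helper added above
    if len(x) > 1 and isinstance(x[0], str) and "precision_summary" in x[0] and "*" in x[0]:
        if isinstance(x[1], str) and x[1].startswith("'") and x[1].endswith("'"):
            return x[0].replace("*", x[1])
    return x[0]

row = ["precision_summary.*.avg_precision", "'1.0000'", None, 0.987]   # hypothetical row
print(get_display_name(row))    # precision_summary.'1.0000'.avg_precision
print(f"{row[3]:.3f}")          # 0.987
```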
@@ -1668,14 +2017,19 @@ def print_redis_info_section(redis_conns):
     if redis_conns is not None and len(redis_conns) > 0:
         try:
             redis_info = redis_conns[0].info()
+            server_name = "redis"
+            if "server_name" in redis_info:
+                server_name = redis_info['server_name']

             print("\n# Redis Server Information")
             redis_info_data = [
[5 lines removed here; their content is truncated in this diff view]
+                [f"{server_name} version", redis_info.get(f"{server_name}_version", "unknown")],
+                ["redis version", redis_info.get("redis_version", "unknown")],
+                ["io_threads_active", redis_info.get("io_threads_active", "unknown")],
+                [f"{server_name} Git SHA1", redis_info.get("redis_git_sha1", "unknown")],
+                [f"{server_name} Git Dirty", str(redis_info.get("redis_git_dirty", "unknown"))],
+                [f"{server_name} Build ID", redis_info.get("redis_build_id", "unknown")],
+                [f"{server_name} Mode", redis_info.get("redis_mode", "unknown")],
                 ["OS", redis_info.get("os", "unknown")],
                 ["Arch Bits", str(redis_info.get("arch_bits", "unknown"))],
                 ["GCC Version", redis_info.get("gcc_version", "unknown")],
@@ -1703,6 +2057,78 @@ def print_redis_info_section(redis_conns):
             logging.warning(f"Failed to collect Redis server information: {e}")


+def get_supported_redis_commands(redis_conns):
+    """Get list of supported Redis commands from the server"""
+    if redis_conns is not None and len(redis_conns) > 0:
+        try:
+            # Execute COMMAND to get all supported commands
+            commands_info = redis_conns[0].execute_command("COMMAND")
+            logging.info(f"COMMAND response type: {type(commands_info)}, length: {len(commands_info) if hasattr(commands_info, '__len__') else 'N/A'}")
+
+            # Extract command names
+            supported_commands = set()
+
+            if isinstance(commands_info, dict):
+                # COMMAND response is a dict with command names as keys
+                for cmd_name in commands_info.keys():
+                    if isinstance(cmd_name, bytes):
+                        cmd_name = cmd_name.decode('utf-8')
+                    supported_commands.add(str(cmd_name).upper())
+            elif isinstance(commands_info, (list, tuple)):
+                # Fallback for list format (first element of each command info array)
+                for cmd_info in commands_info:
+                    if isinstance(cmd_info, (list, tuple)) and len(cmd_info) > 0:
+                        cmd_name = cmd_info[0]
+                        if isinstance(cmd_name, bytes):
+                            cmd_name = cmd_name.decode('utf-8')
+                        supported_commands.add(str(cmd_name).upper())
+
+            logging.info(f"Retrieved {len(supported_commands)} supported Redis commands")
+
+            # Log some sample commands for debugging
+            if supported_commands:
+                sample_commands = sorted(list(supported_commands))[:10]
+                logging.info(f"Sample commands: {sample_commands}")
+
+                # Check specifically for vector commands
+                vector_commands = [cmd for cmd in supported_commands if cmd.startswith('V')]
+                if vector_commands:
+                    logging.info(f"Vector commands found: {sorted(vector_commands)}")
+
+            return supported_commands
+        except Exception as e:
+            logging.warning(f"Failed to get supported Redis commands: {e}")
+            logging.warning("Proceeding without command validation")
+            return None
+    return None
+
+
+def check_test_command_support(benchmark_config, supported_commands):
+    """Check if all tested-commands in the benchmark config are supported"""
+    if supported_commands is None:
+        logging.warning("No supported commands list available, skipping command check")
+        return True, []
+
+    if "tested-commands" not in benchmark_config:
+        logging.info("No tested-commands specified in benchmark config")
+        return True, []
+
+    tested_commands = benchmark_config["tested-commands"]
+    unsupported_commands = []
+
+    for cmd in tested_commands:
+        cmd_upper = cmd.upper()
+        if cmd_upper not in supported_commands:
+            unsupported_commands.append(cmd)
+
+    if unsupported_commands:
+        logging.warning(f"Unsupported commands found: {unsupported_commands}")
+        return False, unsupported_commands
+    else:
+        logging.info(f"All tested commands are supported: {tested_commands}")
+        return True, []
+
+
 def prepare_overall_total_test_results(
     benchmark_config,
     default_metrics,
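A sketch of the gating decision, assuming the two helpers above; the supported set and test configs are illustrative (in the runner the set comes from the server's COMMAND reply and the config from the test-suite YAML):

```python
supported = {"GET", "SET", "HSET", "HSCAN"}                  # e.g. parsed from COMMAND
config_ok = {"tested-commands": ["hscan", "hset"]}
config_skip = {"tested-commands": ["vadd", "vsim"]}           # hypothetically absent from the server

ok, missing = check_test_command_support(config_ok, supported)
assert ok and missing == []

ok, missing = check_test_command_support(config_skip, supported)
assert not ok and missing == ["vadd", "vsim"]                 # this test would be skipped
```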
@@ -1721,8 +2147,25 @@ def prepare_overall_total_test_results(
             None,
         )
         current_test_results_matrix = extract_results_table(metrics, results_dict)
+
+        # Use the same display name logic as in the individual test results
+        def get_overall_display_name(x):
+            # For precision_summary metrics with wildcards, construct the resolved path
+            if (len(x) > 1 and
+                isinstance(x[0], str) and
+                "precision_summary" in x[0] and
+                "*" in x[0]):
+
+                # Reconstruct resolved path from metric context path (x[1]) if available
+                if len(x) > 1 and isinstance(x[1], str) and x[1].startswith("'") and x[1].endswith("'"):
+                    precision_level = x[1]  # This should be something like "'1.0000'"
+                    resolved_path = x[0].replace("*", precision_level)
+                    return resolved_path
+
+            return x[0]  # Use original path
+
         current_test_results_matrix = [
-            [test_name, x
+            [test_name, get_overall_display_name(x), f"{x[3]:.3f}"] for x in current_test_results_matrix
         ]
         overall_results_matrix.extend(current_test_results_matrix)
