redis-benchmarks-specification 0.1.271__py3-none-any.whl → 0.1.273__py3-none-any.whl

This diff shows the changes between two publicly released versions of this package, as they appear in their public registry. It is provided for informational purposes only.

Potentially problematic release.

Files changed (37)
  1. redis_benchmarks_specification/__cli__/stats.py +37 -6
  2. redis_benchmarks_specification/__common__/spec.py +64 -3
  3. redis_benchmarks_specification/__runner__/args.py +6 -0
  4. redis_benchmarks_specification/__runner__/runner.py +700 -94
  5. redis_benchmarks_specification/test-suites/defaults.yml +1 -0
  6. redis_benchmarks_specification/test-suites/generate.py +7 -1
  7. redis_benchmarks_specification/test-suites/memtier_benchmark-10Mkeys-string-set-update-del-ex-36000-pipeline-10.yml +32 -0
  8. redis_benchmarks_specification/test-suites/memtier_benchmark-150Mkeys-string-set-ex-20-pipeline-10.yml +30 -0
  9. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-generic-scan-cursor-count-5000-pipeline-10.yml +34 -0
  10. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-load-set-intset-with-100-elements-19-digits-pipeline-10.yml +30 -0
  11. redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-load-set-intset-with-100-elements-19-digits.yml +30 -0
  12. redis_benchmarks_specification/test-suites/memtier_benchmark-3Mkeys-string-get-with-1KiB-values-400_conns.yml +38 -0
  13. redis_benchmarks_specification/test-suites/memtier_benchmark-3Mkeys-string-get-with-1KiB-values-40_conns.yml +38 -0
  14. redis_benchmarks_specification/test-suites/memtier_benchmark-3Mkeys-string-get-with-1KiB-values-pipeline-10-2000_conns.yml +1 -1
  15. redis_benchmarks_specification/test-suites/memtier_benchmark-3Mkeys-string-get-with-1KiB-values-pipeline-10-400_conns.yml +1 -1
  16. redis_benchmarks_specification/test-suites/memtier_benchmark-3Mkeys-string-get-with-1KiB-values-pipeline-10-40_conns.yml +1 -1
  17. redis_benchmarks_specification/test-suites/memtier_benchmark-3Mkeys-string-mixed-20-80-with-512B-values-400_conns.yml +38 -0
  18. redis_benchmarks_specification/test-suites/memtier_benchmark-3Mkeys-string-mixed-20-80-with-512B-values-pipeline-10-2000_conns.yml +1 -1
  19. redis_benchmarks_specification/test-suites/memtier_benchmark-3Mkeys-string-mixed-20-80-with-512B-values-pipeline-10-400_conns.yml +1 -1
  20. redis_benchmarks_specification/test-suites/memtier_benchmark-3Mkeys-string-mixed-20-80-with-512B-values-pipeline-10-5200_conns.yml +1 -1
  21. redis_benchmarks_specification/test-suites/memtier_benchmark-3Mkeys-string-mixed-50-50-with-512B-values-with-expiration-pipeline-10-400_conns.yml +1 -1
  22. redis_benchmarks_specification/test-suites/memtier_benchmark-50Mkeys-string-set-ex-10-with-precondition-pipeline-10.yml +34 -0
  23. redis_benchmarks_specification/test-suites/memtier_benchmark-50Mkeys-string-set-ex-10years-pipeline-10.yml +30 -0
  24. redis_benchmarks_specification/test-suites/memtier_benchmark-50Mkeys-string-set-ex-3-pipeline-10.yml +30 -0
  25. redis_benchmarks_specification/test-suites/memtier_benchmark-50Mkeys-string-set-ex-random-range-pipeline-10.yml +30 -0
  26. redis_benchmarks_specification/test-suites/memtier_benchmark-50Mkeys-string-set-update-del-ex-120-pipeline-10.yml +32 -0
  27. redis_benchmarks_specification/test-suites/memtier_benchmark-80Mkeys-string-set-ex-20-precodition-multiclient-pipeline-10.yml +34 -0
  28. redis_benchmarks_specification/test-suites/memtier_benchmark-nokeys-pubsub-mixed-100-channels-128B-100-publishers-100-subscribers.yml +35 -0
  29. redis_benchmarks_specification/test-suites/memtier_benchmark-nokeys-pubsub-mixed-100-channels-128B-100-publishers-1000-subscribers.yml +35 -0
  30. redis_benchmarks_specification/test-suites/memtier_benchmark-nokeys-pubsub-mixed-100-channels-128B-100-publishers-5000-subscribers.yml +35 -0
  31. redis_benchmarks_specification/test-suites/memtier_benchmark-nokeys-pubsub-mixed-100-channels-128B-100-publishers-50K-subscribers-5k-conns.yml +35 -0
  32. {redis_benchmarks_specification-0.1.271.dist-info → redis_benchmarks_specification-0.1.273.dist-info}/METADATA +1 -1
  33. {redis_benchmarks_specification-0.1.271.dist-info → redis_benchmarks_specification-0.1.273.dist-info}/RECORD +36 -19
  34. redis_benchmarks_specification/setups/builders/gcc:10.5.0-amd64-debian-bullseye-redisearch.yml +0 -24
  35. {redis_benchmarks_specification-0.1.271.dist-info → redis_benchmarks_specification-0.1.273.dist-info}/LICENSE +0 -0
  36. {redis_benchmarks_specification-0.1.271.dist-info → redis_benchmarks_specification-0.1.273.dist-info}/WHEEL +0 -0
  37. {redis_benchmarks_specification-0.1.271.dist-info → redis_benchmarks_specification-0.1.273.dist-info}/entry_points.txt +0 -0
@@ -57,6 +57,9 @@ from redis_benchmarks_specification.__common__.spec import (
     extract_client_container_image,
     extract_client_cpu_limit,
     extract_client_tool,
+    extract_client_configs,
+    extract_client_container_images,
+    extract_client_tools,
 )
 from redis_benchmarks_specification.__runner__.args import create_client_runner_args
 
@@ -88,6 +91,321 @@ def parse_size(size):
     return int(number * units[unit])
 
 
+def run_multiple_clients(
+    benchmark_config,
+    docker_client,
+    temporary_dir_client,
+    client_mnt_point,
+    benchmark_tool_workdir,
+    client_cpuset_cpus,
+    port,
+    host,
+    password,
+    oss_cluster_api_enabled,
+    tls_enabled,
+    tls_skip_verify,
+    test_tls_cert,
+    test_tls_key,
+    test_tls_cacert,
+    resp_version,
+    override_memtier_test_time,
+    override_test_runs,
+    unix_socket,
+    args,
+):
+    """
+    Run multiple client configurations simultaneously and aggregate results.
+    Returns aggregated stdout and list of individual results.
+    """
+    client_configs = extract_client_configs(benchmark_config)
+    client_images = extract_client_container_images(benchmark_config)
+    client_tools = extract_client_tools(benchmark_config)
+
+    if not client_configs:
+        raise ValueError("No client configurations found")
+
+    containers = []
+    results = []
+
+    # Start all containers simultaneously (detached)
+    for client_index, (client_config, client_tool, client_image) in enumerate(
+        zip(client_configs, client_tools, client_images)
+    ):
+        try:
+            local_benchmark_output_filename = f"benchmark_output_{client_index}.json"
+
+            # Prepare benchmark command for this client
+            if "memtier_benchmark" in client_tool:
+                (
+                    _,
+                    benchmark_command_str,
+                    arbitrary_command,
+                ) = prepare_memtier_benchmark_parameters(
+                    client_config,
+                    client_tool,
+                    port,
+                    host,
+                    password,
+                    local_benchmark_output_filename,
+                    oss_cluster_api_enabled,
+                    tls_enabled,
+                    tls_skip_verify,
+                    test_tls_cert,
+                    test_tls_key,
+                    test_tls_cacert,
+                    resp_version,
+                    override_memtier_test_time,
+                    override_test_runs,
+                    unix_socket,
+                )
+            elif "pubsub-sub-bench" in client_tool:
+                (
+                    _,
+                    benchmark_command_str,
+                    arbitrary_command,
+                ) = prepare_pubsub_sub_bench_parameters(
+                    client_config,
+                    client_tool,
+                    port,
+                    host,
+                    password,
+                    local_benchmark_output_filename,
+                    oss_cluster_api_enabled,
+                    tls_enabled,
+                    tls_skip_verify,
+                    test_tls_cert,
+                    test_tls_key,
+                    test_tls_cacert,
+                    resp_version,
+                    override_memtier_test_time,
+                    unix_socket,
+                    None,  # username
+                )
+            else:
+                # Handle other benchmark tools
+                (
+                    benchmark_command,
+                    benchmark_command_str,
+                ) = prepare_benchmark_parameters(
+                    {**benchmark_config, "clientconfig": client_config},
+                    client_tool,
+                    port,
+                    host,
+                    local_benchmark_output_filename,
+                    False,
+                    benchmark_tool_workdir,
+                    False,
+                )
+
+            # Calculate container timeout
+            container_timeout = 300  # 5 minutes default
+            buffer_timeout = (
+                args.container_timeout_buffer
+            )  # Configurable buffer from command line
+            if "test-time" in benchmark_command_str:
+                # Try to extract test time and add buffer
+                import re
+
+                # Handle both --test-time (memtier) and -test-time (pubsub-sub-bench)
+                test_time_match = re.search(
+                    r"--?test-time[=\s]+(\d+)", benchmark_command_str
+                )
+                if test_time_match:
+                    test_time = int(test_time_match.group(1))
+                    container_timeout = test_time + buffer_timeout
+                    logging.info(
+                        f"Client {client_index}: Set container timeout to {container_timeout}s (test-time: {test_time}s + {buffer_timeout}s buffer)"
+                    )
+
+            logging.info(
+                f"Starting client {client_index} with docker image {client_image} (cpuset={client_cpuset_cpus}) with args: {benchmark_command_str}"
+            )
+
+            # Start container (detached)
+            import os
+
+            container = docker_client.containers.run(
+                image=client_image,
+                volumes={
+                    temporary_dir_client: {
+                        "bind": client_mnt_point,
+                        "mode": "rw",
+                    },
+                },
+                auto_remove=False,
+                privileged=True,
+                working_dir=benchmark_tool_workdir,
+                command=benchmark_command_str,
+                network_mode="host",
+                detach=True,
+                cpuset_cpus=client_cpuset_cpus,
+                user=f"{os.getuid()}:{os.getgid()}",  # Run as current user to fix permissions
+            )
+
+            containers.append(
+                {
+                    "container": container,
+                    "client_index": client_index,
+                    "client_tool": client_tool,
+                    "client_image": client_image,
+                    "benchmark_command_str": benchmark_command_str,
+                    "timeout": container_timeout,
+                }
+            )
+
+        except Exception as e:
+            error_msg = f"Error starting client {client_index}: {e}"
+            logging.error(error_msg)
+            logging.error(f"Image: {client_image}, Tool: {client_tool}")
+            logging.error(f"Command: {benchmark_command_str}")
+            # Fail fast on container startup errors
+            raise RuntimeError(f"Failed to start client {client_index}: {e}")
+
+    # Wait for all containers to complete
+    logging.info(f"Waiting for {len(containers)} containers to complete...")
+
+    for container_info in containers:
+        container = container_info["container"]
+        client_index = container_info["client_index"]
+        client_tool = container_info["client_tool"]
+        client_image = container_info["client_image"]
+        benchmark_command_str = container_info["benchmark_command_str"]
+
+        try:
+            # Wait for container to complete
+            exit_code = container.wait(timeout=container_info["timeout"])
+            client_stdout = container.logs().decode("utf-8")
+
+            # Check if container succeeded
+            if exit_code.get("StatusCode", 1) != 0:
+                logging.error(
+                    f"Client {client_index} failed with exit code: {exit_code}"
+                )
+                logging.error(f"Client {client_index} stdout/stderr:")
+                logging.error(client_stdout)
+                # Fail fast on container execution errors
+                raise RuntimeError(
+                    f"Client {client_index} ({client_tool}) failed with exit code {exit_code}"
+                )
+
+            logging.info(
+                f"Client {client_index} completed successfully with exit code: {exit_code}"
+            )
+
+            results.append(
+                {
+                    "client_index": client_index,
+                    "stdout": client_stdout,
+                    "config": client_configs[client_index],
+                    "tool": client_tool,
+                    "image": client_image,
+                }
+            )
+
+        except Exception as e:
+            # Get logs even if wait failed
+            try:
+                client_stdout = container.logs().decode("utf-8")
+                logging.error(f"Client {client_index} logs:")
+                logging.error(client_stdout)
+            except:
+                logging.error(f"Could not retrieve logs for client {client_index}")
+
+            raise RuntimeError(f"Client {client_index} ({client_tool}) failed: {e}")
+
+        finally:
+            # Clean up container
+            try:
+                container.remove(force=True)
+            except Exception as cleanup_error:
+                logging.warning(f"Client {client_index} cleanup error: {cleanup_error}")
+
+    logging.info(f"Successfully completed {len(containers)} client configurations")
+
+    # Aggregate results by reading JSON output files
+    aggregated_stdout = ""
+    successful_results = [r for r in results if "error" not in r]
+
+    if successful_results:
+        # Try to read and aggregate JSON output files
+        import json
+        import os
+
+        aggregated_json = {}
+        memtier_json = None
+        pubsub_json = None
+
+        for result in successful_results:
+            client_index = result["client_index"]
+            tool = result["tool"]
+
+            # Look for JSON output file
+            json_filename = f"benchmark_output_{client_index}.json"
+            json_filepath = os.path.join(temporary_dir_client, json_filename)
+
+            if os.path.exists(json_filepath):
+                try:
+                    with open(json_filepath, "r") as f:
+                        client_json = json.load(f)
+
+                    if "memtier_benchmark" in tool:
+                        # Store memtier JSON
+                        memtier_json = client_json
+                        logging.info(
+                            f"Successfully read memtier JSON output from client {client_index}"
+                        )
+                    elif "pubsub-sub-bench" in tool:
+                        # Store pubsub JSON
+                        pubsub_json = client_json
+                        logging.info(
+                            f"Successfully read pubsub-sub-bench JSON output from client {client_index}"
+                        )
+
+                    logging.info(
+                        f"Successfully read JSON output from client {client_index} ({tool})"
+                    )
+
+                except Exception as e:
+                    logging.warning(
+                        f"Failed to read JSON from client {client_index}: {e}"
+                    )
+                    # Fall back to stdout
+                    pass
+            else:
+                logging.warning(
+                    f"JSON output file not found for client {client_index}: {json_filepath}"
+                )
+
+        # Merge JSON outputs from both tools
+        if memtier_json and pubsub_json:
+            # Use memtier as base and add pubsub metrics
+            aggregated_json = memtier_json.copy()
+            # Add pubsub metrics to the aggregated result
+            aggregated_json.update(pubsub_json)
+            aggregated_stdout = json.dumps(aggregated_json, indent=2)
+            logging.info(
+                "Using merged JSON results from memtier and pubsub-sub-bench clients"
+            )
+        elif memtier_json:
+            # Only memtier available
+            aggregated_json = memtier_json
+            aggregated_stdout = json.dumps(aggregated_json, indent=2)
+            logging.info("Using JSON results from memtier client only")
+        elif pubsub_json:
+            # Only pubsub available
+            aggregated_json = pubsub_json
+            aggregated_stdout = json.dumps(aggregated_json, indent=2)
+            logging.info("Using JSON results from pubsub-sub-bench client only")
+        else:
+            # Fall back to concatenated stdout
+            aggregated_stdout = "\n".join([r["stdout"] for r in successful_results])
+            logging.warning(
+                "No JSON results found, falling back to concatenated stdout"
+            )
+
+    return aggregated_stdout, results
+
+
 def main():
     _, _, project_version = populate_with_poetry_data()
     project_name_suffix = "redis-benchmarks-spec-client-runner"
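
For readers unfamiliar with the pattern run_multiple_clients builds on: with the Docker SDK for Python, detach=True makes containers.run() return a Container handle immediately, and exit status is then collected explicitly via wait(). A minimal standalone sketch of that lifecycle (image and command are placeholders, not taken from this package):

import docker

client = docker.from_env()
container = client.containers.run(
    image="alpine:3",               # placeholder image
    command="echo benchmark-done",  # placeholder command
    detach=True,                    # return immediately with a Container handle
    auto_remove=False,              # keep the container so logs survive its exit
)
try:
    status = container.wait(timeout=60)  # e.g. {"StatusCode": 0}; raises if exceeded
    stdout = container.logs().decode("utf-8")
finally:
    container.remove(force=True)  # manual cleanup replaces auto_remove=True
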
@@ -347,6 +665,96 @@ def prepare_memtier_benchmark_parameters(
     return None, benchmark_command_str, arbitrary_command
 
 
+def prepare_pubsub_sub_bench_parameters(
+    clientconfig,
+    full_benchmark_path,
+    port,
+    server,
+    password,
+    local_benchmark_output_filename,
+    oss_cluster_api_enabled=False,
+    tls_enabled=False,
+    tls_skip_verify=False,
+    tls_cert=None,
+    tls_key=None,
+    tls_cacert=None,
+    resp_version=None,
+    override_test_time=0,
+    unix_socket="",
+    username=None,
+):
+    """
+    Prepare pubsub-sub-bench command parameters
+    """
+    arbitrary_command = False
+
+    benchmark_command = [
+        # full_benchmark_path,
+        "-json-out-file",
+        local_benchmark_output_filename,
+    ]
+
+    # Connection parameters
+    if unix_socket != "":
+        # pubsub-sub-bench doesn't support unix sockets directly
+        # Fall back to host/port
+        logging.warning(
+            "pubsub-sub-bench doesn't support unix sockets, using host/port"
+        )
+        benchmark_command.extend(["-host", server, "-port", str(port)])
+    else:
+        benchmark_command.extend(["-host", server, "-port", str(port)])
+
+    # Authentication
+    if username and password:
+        # ACL style authentication
+        benchmark_command.extend(["-user", username, "-a", password])
+    elif password:
+        # Password-only authentication
+        benchmark_command.extend(["-a", password])
+
+    # TLS support (if the tool supports it in future versions)
+    if tls_enabled:
+        logging.warning("pubsub-sub-bench TLS support not implemented yet")
+
+    # RESP version
+    if resp_version:
+        if resp_version == "3":
+            benchmark_command.extend(["-resp", "3"])
+        elif resp_version == "2":
+            benchmark_command.extend(["-resp", "2"])
+
+    # Cluster mode
+    if oss_cluster_api_enabled:
+        benchmark_command.append("-oss-cluster-api-distribute-subscribers")
+
+    logging.info(f"Preparing pubsub-sub-bench parameters: {benchmark_command}")
+    benchmark_command_str = " ".join(benchmark_command)
+
+    # Append user-defined arguments from YAML
+    user_arguments = ""
+    if "arguments" in clientconfig:
+        user_arguments = clientconfig["arguments"]
+
+    # Test time override - handle after user arguments to avoid conflicts
+    if override_test_time and override_test_time > 0:
+        # Remove any existing -test-time from user arguments
+        import re
+
+        user_arguments = re.sub(r"-test-time\s+\d+", "", user_arguments)
+        # Add our override test time
+        benchmark_command_str = (
+            benchmark_command_str + " -test-time " + str(override_test_time)
+        )
+        logging.info(f"Applied test-time override: {override_test_time}s")
+
+    # Add cleaned user arguments
+    if user_arguments.strip():
+        benchmark_command_str = benchmark_command_str + " " + user_arguments.strip()
+
+    return benchmark_command, benchmark_command_str, arbitrary_command
+
+
 def process_self_contained_coordinator_stream(
     args,
     datasink_push_results_redistimeseries,
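
As a quick illustration of the -test-time override at the end of prepare_pubsub_sub_bench_parameters above, the same two steps can be exercised standalone (the sample argument string is hypothetical):

import re

user_arguments = "-channel-maximum 100 -test-time 60"  # hypothetical YAML arguments
# Step 1: strip any user-supplied -test-time so the override is authoritative
user_arguments = re.sub(r"-test-time\s+\d+", "", user_arguments)
# Step 2: append the override first, then the cleaned user arguments
command = "-json-out-file out.json -host 127.0.0.1 -port 6379" + " -test-time 120"
command = command + " " + user_arguments.strip()
# command now ends with "... -test-time 120 -channel-maximum 100"
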
@@ -376,7 +784,7 @@ def process_self_contained_coordinator_stream(
             if preserve_temporary_client_dirs is True:
                 logging.info(f"Preserving temporary client dir {temporary_dir_client}")
             else:
-                if "redis-benchmark" in benchmark_tool_global:
+                if benchmark_tool_global and "redis-benchmark" in benchmark_tool_global:
                     if full_result_path is not None:
                         os.remove(full_result_path)
                         logging.info("Removing temporary JSON file")
@@ -723,45 +1131,6 @@ def process_self_contained_coordinator_stream(
             )
             arbitrary_command = False
 
-            if "memtier_benchmark" not in benchmark_tool:
-                # prepare the benchmark command
-                (
-                    benchmark_command,
-                    benchmark_command_str,
-                ) = prepare_benchmark_parameters(
-                    benchmark_config,
-                    full_benchmark_path,
-                    port,
-                    host,
-                    local_benchmark_output_filename,
-                    False,
-                    benchmark_tool_workdir,
-                    False,
-                )
-            else:
-                (
-                    _,
-                    benchmark_command_str,
-                    arbitrary_command,
-                ) = prepare_memtier_benchmark_parameters(
-                    benchmark_config["clientconfig"],
-                    full_benchmark_path,
-                    port,
-                    host,
-                    password,
-                    local_benchmark_output_filename,
-                    oss_cluster_api_enabled,
-                    tls_enabled,
-                    tls_skip_verify,
-                    test_tls_cert,
-                    test_tls_key,
-                    test_tls_cacert,
-                    resp_version,
-                    override_memtier_test_time,
-                    override_test_runs,
-                    unix_socket,
-                )
-
             if (
                 arbitrary_command
                 and oss_cluster_api_enabled
@@ -777,9 +1146,83 @@ def process_self_contained_coordinator_stream(
                 )
                 continue
 
-            client_container_image = extract_client_container_image(
-                benchmark_config
-            )
+            # Check if we have multiple client configurations
+            client_configs = extract_client_configs(benchmark_config)
+            is_multiple_clients = len(client_configs) > 1
+
+            if is_multiple_clients:
+                logging.info(
+                    f"Running test with {len(client_configs)} client configurations"
+                )
+            else:
+                # Legacy single client mode - prepare benchmark parameters
+                client_container_image = extract_client_container_image(
+                    benchmark_config
+                )
+                benchmark_tool = extract_client_tool(benchmark_config)
+
+                # Prepare benchmark command for single client
+                if "memtier_benchmark" in benchmark_tool:
+                    (
+                        _,
+                        benchmark_command_str,
+                        arbitrary_command,
+                    ) = prepare_memtier_benchmark_parameters(
+                        benchmark_config["clientconfig"],
+                        full_benchmark_path,
+                        port,
+                        host,
+                        password,
+                        local_benchmark_output_filename,
+                        oss_cluster_api_enabled,
+                        tls_enabled,
+                        tls_skip_verify,
+                        test_tls_cert,
+                        test_tls_key,
+                        test_tls_cacert,
+                        resp_version,
+                        override_memtier_test_time,
+                        override_test_runs,
+                        unix_socket,
+                    )
+                elif "pubsub-sub-bench" in benchmark_tool:
+                    (
+                        _,
+                        benchmark_command_str,
+                        arbitrary_command,
+                    ) = prepare_pubsub_sub_bench_parameters(
+                        benchmark_config["clientconfig"],
+                        full_benchmark_path,
+                        port,
+                        host,
+                        password,
+                        local_benchmark_output_filename,
+                        oss_cluster_api_enabled,
+                        tls_enabled,
+                        tls_skip_verify,
+                        test_tls_cert,
+                        test_tls_key,
+                        test_tls_cacert,
+                        resp_version,
+                        override_memtier_test_time,
+                        unix_socket,
+                        None,  # username
+                    )
+                else:
+                    # prepare the benchmark command for other tools
+                    (
+                        benchmark_command,
+                        benchmark_command_str,
+                    ) = prepare_benchmark_parameters(
+                        benchmark_config,
+                        full_benchmark_path,
+                        port,
+                        host,
+                        local_benchmark_output_filename,
+                        False,
+                        benchmark_tool_workdir,
+                        False,
+                    )
 
             profiler_call_graph_mode = "dwarf"
             profiler_frequency = 99
 
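
extract_client_configs comes from the spec.py change listed above, whose body this diff view does not show. A plausible shape, assuming it merely normalizes single- and multi-client YAML into a list (the clientconfigs key name is an assumption, not confirmed by this diff):

def extract_client_configs(benchmark_config):
    # Hypothetical sketch of the helper's contract: always return a list,
    # whether the suite declared one "clientconfig" mapping or several.
    if "clientconfigs" in benchmark_config:  # assumed multi-client key
        return list(benchmark_config["clientconfigs"])
    if "clientconfig" in benchmark_config:
        return [benchmark_config["clientconfig"]]
    return []
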
@@ -801,50 +1244,106 @@ def process_self_contained_coordinator_stream(
             # run the benchmark
             benchmark_start_time = datetime.datetime.now()
 
-            if args.benchmark_local_install:
-                logging.info("Running memtier benchmark outside of docker")
-                benchmark_command_str = (
-                    "taskset -c "
-                    + client_cpuset_cpus
-                    + " "
-                    + benchmark_command_str
-                )
+            if is_multiple_clients:
+                # Run multiple client configurations
                 logging.info(
-                    "Running memtier benchmark command {}".format(
-                        benchmark_command_str
-                    )
+                    "Running multiple client configurations simultaneously"
+                )
+                client_container_stdout, client_results = run_multiple_clients(
+                    benchmark_config,
+                    docker_client,
+                    temporary_dir_client,
+                    client_mnt_point,
+                    benchmark_tool_workdir,
+                    client_cpuset_cpus,
+                    port,
+                    host,
+                    password,
+                    oss_cluster_api_enabled,
+                    tls_enabled,
+                    tls_skip_verify,
+                    test_tls_cert,
+                    test_tls_key,
+                    test_tls_cacert,
+                    resp_version,
+                    override_memtier_test_time,
+                    override_test_runs,
+                    unix_socket,
+                    args,
                 )
-                stream = os.popen(benchmark_command_str)
-                client_container_stdout = stream.read()
-                move_command = "mv {} {}".format(
-                    local_benchmark_output_filename, temporary_dir_client
+                logging.info(
+                    f"Completed {len(client_results)} client configurations"
                 )
-                os.system(move_command)
             else:
-                logging.info(
-                    "Using docker image {} as benchmark client image (cpuset={}) with the following args: {}".format(
-                        client_container_image,
-                        client_cpuset_cpus,
-                        benchmark_command_str,
+                # Legacy single client execution
+                if args.benchmark_local_install:
+                    logging.info("Running memtier benchmark outside of docker")
+                    benchmark_command_str = (
+                        "taskset -c "
+                        + client_cpuset_cpus
+                        + " "
+                        + benchmark_command_str
+                    )
+                    logging.info(
+                        "Running memtier benchmark command {}".format(
+                            benchmark_command_str
+                        )
+                    )
+                    stream = os.popen(benchmark_command_str)
+                    client_container_stdout = stream.read()
+                    move_command = "mv {} {}".format(
+                        local_benchmark_output_filename, temporary_dir_client
+                    )
+                    os.system(move_command)
+                else:
+                    logging.info(
+                        "Using docker image {} as benchmark client image (cpuset={}) with the following args: {}".format(
+                            client_container_image,
+                            client_cpuset_cpus,
+                            benchmark_command_str,
+                        )
                     )
-                )
 
-                client_container_stdout = docker_client.containers.run(
-                    image=client_container_image,
-                    volumes={
-                        temporary_dir_client: {
-                            "bind": client_mnt_point,
-                            "mode": "rw",
+                    # Use explicit container management for single client
+                    container = docker_client.containers.run(
+                        image=client_container_image,
+                        volumes={
+                            temporary_dir_client: {
+                                "bind": client_mnt_point,
+                                "mode": "rw",
+                            },
                         },
-                    },
-                    auto_remove=True,
-                    privileged=True,
-                    working_dir=benchmark_tool_workdir,
-                    command=benchmark_command_str,
-                    network_mode="host",
-                    detach=False,
-                    cpuset_cpus=client_cpuset_cpus,
-                )
+                        auto_remove=False,
+                        privileged=True,
+                        working_dir=benchmark_tool_workdir,
+                        command=benchmark_command_str,
+                        network_mode="host",
+                        detach=True,
+                        cpuset_cpus=client_cpuset_cpus,
+                    )
+
+                    # Wait for container and get output
+                    try:
+                        exit_code = container.wait()
+                        client_container_stdout = container.logs().decode(
+                            "utf-8"
+                        )
+                        logging.info(
+                            f"Single client completed with exit code: {exit_code}"
+                        )
+                    except Exception as wait_error:
+                        logging.error(f"Single client wait error: {wait_error}")
+                        client_container_stdout = container.logs().decode(
+                            "utf-8"
+                        )
+                    finally:
+                        # Clean up container
+                        try:
+                            container.remove(force=True)
+                        except Exception as cleanup_error:
+                            logging.warning(
+                                f"Single client cleanup error: {cleanup_error}"
+                            )
 
             benchmark_end_time = datetime.datetime.now()
             benchmark_duration_seconds = (
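
One Docker SDK detail explains the reshuffle in this hunk: with detach=False, containers.run() blocks until exit and returns the logs themselves (hence the old assignment straight into client_container_stdout), while detach=True returns a handle whose logs are fetched after wait(). A compressed comparison, with placeholder image and commands:

import docker

client = docker.from_env()

# Old style: blocks until exit, returns logs as bytes, auto-removes afterwards
old_logs = client.containers.run("alpine:3", "echo old-style", remove=True)

# New style: returns a Container immediately; caller waits, reads, and cleans up
handle = client.containers.run("alpine:3", "echo new-style", detach=True)
handle.wait()
new_logs = handle.logs()
handle.remove(force=True)
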
@@ -895,18 +1394,47 @@ def process_self_contained_coordinator_stream(
                 client_container_stdout,
                 None,
             )
-            full_result_path = local_benchmark_output_filename
-            if "memtier_benchmark" in benchmark_tool:
-                full_result_path = "{}/{}".format(
-                    temporary_dir_client, local_benchmark_output_filename
+            # Check if we have multi-client results with aggregated JSON
+            if (
+                is_multiple_clients
+                and client_container_stdout.strip().startswith("{")
+            ):
+                # Use aggregated JSON from multi-client runner
+                logging.info(
+                    "Using aggregated JSON results from multi-client execution"
                 )
-            logging.info(f"Reading results json from {full_result_path}")
+                results_dict = json.loads(client_container_stdout)
+                # Print results table for multi-client
+                print_results_table_stdout(
+                    benchmark_config,
+                    default_metrics,
+                    results_dict,
+                    setup_type,
+                    test_name,
+                )
+                # Add results to overall summary table
+                prepare_overall_total_test_results(
+                    benchmark_config,
+                    default_metrics,
+                    results_dict,
+                    test_name,
+                    results_matrix,
+                    redis_conns,
+                )
+            else:
+                # Single client - read from file as usual
+                full_result_path = local_benchmark_output_filename
+                if "memtier_benchmark" in benchmark_tool:
+                    full_result_path = "{}/{}".format(
+                        temporary_dir_client, local_benchmark_output_filename
+                    )
+                logging.info(f"Reading results json from {full_result_path}")
 
-            with open(
-                full_result_path,
-                "r",
-            ) as json_file:
-                results_dict = json.load(json_file)
+                with open(
+                    full_result_path,
+                    "r",
+                ) as json_file:
+                    results_dict = json.load(json_file)
             print_results_table_stdout(
                 benchmark_config,
                 default_metrics,
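
The startswith("{") check works because run_multiple_clients returns either a json.dumps string (when at least one per-client JSON file was readable) or concatenated plain stdout. Its merge step uses dict.update(), so on any key collision the pubsub-sub-bench document wins; a small illustration with made-up keys and values:

import json

memtier_json = {"ALL STATS": {"Totals": {"Ops/sec": 100000.0}}}  # illustrative
pubsub_json = {"MessageRate": 25000.0}                           # illustrative
aggregated = memtier_json.copy()
aggregated.update(pubsub_json)  # pubsub keys overwrite any duplicates
stdout = json.dumps(aggregated, indent=2)
assert stdout.strip().startswith("{")  # the marker this hunk tests for
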
@@ -921,6 +1449,7 @@ def process_self_contained_coordinator_stream(
                 results_dict,
                 test_name,
                 results_matrix,
+                redis_conns,
             )
 
             dataset_load_duration_seconds = 0
@@ -995,6 +1524,22 @@ def process_self_contained_coordinator_stream(
         benchmark_tool_global=benchmark_tool_global,
     )
 
+    # Print Redis server information section before results
+    if len(results_matrix) > 0:
+        # Get redis_conns from the first test context (we need to pass it somehow)
+        # For now, try to get it from the current context if available
+        try:
+            # Try to get redis connection to display server info
+            import redis as redis_module
+
+            r = redis_module.StrictRedis(
+                host="localhost", port=6379, decode_responses=True
+            )
+            r.ping()  # Test connection
+            print_redis_info_section([r])
+        except Exception as e:
+            logging.info(f"Could not connect to Redis for server info: {e}")
+
     table_name = "Results for entire test-suite"
     results_matrix_headers = [
         "Test Name",
@@ -1125,8 +1670,53 @@ def print_results_table_stdout(
     writer.write_table()
 
 
+def print_redis_info_section(redis_conns):
+    """Print Redis server information as a separate section"""
+    if redis_conns is not None and len(redis_conns) > 0:
+        try:
+            redis_info = redis_conns[0].info()
+
+            print("\n# Redis Server Information")
+            redis_info_data = [
+                ["Redis Version", redis_info.get("redis_version", "unknown")],
+                ["Redis Git SHA1", redis_info.get("redis_git_sha1", "unknown")],
+                ["Redis Git Dirty", str(redis_info.get("redis_git_dirty", "unknown"))],
+                ["Redis Build ID", redis_info.get("redis_build_id", "unknown")],
+                ["Redis Mode", redis_info.get("redis_mode", "unknown")],
+                ["OS", redis_info.get("os", "unknown")],
+                ["Arch Bits", str(redis_info.get("arch_bits", "unknown"))],
+                ["GCC Version", redis_info.get("gcc_version", "unknown")],
+                ["Process ID", str(redis_info.get("process_id", "unknown"))],
+                ["TCP Port", str(redis_info.get("tcp_port", "unknown"))],
+                [
+                    "Uptime (seconds)",
+                    str(redis_info.get("uptime_in_seconds", "unknown")),
+                ],
+            ]
+
+            from pytablewriter import MarkdownTableWriter
+
+            writer = MarkdownTableWriter(
+                table_name="",
+                headers=["Property", "Value"],
+                value_matrix=redis_info_data,
+            )
+            writer.write_table()
+
+            logging.info(
+                f"Displayed Redis server information: Redis {redis_info.get('redis_version', 'unknown')}"
+            )
+        except Exception as e:
+            logging.warning(f"Failed to collect Redis server information: {e}")
+
+
 def prepare_overall_total_test_results(
-    benchmark_config, default_metrics, results_dict, test_name, overall_results_matrix
+    benchmark_config,
+    default_metrics,
+    results_dict,
+    test_name,
+    overall_results_matrix,
+    redis_conns=None,
 ):
     # check which metrics to extract
     (
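
A minimal usage sketch for the new helper, assuming a locally reachable Redis and the redis-py client:

import redis

conns = [redis.Redis(host="localhost", port=6379, decode_responses=True)]
print_redis_info_section(conns)  # renders selected INFO fields as a two-column Markdown table
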
@@ -1246,7 +1836,8 @@ def data_prepopulation_step(
                 preload_command_str,
             )
         )
-        client_container_stdout = docker_client.containers.run(
+        # Use explicit container management for preload tool
+        container = docker_client.containers.run(
            image=preload_image,
            volumes={
                temporary_dir: {
@@ -1254,15 +1845,30 @@
                    "mode": "rw",
                },
            },
-           auto_remove=True,
+           auto_remove=False,
            privileged=True,
            working_dir=benchmark_tool_workdir,
            command=preload_command_str,
            network_mode="host",
-           detach=False,
+           detach=True,
            cpuset_cpus=client_cpuset_cpus,
        )
 
+        # Wait for preload container and get output
+        try:
+            exit_code = container.wait()
+            client_container_stdout = container.logs().decode("utf-8")
+            logging.info(f"Preload tool completed with exit code: {exit_code}")
+        except Exception as wait_error:
+            logging.error(f"Preload tool wait error: {wait_error}")
+            client_container_stdout = container.logs().decode("utf-8")
+        finally:
+            # Clean up container
+            try:
+                container.remove(force=True)
+            except Exception as cleanup_error:
+                logging.warning(f"Preload tool cleanup error: {cleanup_error}")
+
         preload_end_time = datetime.datetime.now()
         preload_duration_seconds = calculate_client_tool_duration_and_check(
             preload_end_time, preload_start_time, "Preload", False