redis-benchmarks-specification 0.1.281__py3-none-any.whl → 0.1.283__py3-none-any.whl

This diff shows the changes between package versions that have been publicly released to one of the supported registries. It is provided for informational purposes only and reflects the package contents exactly as they appear in the respective public registries.

Potentially problematic release.


This version of redis-benchmarks-specification has been flagged as potentially problematic; consult the registry's advisory page for details before upgrading.

@@ -79,7 +79,9 @@ def run_local_command_with_timeout(command_str, timeout_seconds, description="co
79
79
  tuple: (success, stdout, stderr)
80
80
  """
81
81
  try:
82
- logging.info(f"Running {description} with {timeout_seconds}s timeout: {command_str}")
82
+ logging.info(
83
+ f"Running {description} with {timeout_seconds}s timeout: {command_str}"
84
+ )
83
85
 
84
86
  # Use shell=True to support complex command strings with pipes, etc.
85
87
  process = subprocess.Popen(
@@ -87,7 +89,7 @@ def run_local_command_with_timeout(command_str, timeout_seconds, description="co
87
89
  shell=True,
88
90
  stdout=subprocess.PIPE,
89
91
  stderr=subprocess.PIPE,
90
- text=True
92
+ text=True,
91
93
  )
92
94
 
93
95
  try:
@@ -106,7 +108,9 @@ def run_local_command_with_timeout(command_str, timeout_seconds, description="co
106
108
  logging.error(f"{description} timed out after {timeout_seconds} seconds")
107
109
  process.kill()
108
110
  try:
109
- stdout, stderr = process.communicate(timeout=5) # Give 5 seconds to cleanup
111
+ stdout, stderr = process.communicate(
112
+ timeout=5
113
+ ) # Give 5 seconds to cleanup
110
114
  except subprocess.TimeoutExpired:
111
115
  stdout, stderr = "", "Process killed due to timeout"
112
116
  return False, stdout, f"Timeout after {timeout_seconds} seconds. {stderr}"
@@ -136,7 +140,9 @@ def calculate_process_timeout(command_str, buffer_timeout):
136
140
  if test_time_match:
137
141
  test_time = int(test_time_match.group(1))
138
142
  timeout = test_time + buffer_timeout
139
- logging.info(f"Set process timeout to {timeout}s (test-time: {test_time}s + {buffer_timeout}s buffer)")
143
+ logging.info(
144
+ f"Set process timeout to {timeout}s (test-time: {test_time}s + {buffer_timeout}s buffer)"
145
+ )
140
146
  return timeout
141
147
 
142
148
  logging.info(f"Using default process timeout: {default_timeout}s")
@@ -170,7 +176,9 @@ def parse_size(size):
170
176
  return int(number * units[unit])
171
177
 
172
178
 
173
- def extract_expected_benchmark_duration(benchmark_command_str, override_memtier_test_time):
179
+ def extract_expected_benchmark_duration(
180
+ benchmark_command_str, override_memtier_test_time
181
+ ):
174
182
  """
175
183
  Extract expected benchmark duration from command string or override.
176
184
 
@@ -242,7 +250,9 @@ def run_multiple_clients(
242
250
  if "memtier_benchmark" in client_tool:
243
251
  # Set benchmark path based on local install option
244
252
  if args.benchmark_local_install:
245
- full_benchmark_path = getattr(args, 'memtier_bin_path', 'memtier_benchmark')
253
+ full_benchmark_path = getattr(
254
+ args, "memtier_bin_path", "memtier_benchmark"
255
+ )
246
256
  else:
247
257
  full_benchmark_path = f"/usr/local/bin/{client_tool}"
248
258
 
@@ -334,7 +344,9 @@ def run_multiple_clients(
334
344
  # Calculate container timeout
335
345
  container_timeout = 300 # 5 minutes default
336
346
  # Use new timeout_buffer argument, fallback to container_timeout_buffer for backward compatibility
337
- buffer_timeout = getattr(args, 'timeout_buffer', getattr(args, 'container_timeout_buffer', 60))
347
+ buffer_timeout = getattr(
348
+ args, "timeout_buffer", getattr(args, "container_timeout_buffer", 60)
349
+ )
338
350
  if "test-time" in benchmark_command_str:
339
351
  # Try to extract test time and add buffer
340
352
  import re
@@ -520,17 +532,29 @@ def run_multiple_clients(
520
532
  )
521
533
  elif "vector-db-benchmark" in tool:
522
534
  # For vector-db-benchmark, look for summary JSON file
523
- summary_files = [f for f in os.listdir(temporary_dir_client) if f.endswith("-summary.json")]
535
+ summary_files = [
536
+ f
537
+ for f in os.listdir(temporary_dir_client)
538
+ if f.endswith("-summary.json")
539
+ ]
524
540
  if summary_files:
525
- summary_filepath = os.path.join(temporary_dir_client, summary_files[0])
541
+ summary_filepath = os.path.join(
542
+ temporary_dir_client, summary_files[0]
543
+ )
526
544
  try:
527
- with open(summary_filepath, 'r') as f:
545
+ with open(summary_filepath, "r") as f:
528
546
  vector_json = json.load(f)
529
- logging.info(f"Successfully read vector-db-benchmark JSON output from {summary_files[0]}")
547
+ logging.info(
548
+ f"Successfully read vector-db-benchmark JSON output from {summary_files[0]}"
549
+ )
530
550
  except Exception as e:
531
- logging.warning(f"Failed to read vector-db-benchmark JSON from {summary_files[0]}: {e}")
551
+ logging.warning(
552
+ f"Failed to read vector-db-benchmark JSON from {summary_files[0]}: {e}"
553
+ )
532
554
  else:
533
- logging.warning(f"No vector-db-benchmark summary JSON file found for client {client_index}")
555
+ logging.warning(
556
+ f"No vector-db-benchmark summary JSON file found for client {client_index}"
557
+ )
534
558
 
535
559
  logging.info(
536
560
  f"Successfully read JSON output from client {client_index} ({tool})"
@@ -554,25 +578,33 @@ def run_multiple_clients(
554
578
  aggregated_json.update(pubsub_json)
555
579
  aggregated_json.update(vector_json)
556
580
  aggregated_stdout = json.dumps(aggregated_json, indent=2)
557
- logging.info("Using merged JSON results from memtier, pubsub-sub-bench, and vector-db-benchmark clients")
581
+ logging.info(
582
+ "Using merged JSON results from memtier, pubsub-sub-bench, and vector-db-benchmark clients"
583
+ )
558
584
  elif memtier_json and pubsub_json:
559
585
  # Use memtier as base and add pubsub metrics
560
586
  aggregated_json = memtier_json.copy()
561
587
  aggregated_json.update(pubsub_json)
562
588
  aggregated_stdout = json.dumps(aggregated_json, indent=2)
563
- logging.info("Using merged JSON results from memtier and pubsub-sub-bench clients")
589
+ logging.info(
590
+ "Using merged JSON results from memtier and pubsub-sub-bench clients"
591
+ )
564
592
  elif memtier_json and vector_json:
565
593
  # Use memtier as base and add vector metrics
566
594
  aggregated_json = memtier_json.copy()
567
595
  aggregated_json.update(vector_json)
568
596
  aggregated_stdout = json.dumps(aggregated_json, indent=2)
569
- logging.info("Using merged JSON results from memtier and vector-db-benchmark clients")
597
+ logging.info(
598
+ "Using merged JSON results from memtier and vector-db-benchmark clients"
599
+ )
570
600
  elif pubsub_json and vector_json:
571
601
  # Use pubsub as base and add vector metrics
572
602
  aggregated_json = pubsub_json.copy()
573
603
  aggregated_json.update(vector_json)
574
604
  aggregated_stdout = json.dumps(aggregated_json, indent=2)
575
- logging.info("Using merged JSON results from pubsub-sub-bench and vector-db-benchmark clients")
605
+ logging.info(
606
+ "Using merged JSON results from pubsub-sub-bench and vector-db-benchmark clients"
607
+ )
576
608
  elif memtier_json:
577
609
  # Only memtier available
578
610
  aggregated_json = memtier_json
@@ -591,7 +623,9 @@ def run_multiple_clients(
591
623
  else:
592
624
  # Fall back to concatenated stdout
593
625
  aggregated_stdout = "\n".join([r["stdout"] for r in successful_results])
594
- logging.warning("No JSON results found, falling back to concatenated stdout")
626
+ logging.warning(
627
+ "No JSON results found, falling back to concatenated stdout"
628
+ )
595
629
 
596
630
  return aggregated_stdout, results
597
631
 
@@ -913,7 +947,9 @@ def prepare_vector_db_benchmark_parameters(
913
947
 
914
948
  # Add custom arguments if specified
915
949
  if "arguments" in clientconfig:
916
- benchmark_command_str = " ".join(benchmark_command) + " " + clientconfig["arguments"]
950
+ benchmark_command_str = (
951
+ " ".join(benchmark_command) + " " + clientconfig["arguments"]
952
+ )
917
953
  else:
918
954
  benchmark_command_str = " ".join(benchmark_command)
919
955
 
@@ -1089,6 +1125,7 @@ def process_self_contained_coordinator_stream(
1089
1125
  current_cpu_pos = args.cpuset_start_pos
1090
1126
  temporary_dir_client = tempfile.mkdtemp(dir=home)
1091
1127
 
1128
+ # These will be updated after auto-detection
1092
1129
  tf_github_org = args.github_org
1093
1130
  tf_github_repo = args.github_repo
1094
1131
  tf_triggering_env = args.platform_name
@@ -1121,6 +1158,56 @@ def process_self_contained_coordinator_stream(
1121
1158
  )
1122
1159
  setup_name = "oss-standalone"
1123
1160
  r.ping()
1161
+
1162
+ # Auto-detect server information if not explicitly provided
1163
+ from redis_benchmarks_specification.__runner__.remote_profiling import (
1164
+ extract_server_info_for_args,
1165
+ extract_server_metadata_for_timeseries
1166
+ )
1167
+
1168
+ detected_info = extract_server_info_for_args(r)
1169
+ server_metadata = extract_server_metadata_for_timeseries(r)
1170
+
1171
+ # Use detected values if arguments weren't explicitly provided
1172
+ github_org = args.github_org
1173
+ github_repo = args.github_repo
1174
+
1175
+ # Auto-detect github_org if it's the default value
1176
+ if args.github_org == "redis" and detected_info["github_org"] != "redis":
1177
+ github_org = detected_info["github_org"]
1178
+ logging.info(f"Auto-detected github_org: {github_org}")
1179
+
1180
+ # Auto-detect github_repo if it's the default value
1181
+ if args.github_repo == "redis" and detected_info["github_repo"] != "redis":
1182
+ github_repo = detected_info["github_repo"]
1183
+ logging.info(f"Auto-detected github_repo: {github_repo}")
1184
+
1185
+ # Auto-detect version if it's the default value
1186
+ if args.github_version == "NA" and detected_info["github_version"] != "unknown":
1187
+ git_version = detected_info["github_version"]
1188
+ logging.info(f"Auto-detected github_version: {git_version}")
1189
+
1190
+ # Auto-detect git hash from server info if available
1191
+ if git_hash == "NA":
1192
+ try:
1193
+ server_info = r.info("server")
1194
+ redis_git_sha1 = server_info.get("redis_git_sha1", "")
1195
+ redis_build_id = server_info.get("redis_build_id", "")
1196
+
1197
+ # Use git_sha1 if available and not empty/zero
1198
+ if redis_git_sha1 and redis_git_sha1 not in ("", "0", "00000000"):
1199
+ git_hash = redis_git_sha1
1200
+ logging.info(f"Auto-detected git_hash from redis_git_sha1: {git_hash}")
1201
+ # Fallback to build_id if git_sha1 is not available
1202
+ elif redis_build_id and redis_build_id not in ("", "0"):
1203
+ git_hash = redis_build_id
1204
+ logging.info(f"Auto-detected git_hash from redis_build_id: {git_hash}")
1205
+ except Exception as e:
1206
+ logging.warning(f"Failed to auto-detect git hash: {e}")
1207
+
1208
+ # Update tf_github_org and tf_github_repo with detected values
1209
+ tf_github_org = github_org
1210
+ tf_github_repo = github_repo
1124
1211
  redis_conns = [r]
1125
1212
  if oss_cluster_api_enabled:
1126
1213
  redis_conns = []
@@ -1156,8 +1243,8 @@ def process_self_contained_coordinator_stream(
1156
1243
 
1157
1244
  # Check if all tested commands are supported by this Redis instance
1158
1245
  supported_commands = get_supported_redis_commands(redis_conns)
1159
- commands_supported, unsupported_commands = check_test_command_support(
1160
- benchmark_config, supported_commands
1246
+ commands_supported, unsupported_commands = (
1247
+ check_test_command_support(benchmark_config, supported_commands)
1161
1248
  )
1162
1249
 
1163
1250
  if not commands_supported:
@@ -1235,6 +1322,8 @@ def process_self_contained_coordinator_stream(
1235
1322
  benchmark_tool_workdir = client_mnt_point
1236
1323
 
1237
1324
  metadata = {}
1325
+ # Add server metadata from Redis INFO SERVER
1326
+ metadata.update(server_metadata)
1238
1327
  test_tls_cacert = None
1239
1328
  test_tls_cert = None
1240
1329
  test_tls_key = None
@@ -1328,7 +1417,11 @@ def process_self_contained_coordinator_stream(
1328
1417
  if "dbconfig" in benchmark_config:
1329
1418
  if "preload_tool" in benchmark_config["dbconfig"]:
1330
1419
  # Get timeout buffer for preload
1331
- buffer_timeout = getattr(args, 'timeout_buffer', getattr(args, 'container_timeout_buffer', 60))
1420
+ buffer_timeout = getattr(
1421
+ args,
1422
+ "timeout_buffer",
1423
+ getattr(args, "container_timeout_buffer", 60),
1424
+ )
1332
1425
 
1333
1426
  res = data_prepopulation_step(
1334
1427
  benchmark_config,
@@ -1397,8 +1490,13 @@ def process_self_contained_coordinator_stream(
1397
1490
  benchmark_tool = "redis-benchmark"
1398
1491
 
1399
1492
  # Set benchmark path based on local install option
1400
- if args.benchmark_local_install and "memtier_benchmark" in benchmark_tool:
1401
- full_benchmark_path = getattr(args, 'memtier_bin_path', 'memtier_benchmark')
1493
+ if (
1494
+ args.benchmark_local_install
1495
+ and "memtier_benchmark" in benchmark_tool
1496
+ ):
1497
+ full_benchmark_path = getattr(
1498
+ args, "memtier_bin_path", "memtier_benchmark"
1499
+ )
1402
1500
  else:
1403
1501
  full_benchmark_path = f"/usr/local/bin/{benchmark_tool}"
1404
1502
 
@@ -1564,7 +1662,7 @@ def process_self_contained_coordinator_stream(
1564
1662
  args.remote_profile_port,
1565
1663
  args.remote_profile_output_dir,
1566
1664
  args.remote_profile_username,
1567
- args.remote_profile_password
1665
+ args.remote_profile_password,
1568
1666
  )
1569
1667
 
1570
1668
  # Extract expected benchmark duration
@@ -1576,13 +1674,17 @@ def process_self_contained_coordinator_stream(
1576
1674
  profiling_started = remote_profiler.start_profiling(
1577
1675
  redis_conns[0] if redis_conns else None,
1578
1676
  test_name,
1579
- expected_duration
1677
+ expected_duration,
1580
1678
  )
1581
1679
 
1582
1680
  if profiling_started:
1583
- logging.info(f"Started remote profiling for test: {test_name}")
1681
+ logging.info(
1682
+ f"Started remote profiling for test: {test_name}"
1683
+ )
1584
1684
  else:
1585
- logging.warning(f"Failed to start remote profiling for test: {test_name}")
1685
+ logging.warning(
1686
+ f"Failed to start remote profiling for test: {test_name}"
1687
+ )
1586
1688
  remote_profiler = None
1587
1689
 
1588
1690
  except Exception as e:
@@ -1634,14 +1736,22 @@ def process_self_contained_coordinator_stream(
1634
1736
  )
1635
1737
 
1636
1738
  # Calculate timeout for local process
1637
- buffer_timeout = getattr(args, 'timeout_buffer', getattr(args, 'container_timeout_buffer', 60))
1638
- process_timeout = calculate_process_timeout(benchmark_command_str, buffer_timeout)
1739
+ buffer_timeout = getattr(
1740
+ args,
1741
+ "timeout_buffer",
1742
+ getattr(args, "container_timeout_buffer", 60),
1743
+ )
1744
+ process_timeout = calculate_process_timeout(
1745
+ benchmark_command_str, buffer_timeout
1746
+ )
1639
1747
 
1640
1748
  # Run with timeout
1641
- success, client_container_stdout, stderr = run_local_command_with_timeout(
1642
- benchmark_command_str,
1643
- process_timeout,
1644
- "memtier benchmark"
1749
+ success, client_container_stdout, stderr = (
1750
+ run_local_command_with_timeout(
1751
+ benchmark_command_str,
1752
+ process_timeout,
1753
+ "memtier benchmark",
1754
+ )
1645
1755
  )
1646
1756
 
1647
1757
  if not success:
@@ -1666,7 +1776,9 @@ def process_self_contained_coordinator_stream(
1666
1776
  # Set working directory based on tool
1667
1777
  working_dir = benchmark_tool_workdir
1668
1778
  if "vector-db-benchmark" in benchmark_tool:
1669
- working_dir = "/app" # vector-db-benchmark needs to run from /app
1779
+ working_dir = (
1780
+ "/app" # vector-db-benchmark needs to run from /app
1781
+ )
1670
1782
 
1671
1783
  # Prepare volumes
1672
1784
  volumes = {
@@ -1697,7 +1809,9 @@ def process_self_contained_coordinator_stream(
1697
1809
 
1698
1810
  # Only add user for non-vector-db-benchmark tools to avoid permission issues
1699
1811
  if "vector-db-benchmark" not in benchmark_tool:
1700
- container_kwargs["user"] = f"{os.getuid()}:{os.getgid()}"
1812
+ container_kwargs["user"] = (
1813
+ f"{os.getuid()}:{os.getgid()}"
1814
+ )
1701
1815
 
1702
1816
  # Add environment variables for vector-db-benchmark
1703
1817
  if "vector-db-benchmark" in benchmark_tool:
@@ -1761,13 +1875,19 @@ def process_self_contained_coordinator_stream(
1761
1875
  if remote_profiler is not None:
1762
1876
  try:
1763
1877
  logging.info("Waiting for remote profiling to complete...")
1764
- profiling_success = remote_profiler.wait_for_completion(timeout=60)
1878
+ profiling_success = remote_profiler.wait_for_completion(
1879
+ timeout=60
1880
+ )
1765
1881
  if profiling_success:
1766
1882
  logging.info("Remote profiling completed successfully")
1767
1883
  else:
1768
- logging.warning("Remote profiling did not complete successfully")
1884
+ logging.warning(
1885
+ "Remote profiling did not complete successfully"
1886
+ )
1769
1887
  except Exception as e:
1770
- logging.error(f"Error waiting for remote profiling completion: {e}")
1888
+ logging.error(
1889
+ f"Error waiting for remote profiling completion: {e}"
1890
+ )
1771
1891
 
1772
1892
  logging.info("Printing client tool stdout output")
1773
1893
  if client_container_stdout:
@@ -1835,21 +1955,36 @@ def process_self_contained_coordinator_stream(
1835
1955
  )
1836
1956
  elif "vector-db-benchmark" in benchmark_tool:
1837
1957
  # For vector-db-benchmark, look for summary JSON file
1838
- summary_files = [f for f in os.listdir(temporary_dir_client) if f.endswith("-summary.json")]
1958
+ summary_files = [
1959
+ f
1960
+ for f in os.listdir(temporary_dir_client)
1961
+ if f.endswith("-summary.json")
1962
+ ]
1839
1963
  if summary_files:
1840
- full_result_path = os.path.join(temporary_dir_client, summary_files[0])
1841
- logging.info(f"Found vector-db-benchmark summary file: {summary_files[0]}")
1964
+ full_result_path = os.path.join(
1965
+ temporary_dir_client, summary_files[0]
1966
+ )
1967
+ logging.info(
1968
+ f"Found vector-db-benchmark summary file: {summary_files[0]}"
1969
+ )
1842
1970
  else:
1843
- logging.warning("No vector-db-benchmark summary JSON file found")
1971
+ logging.warning(
1972
+ "No vector-db-benchmark summary JSON file found"
1973
+ )
1844
1974
  # Create empty results dict to avoid crash
1845
1975
  results_dict = {}
1846
1976
 
1847
1977
  logging.info(f"Reading results json from {full_result_path}")
1848
1978
 
1849
- if "vector-db-benchmark" in benchmark_tool and not os.path.exists(full_result_path):
1979
+ if (
1980
+ "vector-db-benchmark" in benchmark_tool
1981
+ and not os.path.exists(full_result_path)
1982
+ ):
1850
1983
  # Handle case where vector-db-benchmark didn't produce results
1851
1984
  results_dict = {}
1852
- logging.warning("Vector-db-benchmark did not produce results file")
1985
+ logging.warning(
1986
+ "Vector-db-benchmark did not produce results file"
1987
+ )
1853
1988
  else:
1854
1989
  with open(
1855
1990
  full_result_path,
@@ -1897,6 +2032,7 @@ def process_self_contained_coordinator_stream(
1897
2032
  tf_triggering_env,
1898
2033
  topology_spec_name,
1899
2034
  default_metrics,
2035
+ git_hash,
1900
2036
  )
1901
2037
  test_result = True
1902
2038
  total_test_suite_runs = total_test_suite_runs + 1
@@ -1990,7 +2126,9 @@ def get_maxmemory(r):
1990
2126
 
1991
2127
  # Check if maxmemory key exists in Redis memory info
1992
2128
  if "maxmemory" not in memory_info:
1993
- logging.warning("maxmemory not present in Redis memory info. Cannot enforce memory checks.")
2129
+ logging.warning(
2130
+ "maxmemory not present in Redis memory info. Cannot enforce memory checks."
2131
+ )
1994
2132
  return 0
1995
2133
 
1996
2134
  maxmemory = int(memory_info["maxmemory"])
@@ -2085,10 +2223,12 @@ def print_results_table_stdout(
2085
2223
  # Use resolved metric name for precision_summary metrics, otherwise use original path
2086
2224
  def get_display_name(x):
2087
2225
  # For precision_summary metrics with wildcards, construct the resolved path
2088
- if (len(x) > 1 and
2089
- isinstance(x[0], str) and
2090
- "precision_summary" in x[0] and
2091
- "*" in x[0]):
2226
+ if (
2227
+ len(x) > 1
2228
+ and isinstance(x[0], str)
2229
+ and "precision_summary" in x[0]
2230
+ and "*" in x[0]
2231
+ ):
2092
2232
 
2093
2233
  # Look for the precision level in the cleaned metrics logs
2094
2234
  # We need to find the corresponding cleaned metric to get the precision level
@@ -2097,17 +2237,19 @@ def print_results_table_stdout(
2097
2237
 
2098
2238
  # Since we know from logs that the precision level is available,
2099
2239
  # let's reconstruct it from the metric context path (x[1]) if available
2100
- if len(x) > 1 and isinstance(x[1], str) and x[1].startswith("'") and x[1].endswith("'"):
2240
+ if (
2241
+ len(x) > 1
2242
+ and isinstance(x[1], str)
2243
+ and x[1].startswith("'")
2244
+ and x[1].endswith("'")
2245
+ ):
2101
2246
  precision_level = x[1] # This should be something like "'1.0000'"
2102
2247
  resolved_path = x[0].replace("*", precision_level)
2103
2248
  return resolved_path
2104
2249
 
2105
2250
  return x[0] # Use original path
2106
2251
 
2107
- results_matrix = [
2108
- [get_display_name(x), f"{x[3]:.3f}"]
2109
- for x in results_matrix
2110
- ]
2252
+ results_matrix = [[get_display_name(x), f"{x[3]:.3f}"] for x in results_matrix]
2111
2253
  writer = MarkdownTableWriter(
2112
2254
  table_name=table_name,
2113
2255
  headers=results_matrix_headers,
@@ -2123,16 +2265,28 @@ def print_redis_info_section(redis_conns):
2123
2265
  redis_info = redis_conns[0].info()
2124
2266
  server_name = "redis"
2125
2267
  if "server_name" in redis_info:
2126
- server_name = redis_info['server_name']
2268
+ server_name = redis_info["server_name"]
2127
2269
 
2128
2270
  print("\n# Redis Server Information")
2129
2271
  redis_info_data = [
2130
- [f"{server_name} version", redis_info.get(f"{server_name}_version", "unknown")],
2272
+ [
2273
+ f"{server_name} version",
2274
+ redis_info.get(f"{server_name}_version", "unknown"),
2275
+ ],
2131
2276
  ["redis version", redis_info.get("redis_version", "unknown")],
2132
2277
  ["io_threads_active", redis_info.get("io_threads_active", "unknown")],
2133
- [f"{server_name} Git SHA1", redis_info.get("redis_git_sha1", "unknown")],
2134
- [f"{server_name} Git Dirty", str(redis_info.get("redis_git_dirty", "unknown"))],
2135
- [f"{server_name} Build ID", redis_info.get("redis_build_id", "unknown")],
2278
+ [
2279
+ f"{server_name} Git SHA1",
2280
+ redis_info.get("redis_git_sha1", "unknown"),
2281
+ ],
2282
+ [
2283
+ f"{server_name} Git Dirty",
2284
+ str(redis_info.get("redis_git_dirty", "unknown")),
2285
+ ],
2286
+ [
2287
+ f"{server_name} Build ID",
2288
+ redis_info.get("redis_build_id", "unknown"),
2289
+ ],
2136
2290
  [f"{server_name} Mode", redis_info.get("redis_mode", "unknown")],
2137
2291
  ["OS", redis_info.get("os", "unknown")],
2138
2292
  ["Arch Bits", str(redis_info.get("arch_bits", "unknown"))],
@@ -2167,7 +2321,9 @@ def get_supported_redis_commands(redis_conns):
2167
2321
  try:
2168
2322
  # Execute COMMAND to get all supported commands
2169
2323
  commands_info = redis_conns[0].execute_command("COMMAND")
2170
- logging.info(f"COMMAND response type: {type(commands_info)}, length: {len(commands_info) if hasattr(commands_info, '__len__') else 'N/A'}")
2324
+ logging.info(
2325
+ f"COMMAND response type: {type(commands_info)}, length: {len(commands_info) if hasattr(commands_info, '__len__') else 'N/A'}"
2326
+ )
2171
2327
 
2172
2328
  # Extract command names
2173
2329
  supported_commands = set()
@@ -2176,7 +2332,7 @@ def get_supported_redis_commands(redis_conns):
2176
2332
  # COMMAND response is a dict with command names as keys
2177
2333
  for cmd_name in commands_info.keys():
2178
2334
  if isinstance(cmd_name, bytes):
2179
- cmd_name = cmd_name.decode('utf-8')
2335
+ cmd_name = cmd_name.decode("utf-8")
2180
2336
  supported_commands.add(str(cmd_name).upper())
2181
2337
  elif isinstance(commands_info, (list, tuple)):
2182
2338
  # Fallback for list format (first element of each command info array)
@@ -2184,10 +2340,12 @@ def get_supported_redis_commands(redis_conns):
2184
2340
  if isinstance(cmd_info, (list, tuple)) and len(cmd_info) > 0:
2185
2341
  cmd_name = cmd_info[0]
2186
2342
  if isinstance(cmd_name, bytes):
2187
- cmd_name = cmd_name.decode('utf-8')
2343
+ cmd_name = cmd_name.decode("utf-8")
2188
2344
  supported_commands.add(str(cmd_name).upper())
2189
2345
 
2190
- logging.info(f"Retrieved {len(supported_commands)} supported Redis commands")
2346
+ logging.info(
2347
+ f"Retrieved {len(supported_commands)} supported Redis commands"
2348
+ )
2191
2349
 
2192
2350
  # Log some sample commands for debugging
2193
2351
  if supported_commands:
@@ -2195,7 +2353,9 @@ def get_supported_redis_commands(redis_conns):
2195
2353
  logging.info(f"Sample commands: {sample_commands}")
2196
2354
 
2197
2355
  # Check specifically for vector commands
2198
- vector_commands = [cmd for cmd in supported_commands if cmd.startswith('V')]
2356
+ vector_commands = [
2357
+ cmd for cmd in supported_commands if cmd.startswith("V")
2358
+ ]
2199
2359
  if vector_commands:
2200
2360
  logging.info(f"Vector commands found: {sorted(vector_commands)}")
2201
2361
 
@@ -2255,13 +2415,20 @@ def prepare_overall_total_test_results(
2255
2415
  # Use the same display name logic as in the individual test results
2256
2416
  def get_overall_display_name(x):
2257
2417
  # For precision_summary metrics with wildcards, construct the resolved path
2258
- if (len(x) > 1 and
2259
- isinstance(x[0], str) and
2260
- "precision_summary" in x[0] and
2261
- "*" in x[0]):
2418
+ if (
2419
+ len(x) > 1
2420
+ and isinstance(x[0], str)
2421
+ and "precision_summary" in x[0]
2422
+ and "*" in x[0]
2423
+ ):
2262
2424
 
2263
2425
  # Reconstruct resolved path from metric context path (x[1]) if available
2264
- if len(x) > 1 and isinstance(x[1], str) and x[1].startswith("'") and x[1].endswith("'"):
2426
+ if (
2427
+ len(x) > 1
2428
+ and isinstance(x[1], str)
2429
+ and x[1].startswith("'")
2430
+ and x[1].endswith("'")
2431
+ ):
2265
2432
  precision_level = x[1] # This should be something like "'1.0000'"
2266
2433
  resolved_path = x[0].replace("*", precision_level)
2267
2434
  return resolved_path
@@ -2269,7 +2436,8 @@ def prepare_overall_total_test_results(
2269
2436
  return x[0] # Use original path
2270
2437
 
2271
2438
  current_test_results_matrix = [
2272
- [test_name, get_overall_display_name(x), f"{x[3]:.3f}"] for x in current_test_results_matrix
2439
+ [test_name, get_overall_display_name(x), f"{x[3]:.3f}"]
2440
+ for x in current_test_results_matrix
2273
2441
  ]
2274
2442
  overall_results_matrix.extend(current_test_results_matrix)
2275
2443
 
@@ -2317,7 +2485,7 @@ def data_prepopulation_step(
2317
2485
 
2318
2486
  # Set preload tool path based on local install option
2319
2487
  if benchmark_local_install and "memtier_benchmark" in preload_tool and args:
2320
- full_benchmark_path = getattr(args, 'memtier_bin_path', 'memtier_benchmark')
2488
+ full_benchmark_path = getattr(args, "memtier_bin_path", "memtier_benchmark")
2321
2489
  else:
2322
2490
  full_benchmark_path = f"/usr/local/bin/{preload_tool}"
2323
2491
  client_mnt_point = "/mnt/client/"
@@ -2364,13 +2532,13 @@ def data_prepopulation_step(
2364
2532
  )
2365
2533
 
2366
2534
  # Calculate timeout for preload process
2367
- process_timeout = calculate_process_timeout(preload_command_str, timeout_buffer)
2535
+ process_timeout = calculate_process_timeout(
2536
+ preload_command_str, timeout_buffer
2537
+ )
2368
2538
 
2369
2539
  # Run with timeout
2370
2540
  success, client_container_stdout, stderr = run_local_command_with_timeout(
2371
- preload_command_str,
2372
- process_timeout,
2373
- "memtier preload"
2541
+ preload_command_str, process_timeout, "memtier preload"
2374
2542
  )
2375
2543
 
2376
2544
  if not success:
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: redis-benchmarks-specification
3
- Version: 0.1.281
3
+ Version: 0.1.283
4
4
  Summary: The Redis benchmarks specification describes the cross-language/tools requirements and expectations to foster performance and observability standards around redis related technologies. Members from both industry and academia, including organizations and individuals are encouraged to contribute.
5
5
  Author: filipecosta90
6
6
  Author-email: filipecosta.90@gmail.com