redis-benchmarks-specification 0.1.323__py3-none-any.whl → 0.1.325__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of redis-benchmarks-specification might be problematic; consult the advisory on the package registry page for more details.

@@ -502,6 +502,7 @@ def builder_process_stream(
502
502
  tests_priority_lower_limit,
503
503
  tests_priority_upper_limit,
504
504
  tests_regexp,
505
+ ".*", # command_regexp - default to all commands
505
506
  use_git_timestamp,
506
507
  server_name,
507
508
  github_org,
@@ -642,6 +643,7 @@ def generate_benchmark_stream_request(
642
643
  tests_priority_lower_limit=0,
643
644
  tests_priority_upper_limit=10000,
644
645
  tests_regexp=".*",
646
+ command_regexp=".*",
645
647
  use_git_timestamp=False,
646
648
  server_name="redis",
647
649
  github_org="redis",
@@ -658,6 +660,7 @@ def generate_benchmark_stream_request(
658
660
  "tests_priority_upper_limit": tests_priority_upper_limit,
659
661
  "tests_priority_lower_limit": tests_priority_lower_limit,
660
662
  "tests_groups_regexp": tests_groups_regexp,
663
+ "command_regexp": command_regexp,
661
664
  "server_name": server_name,
662
665
  "github_org": github_org,
663
666
  "github_repo": github_repo,
@@ -217,4 +217,10 @@ def spec_cli_args(parser):
217
217
  default=-1,
218
218
  help="Wait x sections for build. If -1, waits forever.",
219
219
  )
220
+ parser.add_argument(
221
+ "--command-regex",
222
+ type=str,
223
+ default=".*",
224
+ help="Filter tests by command using regex. Only tests that include commands matching this regex will be processed.",
225
+ )
220
226
  return parser
@@ -46,6 +46,30 @@ def create_compare_arguments(parser):
46
46
  default="",
47
47
  help="specify a test (or a comma separated list of tests) to use for comparison. If none is specified by default will use all of them.",
48
48
  )
49
+ parser.add_argument(
50
+ "--extra-filters",
51
+ type=str,
52
+ default="",
53
+ help="specify extra filters to pass to baseline and comparison.",
54
+ )
55
+ parser.add_argument(
56
+ "--use-test-suites-folder",
57
+ action="store_true",
58
+ default=False,
59
+ help="Use test names from YAML files in test-suites folder instead of database",
60
+ )
61
+ parser.add_argument(
62
+ "--generate-boxplot",
63
+ action="store_true",
64
+ default=False,
65
+ help="Generate box plot showing performance change distribution per command",
66
+ )
67
+ parser.add_argument(
68
+ "--boxplot-output",
69
+ type=str,
70
+ default="command_performance_boxplot.png",
71
+ help="Output filename for the box plot (supports .png, .svg, .pdf)",
72
+ )
49
73
  parser.add_argument(
50
74
  "--defaults_filename",
51
75
  type=str,
@@ -155,6 +179,20 @@ def create_compare_arguments(parser):
155
179
  parser.add_argument("--simple-table", type=bool, default=False)
156
180
  parser.add_argument("--use_metric_context_path", type=bool, default=False)
157
181
  parser.add_argument("--testname_regex", type=str, default=".*", required=False)
182
+ parser.add_argument(
183
+ "--command-group-regex",
184
+ type=str,
185
+ default=".*",
186
+ required=False,
187
+ help="Filter commands by command group using regex. Only commands belonging to matching groups will be included in boxplot and summary.",
188
+ )
189
+ parser.add_argument(
190
+ "--command-regex",
191
+ type=str,
192
+ default=".*",
193
+ required=False,
194
+ help="Filter tests by command using regex. Only tests that include commands matching this regex will be processed.",
195
+ )
158
196
  parser.add_argument(
159
197
  "--regression_str", type=str, default="REGRESSION", required=False
160
198
  )
@@ -15,10 +15,29 @@ import datetime as dt
15
15
  import os
16
16
  from tqdm import tqdm
17
17
  import argparse
18
+ import numpy as np
18
19
 
19
20
  from io import StringIO
20
21
  import sys
21
22
 
23
+ # Import command categorization function
24
+ try:
25
+ from utils.summary import categorize_command
26
+ except ImportError:
27
+ # Fallback if utils.summary is not available
28
+ def categorize_command(command):
29
+ return "unknown"
30
+
31
+
32
+ # Optional matplotlib import for box plot generation
33
+ try:
34
+ import matplotlib.pyplot as plt
35
+
36
+ MATPLOTLIB_AVAILABLE = True
37
+ except ImportError:
38
+ MATPLOTLIB_AVAILABLE = False
39
+ logging.warning("matplotlib not available, box plot generation will be disabled")
40
+
22
41
  from redis_benchmarks_specification.__common__.github import (
23
42
  update_comment_if_needed,
24
43
  create_new_pr_comment,
@@ -355,6 +374,8 @@ def compare_command_logic(args, project_name, project_version):
355
374
  total_stable,
356
375
  total_unstable,
357
376
  total_comparison_points,
377
+ boxplot_data,
378
+ command_change,
358
379
  ) = compute_regression_table(
359
380
  rts,
360
381
  tf_github_org,
@@ -399,6 +420,11 @@ def compare_command_logic(args, project_name, project_version):
399
420
  args.regression_str,
400
421
  args.improvement_str,
401
422
  tests_with_config,
423
+ args.use_test_suites_folder,
424
+ testsuites_folder,
425
+ args.extra_filters,
426
+ getattr(args, "command_group_regex", ".*"),
427
+ getattr(args, "command_regex", ".*"),
402
428
  )
403
429
  total_regressions = len(regressions_list)
404
430
  total_improvements = len(improvements_list)
@@ -432,7 +458,26 @@ def compare_command_logic(args, project_name, project_version):
432
458
  args.regressions_percent_lower_limit,
433
459
  regressions_list,
434
460
  improvements_list,
461
+ args.improvement_str,
462
+ args.regression_str,
435
463
  )
464
+
465
+ # Generate box plot if requested
466
+ if args.generate_boxplot and command_change:
467
+ if MATPLOTLIB_AVAILABLE:
468
+ logging.info(f"Generating box plot with {len(command_change)} commands...")
469
+ generate_command_performance_boxplot_from_command_data(
470
+ command_change,
471
+ args.boxplot_output,
472
+ args.regression_str,
473
+ args.improvement_str,
474
+ getattr(args, "command_group_regex", ".*"),
475
+ )
476
+ else:
477
+ logging.error(
478
+ "Box plot generation requested but matplotlib is not available"
479
+ )
480
+
436
481
  return (
437
482
  detected_regressions,
438
483
  "",
@@ -474,6 +519,8 @@ def prepare_regression_comment(
474
519
  regressions_percent_lower_limit,
475
520
  regressions_list=[],
476
521
  improvements_list=[],
522
+ improvement_str="Improvement",
523
+ regression_str="Regression",
477
524
  ):
478
525
  if total_comparison_points > 0:
479
526
  comment_body = "### Automated performance analysis summary\n\n"
@@ -513,21 +560,24 @@ def prepare_regression_comment(
513
560
  )
514
561
  )
515
562
  if total_improvements > 0:
516
- comparison_summary += "- Detected a total of {} improvements above the improvement water line.\n".format(
517
- total_improvements
563
+ comparison_summary += "- Detected a total of {} improvements above the improvement water line ({}).\n".format(
564
+ total_improvements, improvement_str
518
565
  )
519
566
  if len(improvements_list) > 0:
520
- regression_values = [l[1] for l in improvements_list]
521
- regression_df = pd.DataFrame(regression_values)
522
- median_regression = round(float(regression_df.median().iloc[0]), 1)
523
- max_regression = round(float(regression_df.max().iloc[0]), 1)
524
- min_regression = round(float(regression_df.min().iloc[0]), 1)
567
+ improvement_values = [l[1] for l in improvements_list]
568
+ improvement_df = pd.DataFrame(improvement_values)
569
+ median_improvement = round(float(improvement_df.median().iloc[0]), 1)
570
+ max_improvement = round(float(improvement_df.max().iloc[0]), 1)
571
+ min_improvement = round(float(improvement_df.min().iloc[0]), 1)
572
+ p25_improvement = round(float(improvement_df.quantile(0.25).iloc[0]), 1)
573
+ p75_improvement = round(float(improvement_df.quantile(0.75).iloc[0]), 1)
525
574
 
526
- comparison_summary += f" - Median/Common-Case improvement was {median_regression}% and ranged from [{min_regression}%,{max_regression}%].\n"
575
+ comparison_summary += f" - The median improvement ({improvement_str}) was {median_improvement}%, with values ranging from {min_improvement}% to {max_improvement}%.\n"
576
+ comparison_summary += f" - Quartile distribution: P25={p25_improvement}%, P50={median_improvement}%, P75={p75_improvement}%.\n"
527
577
 
528
578
  if total_regressions > 0:
529
- comparison_summary += "- Detected a total of {} regressions bellow the regression water line {}.\n".format(
530
- total_regressions, regressions_percent_lower_limit
579
+ comparison_summary += "- Detected a total of {} regressions below the regression water line of {} ({}).\n".format(
580
+ total_regressions, regressions_percent_lower_limit, regression_str
531
581
  )
532
582
  if len(regressions_list) > 0:
533
583
  regression_values = [l[1] for l in regressions_list]
@@ -535,8 +585,11 @@ def prepare_regression_comment(
535
585
  median_regression = round(float(regression_df.median().iloc[0]), 1)
536
586
  max_regression = round(float(regression_df.max().iloc[0]), 1)
537
587
  min_regression = round(float(regression_df.min().iloc[0]), 1)
588
+ p25_regression = round(float(regression_df.quantile(0.25).iloc[0]), 1)
589
+ p75_regression = round(float(regression_df.quantile(0.75).iloc[0]), 1)
538
590
 
539
- comparison_summary += f" - Median/Common-Case regression was {median_regression}% and ranged from [{min_regression}%,{max_regression}%].\n"
591
+ comparison_summary += f" - The median regression ({regression_str}) was {median_regression}%, with values ranging from {min_regression}% to {max_regression}%.\n"
592
+ comparison_summary += f" - Quartile distribution: P25={p25_regression}%, P50={median_regression}%, P75={p75_regression}%.\n"
540
593
 
541
594
  comment_body += comparison_summary
542
595
  comment_body += "\n"
@@ -686,6 +739,11 @@ def compute_regression_table(
686
739
  regression_str="REGRESSION",
687
740
  improvement_str="IMPROVEMENT",
688
741
  tests_with_config={},
742
+ use_test_suites_folder=False,
743
+ test_suites_folder=None,
744
+ extra_filters="",
745
+ command_group_regex=".*",
746
+ command_regex=".*",
689
747
  ):
690
748
  START_TIME_NOW_UTC, _, _ = get_start_time_vars()
691
749
  START_TIME_LAST_MONTH_UTC = START_TIME_NOW_UTC - datetime.timedelta(days=31)
@@ -746,10 +804,18 @@ def compute_regression_table(
746
804
  if test != "":
747
805
  test_names = test.split(",")
748
806
  logging.info("Using test name {}".format(test_names))
807
+ elif use_test_suites_folder:
808
+ test_names = get_test_names_from_yaml_files(
809
+ test_suites_folder, tags_regex_string
810
+ )
749
811
  else:
750
812
  test_names = get_test_names_from_db(
751
813
  rts, tags_regex_string, test_names, used_key
752
814
  )
815
+
816
+ # Apply command regex filtering to tests_with_config
817
+ tests_with_config = filter_tests_by_command_regex(tests_with_config, command_regex)
818
+
753
819
  (
754
820
  detected_regressions,
755
821
  table_full,
@@ -770,6 +836,7 @@ def compute_regression_table(
770
836
  no_datapoints_list,
771
837
  group_change,
772
838
  command_change,
839
+ boxplot_data,
773
840
  ) = from_rts_to_regression_table(
774
841
  baseline_deployment_name,
775
842
  comparison_deployment_name,
@@ -803,6 +870,7 @@ def compute_regression_table(
803
870
  regression_str,
804
871
  improvement_str,
805
872
  tests_with_config,
873
+ extra_filters,
806
874
  )
807
875
  logging.info(
808
876
  "Printing differential analysis between {} and {}".format(
@@ -818,19 +886,40 @@ def compute_regression_table(
818
886
  )
819
887
 
820
888
  table_output += "<details>\n <summary>By GROUP change csv:</summary>\n\n"
821
- table_output += "\ncommand_group,min_change,max_change \n"
889
+ table_output += (
890
+ "\ncommand_group,min_change,q1_change,median_change,q3_change,max_change \n"
891
+ )
822
892
  for group_name, changes_list in group_change.items():
823
- max_change = max(changes_list)
824
893
  min_change = min(changes_list)
825
- table_output += f"{group_name},{min_change:.3f},{max_change:.3f}\n"
894
+ q1_change = np.percentile(changes_list, 25)
895
+ median_change = np.median(changes_list)
896
+ q3_change = np.percentile(changes_list, 75)
897
+ max_change = max(changes_list)
898
+ table_output += f"{group_name},{min_change:.3f},{q1_change:.3f},{median_change:.3f},{q3_change:.3f},{max_change:.3f}\n"
826
899
  table_output += "\n</details>\n"
827
900
  table_output += "\n\n"
828
901
  table_output += "<details>\n <summary>By COMMAND change csv:</summary>\n\n"
829
- table_output += "\ncommand,min_change,max_change \n"
830
- for command_name, changes_list in command_change.items():
831
- max_change = max(changes_list)
902
+ table_output += (
903
+ "\ncommand,min_change,q1_change,median_change,q3_change,max_change \n"
904
+ )
905
+
906
+ # Filter commands by command group regex if specified
907
+ filtered_command_change = command_change
908
+ if command_group_regex != ".*":
909
+ group_regex = re.compile(command_group_regex)
910
+ filtered_command_change = {}
911
+ for command_name, changes_list in command_change.items():
912
+ command_group = categorize_command(command_name.lower())
913
+ if re.search(group_regex, command_group):
914
+ filtered_command_change[command_name] = changes_list
915
+
916
+ for command_name, changes_list in filtered_command_change.items():
832
917
  min_change = min(changes_list)
833
- table_output += f"{command_name},{min_change:.3f},{max_change:.3f}\n"
918
+ q1_change = np.percentile(changes_list, 25)
919
+ median_change = np.median(changes_list)
920
+ q3_change = np.percentile(changes_list, 75)
921
+ max_change = max(changes_list)
922
+ table_output += f"{command_name},{min_change:.3f},{q1_change:.3f},{median_change:.3f},{q3_change:.3f},{max_change:.3f}\n"
834
923
  table_output += "\n</details>\n"
835
924
 
836
925
  if total_unstable > 0:
@@ -954,6 +1043,8 @@ def compute_regression_table(
954
1043
  total_stable,
955
1044
  total_unstable,
956
1045
  total_comparison_points,
1046
+ boxplot_data,
1047
+ command_change,
957
1048
  )
958
1049
 
959
1050
 
@@ -1067,11 +1158,11 @@ def get_by_strings(
1067
1158
 
1068
1159
  if comparison_hash is not None:
1069
1160
  # check if we had already covered comparison
1070
- if comparison_covered:
1071
- logging.error(
1072
- "--comparison-branch, --comparison-tag, --comparison-hash, --comparison-target-branch, and --comparison-target-table are mutually exclusive. Pick one..."
1073
- )
1074
- exit(1)
1161
+ # if comparison_covered:
1162
+ # logging.error(
1163
+ # "--comparison-branch, --comparison-tag, --comparison-hash, --comparison-target-branch, and --comparison-target-table are mutually exclusive. Pick one..."
1164
+ # )
1165
+ # exit(1)
1075
1166
  comparison_covered = True
1076
1167
  by_str_comparison = "hash"
1077
1168
  comparison_str = comparison_hash
@@ -1124,6 +1215,7 @@ def from_rts_to_regression_table(
1124
1215
  regression_str="REGRESSION",
1125
1216
  improvement_str="IMPROVEMENT",
1126
1217
  tests_with_config={},
1218
+ extra_filters="",
1127
1219
  ):
1128
1220
  print_all = print_regressions_only is False and print_improvements_only is False
1129
1221
  table_full = []
@@ -1150,6 +1242,9 @@ def from_rts_to_regression_table(
1150
1242
  group_change = {}
1151
1243
  command_change = {}
1152
1244
  original_metric_mode = metric_mode
1245
+
1246
+ # Data collection for box plot
1247
+ boxplot_data = []
1153
1248
  for test_name in test_names:
1154
1249
  tested_groups = []
1155
1250
  tested_commands = []
@@ -1176,6 +1271,8 @@ def from_rts_to_regression_table(
1176
1271
  "github_repo={}".format(baseline_github_repo),
1177
1272
  "triggering_env={}".format(tf_triggering_env_baseline),
1178
1273
  ]
1274
+ if extra_filters != "":
1275
+ filters_baseline.append(extra_filters)
1179
1276
  if baseline_str != "":
1180
1277
  filters_baseline.append("{}={}".format(by_str_baseline, baseline_str))
1181
1278
  if baseline_deployment_name != "":
@@ -1200,6 +1297,8 @@ def from_rts_to_regression_table(
1200
1297
  filters_comparison.append(
1201
1298
  "deployment_name={}".format(comparison_deployment_name)
1202
1299
  )
1300
+ if extra_filters != "":
1301
+ filters_comparison.append(extra_filters)
1203
1302
  if comparison_github_org != "":
1204
1303
  filters_comparison.append(f"github_org={comparison_github_org}")
1205
1304
  if "hash" not in by_str_baseline:
@@ -1362,10 +1461,18 @@ def from_rts_to_regression_table(
1362
1461
  unstable_list.append([test_name, "n/a"])
1363
1462
 
1364
1463
  baseline_v_str = prepare_value_str(
1365
- baseline_pct_change, baseline_v, baseline_values, simplify_table
1464
+ baseline_pct_change,
1465
+ baseline_v,
1466
+ baseline_values,
1467
+ simplify_table,
1468
+ metric_name,
1366
1469
  )
1367
1470
  comparison_v_str = prepare_value_str(
1368
- comparison_pct_change, comparison_v, comparison_values, simplify_table
1471
+ comparison_pct_change,
1472
+ comparison_v,
1473
+ comparison_values,
1474
+ simplify_table,
1475
+ metric_name,
1369
1476
  )
1370
1477
 
1371
1478
  if metric_mode == "higher-better":
@@ -1377,6 +1484,9 @@ def from_rts_to_regression_table(
1377
1484
  percentage_change = (
1378
1485
  -(float(baseline_v) - float(comparison_v)) / float(baseline_v)
1379
1486
  ) * 100.0
1487
+
1488
+ # Collect data for box plot
1489
+ boxplot_data.append((test_name, percentage_change))
1380
1490
  else:
1381
1491
  logging.warn(
1382
1492
  f"Missing data for test {test_name}. baseline_v={baseline_v} (pct_change={baseline_pct_change}), comparison_v={comparison_v} (pct_change={comparison_pct_change}) "
@@ -1540,6 +1650,7 @@ def from_rts_to_regression_table(
1540
1650
  no_datapoints_list,
1541
1651
  group_change,
1542
1652
  command_change,
1653
+ boxplot_data,
1543
1654
  )
1544
1655
 
1545
1656
 
@@ -1571,13 +1682,47 @@ def check_multi_value_filter(baseline_str):
1571
1682
  return multi_value_baseline
1572
1683
 
1573
1684
 
1574
- def prepare_value_str(baseline_pct_change, baseline_v, baseline_values, simplify_table):
1575
- if baseline_v < 1.0:
1576
- baseline_v_str = " {:.2f}".format(baseline_v)
1577
- elif baseline_v < 10.0:
1578
- baseline_v_str = " {:.1f}".format(baseline_v)
1685
+ def is_latency_metric(metric_name):
1686
+ """Check if a metric represents latency and should use 3-digit precision"""
1687
+ latency_indicators = [
1688
+ "latency",
1689
+ "percentile",
1690
+ "usec",
1691
+ "msec",
1692
+ "overallQuantiles",
1693
+ "latencystats",
1694
+ "p50",
1695
+ "p95",
1696
+ "p99",
1697
+ "p999",
1698
+ ]
1699
+ metric_name_lower = metric_name.lower()
1700
+ return any(indicator in metric_name_lower for indicator in latency_indicators)
1701
+
1702
+
1703
+ def prepare_value_str(
1704
+ baseline_pct_change, baseline_v, baseline_values, simplify_table, metric_name=""
1705
+ ):
1706
+ """Prepare value string with appropriate precision based on metric type"""
1707
+ # Use 3-digit precision for latency metrics
1708
+ if is_latency_metric(metric_name):
1709
+ if baseline_v < 1.0:
1710
+ baseline_v_str = " {:.3f}".format(baseline_v)
1711
+ elif baseline_v < 10.0:
1712
+ baseline_v_str = " {:.3f}".format(baseline_v)
1713
+ elif baseline_v < 100.0:
1714
+ baseline_v_str = " {:.3f}".format(baseline_v)
1715
+ else:
1716
+ baseline_v_str = " {:.3f}".format(baseline_v)
1579
1717
  else:
1580
- baseline_v_str = " {:.0f}".format(baseline_v)
1718
+ # Original formatting for non-latency metrics
1719
+ if baseline_v < 1.0:
1720
+ baseline_v_str = " {:.2f}".format(baseline_v)
1721
+ elif baseline_v < 10.0:
1722
+ baseline_v_str = " {:.1f}".format(baseline_v)
1723
+ else:
1724
+ baseline_v_str = " {:.0f}".format(baseline_v)
1725
+
1581
1726
  stamp_b = ""
1582
1727
  if baseline_pct_change > 10.0:
1583
1728
  stamp_b = "UNSTABLE "
@@ -1620,6 +1765,444 @@ def get_test_names_from_db(rts, tags_regex_string, test_names, used_key):
1620
1765
  return test_names
1621
1766
 
1622
1767
 
1768
+ def filter_tests_by_command_regex(tests_with_config, command_regex=".*"):
1769
+ """Filter tests based on command regex matching tested-commands"""
1770
+ if command_regex == ".*":
1771
+ return tests_with_config
1772
+
1773
+ logging.info(f"Filtering tests by command regex: {command_regex}")
1774
+ command_regex_compiled = re.compile(command_regex, re.IGNORECASE)
1775
+ filtered_tests = {}
1776
+
1777
+ for test_name, test_config in tests_with_config.items():
1778
+ tested_commands = test_config.get("tested-commands", [])
1779
+
1780
+ # Check if any tested command matches the regex
1781
+ command_match = False
1782
+ for command in tested_commands:
1783
+ if re.search(command_regex_compiled, command):
1784
+ command_match = True
1785
+ logging.info(f"Including test {test_name} (matches command: {command})")
1786
+ break
1787
+
1788
+ if command_match:
1789
+ filtered_tests[test_name] = test_config
1790
+ else:
1791
+ logging.info(f"Excluding test {test_name} (commands: {tested_commands})")
1792
+
1793
+ logging.info(
1794
+ f"Command regex filtering: {len(filtered_tests)} tests remaining out of {len(tests_with_config)}"
1795
+ )
1796
+ return filtered_tests
1797
+
1798
+
1799
+ def get_test_names_from_yaml_files(test_suites_folder, tags_regex_string):
1800
+ """Get test names from YAML files in test-suites folder"""
1801
+ from redis_benchmarks_specification.__common__.runner import get_benchmark_specs
1802
+
1803
+ # Get all YAML files
1804
+ yaml_files = get_benchmark_specs(test_suites_folder, test="", test_regex=".*")
1805
+
1806
+ # Extract test names (remove path and .yml extension)
1807
+ test_names = []
1808
+ for yaml_file in yaml_files:
1809
+ test_name = os.path.basename(yaml_file).replace(".yml", "")
1810
+ # Apply regex filtering like database version
1811
+ match_obj = re.search(tags_regex_string, test_name)
1812
+ if match_obj is not None:
1813
+ test_names.append(test_name)
1814
+
1815
+ test_names.sort()
1816
+ logging.info(
1817
+ "Based on test-suites folder ({}) we have {} comparison points: {}".format(
1818
+ test_suites_folder, len(test_names), test_names
1819
+ )
1820
+ )
1821
+ return test_names
1822
+
1823
+
1824
+ def extract_command_from_test_name(test_name):
1825
+ """Extract Redis command from test name"""
1826
+ # Common patterns in test names
1827
+ test_name_lower = test_name.lower()
1828
+
1829
+ # Handle specific patterns
1830
+ if "memtier_benchmark" in test_name_lower:
1831
+ # Look for command patterns in memtier test names
1832
+ for cmd in [
1833
+ "get",
1834
+ "set",
1835
+ "hget",
1836
+ "hset",
1837
+ "hgetall",
1838
+ "hmset",
1839
+ "hmget",
1840
+ "hdel",
1841
+ "hexists",
1842
+ "hkeys",
1843
+ "hvals",
1844
+ "hincrby",
1845
+ "hincrbyfloat",
1846
+ "hsetnx",
1847
+ "hscan",
1848
+ "multi",
1849
+ "exec",
1850
+ ]:
1851
+ if cmd in test_name_lower:
1852
+ return cmd.upper()
1853
+
1854
+ # Try to extract command from test name directly
1855
+ parts = test_name.split("-")
1856
+ for part in parts:
1857
+ part_upper = part.upper()
1858
+ # Check if it looks like a Redis command
1859
+ if len(part_upper) >= 3 and part_upper.isalpha():
1860
+ return part_upper
1861
+
1862
+ return "UNKNOWN"
1863
+
1864
+
1865
+ def generate_command_performance_boxplot_from_command_data(
1866
+ command_change,
1867
+ output_filename,
1868
+ regression_str="Regression",
1869
+ improvement_str="Improvement",
1870
+ command_group_regex=".*",
1871
+ ):
1872
+ """Generate vertical box plot showing performance change distribution per command using command_change data"""
1873
+ if not MATPLOTLIB_AVAILABLE:
1874
+ logging.error("matplotlib not available, cannot generate box plot")
1875
+ return
1876
+
1877
+ try:
1878
+ if not command_change:
1879
+ logging.warning("No command data found for box plot generation")
1880
+ return
1881
+
1882
+ # Filter commands by command group regex
1883
+ if command_group_regex != ".*":
1884
+ logging.info(
1885
+ f"Filtering commands by command group regex: {command_group_regex}"
1886
+ )
1887
+ group_regex = re.compile(command_group_regex)
1888
+ filtered_command_change = {}
1889
+
1890
+ for cmd, changes in command_change.items():
1891
+ command_group = categorize_command(cmd.lower())
1892
+ if re.search(group_regex, command_group):
1893
+ filtered_command_change[cmd] = changes
1894
+ logging.info(f"Including command {cmd} (group: {command_group})")
1895
+ else:
1896
+ logging.info(f"Excluding command {cmd} (group: {command_group})")
1897
+
1898
+ command_change = filtered_command_change
1899
+
1900
+ if not command_change:
1901
+ logging.warning(
1902
+ f"No commands found matching command group regex: {command_group_regex}"
1903
+ )
1904
+ return
1905
+
1906
+ logging.info(f"After filtering: {len(command_change)} commands remaining")
1907
+
1908
+ # Sort commands by median performance change for better visualization
1909
+ commands_with_median = [
1910
+ (cmd, np.median(changes)) for cmd, changes in command_change.items()
1911
+ ]
1912
+ commands_with_median.sort(key=lambda x: x[1])
1913
+ commands = [cmd for cmd, _ in commands_with_median]
1914
+
1915
+ # Prepare data for plotting (vertical orientation)
1916
+ data_for_plot = [command_change[cmd] for cmd in commands]
1917
+
1918
+ # Create labels with test count
1919
+ labels_with_count = [
1920
+ f"{cmd}\n({len(command_change[cmd])} tests)" for cmd in commands
1921
+ ]
1922
+
1923
+ # Create the plot (vertical orientation)
1924
+ plt.figure(figsize=(10, 16))
1925
+
1926
+ # Create horizontal box plot (which makes it vertical when we rotate)
1927
+ positions = range(1, len(commands) + 1)
1928
+ box_plot = plt.boxplot(
1929
+ data_for_plot,
1930
+ positions=positions,
1931
+ patch_artist=True,
1932
+ showfliers=True,
1933
+ flierprops={"marker": "o", "markersize": 4},
1934
+ vert=False,
1935
+ ) # vert=False makes it horizontal (commands on Y-axis)
1936
+
1937
+ # Color the boxes and add value annotations
1938
+ for i, (patch, cmd) in enumerate(zip(box_plot["boxes"], commands)):
1939
+ changes = command_change[cmd]
1940
+ median_change = np.median(changes)
1941
+ min_change = min(changes)
1942
+ max_change = max(changes)
1943
+
1944
+ # Color based on median performance
1945
+ if median_change > 0:
1946
+ patch.set_facecolor("lightcoral") # Red for improvements
1947
+ patch.set_alpha(0.7)
1948
+ else:
1949
+ patch.set_facecolor("lightblue") # Blue for degradations
1950
+ patch.set_alpha(0.7)
1951
+
1952
+ # Store values for later annotation (after xlim is set)
1953
+ y_pos = i + 1 # Position corresponds to the box position
1954
+
1955
+ # Store annotation data for after xlim is set
1956
+ if not hasattr(plt, "_annotation_data"):
1957
+ plt._annotation_data = []
1958
+ plt._annotation_data.append(
1959
+ {
1960
+ "y_pos": y_pos,
1961
+ "min_change": min_change,
1962
+ "median_change": median_change,
1963
+ "max_change": max_change,
1964
+ }
1965
+ )
1966
+
1967
+ # Calculate optimal x-axis limits for maximum visibility
1968
+ all_values = []
1969
+ for changes in command_change.values():
1970
+ all_values.extend(changes)
1971
+
1972
+ if all_values:
1973
+ data_min = min(all_values)
1974
+ data_max = max(all_values)
1975
+
1976
+ logging.info(f"Box plot data range: {data_min:.3f}% to {data_max:.3f}%")
1977
+
1978
+ # Add minimal padding - tight to the data
1979
+ data_range = data_max - data_min
1980
+ if data_range == 0:
1981
+ # If all values are the same, add minimal symmetric padding
1982
+ padding = max(abs(data_min) * 0.05, 0.5) # At least 5% or 0.5
1983
+ x_min = data_min - padding
1984
+ x_max = data_max + padding
1985
+ else:
1986
+ # Add minimal padding: 2% on each side
1987
+ padding = data_range * 0.02
1988
+ x_min = data_min - padding
1989
+ x_max = data_max + padding
1990
+
1991
+ # Only include 0 if it's actually within or very close to the data range
1992
+ if data_min <= 0 <= data_max:
1993
+ # 0 is within the data range, keep current limits
1994
+ pass
1995
+ elif data_min > 0 and data_min < data_range * 0.1:
1996
+ # All positive values, but 0 is very close - include it
1997
+ x_min = 0
1998
+ elif data_max < 0 and abs(data_max) < data_range * 0.1:
1999
+ # All negative values, but 0 is very close - include it
2000
+ x_max = 0
2001
+
2002
+ plt.xlim(x_min, x_max)
2003
+ logging.info(f"Box plot x-axis limits set to: {x_min:.3f}% to {x_max:.3f}%")
2004
+
2005
+ # Add vertical line at 0% (only if 0 is visible)
2006
+ current_xlim = plt.xlim()
2007
+ if current_xlim[0] <= 0 <= current_xlim[1]:
2008
+ plt.axvline(x=0, color="black", linestyle="-", linewidth=1, alpha=0.8)
2009
+
2010
+ # Add background shading with current limits
2011
+ x_min, x_max = plt.xlim()
2012
+ if x_max > 0:
2013
+ plt.axvspan(max(0, x_min), x_max, alpha=0.1, color="red")
2014
+ if x_min < 0:
2015
+ plt.axvspan(x_min, min(0, x_max), alpha=0.1, color="blue")
2016
+
2017
+ # Add value annotations within the plot area
2018
+ if hasattr(plt, "_annotation_data"):
2019
+ x_range = x_max - x_min
2020
+ for data in plt._annotation_data:
2021
+ y_pos = data["y_pos"]
2022
+ min_change = data["min_change"]
2023
+ median_change = data["median_change"]
2024
+ max_change = data["max_change"]
2025
+
2026
+ # Position annotations inside the plot area
2027
+ # Use the actual values' positions with small offsets
2028
+ offset = x_range * 0.01 # Small offset for readability
2029
+
2030
+ # Position each annotation near its corresponding value
2031
+ plt.text(
2032
+ max_change + offset,
2033
+ y_pos + 0.15,
2034
+ f"{max_change:.1f}%",
2035
+ fontsize=7,
2036
+ va="center",
2037
+ ha="left",
2038
+ color="darkred",
2039
+ weight="bold",
2040
+ bbox=dict(
2041
+ boxstyle="round,pad=0.2",
2042
+ facecolor="white",
2043
+ alpha=0.8,
2044
+ edgecolor="none",
2045
+ ),
2046
+ )
2047
+ plt.text(
2048
+ median_change + offset,
2049
+ y_pos,
2050
+ f"{median_change:.1f}%",
2051
+ fontsize=7,
2052
+ va="center",
2053
+ ha="left",
2054
+ color="black",
2055
+ weight="bold",
2056
+ bbox=dict(
2057
+ boxstyle="round,pad=0.2",
2058
+ facecolor="yellow",
2059
+ alpha=0.8,
2060
+ edgecolor="none",
2061
+ ),
2062
+ )
2063
+ plt.text(
2064
+ min_change + offset,
2065
+ y_pos - 0.15,
2066
+ f"{min_change:.1f}%",
2067
+ fontsize=7,
2068
+ va="center",
2069
+ ha="left",
2070
+ color="darkblue",
2071
+ weight="bold",
2072
+ bbox=dict(
2073
+ boxstyle="round,pad=0.2",
2074
+ facecolor="white",
2075
+ alpha=0.8,
2076
+ edgecolor="none",
2077
+ ),
2078
+ )
2079
+
2080
+ # Clean up the temporary data
2081
+ delattr(plt, "_annotation_data")
2082
+
2083
+ # Set Y-axis labels (commands)
2084
+ plt.yticks(positions, labels_with_count, fontsize=10)
2085
+
2086
+ # Customize the plot
2087
+ title = f"Performance Change Distribution by Redis Command\nRedis is better ← | → Valkey is better"
2088
+ plt.title(title, fontsize=14, fontweight="bold", pad=20)
2089
+ plt.xlabel("Performance Change (%)", fontsize=12)
2090
+ plt.ylabel("Redis Commands", fontsize=12)
2091
+ plt.grid(True, alpha=0.3, axis="x")
2092
+
2093
+ # Add legend for box colors (at the bottom)
2094
+ from matplotlib.patches import Patch
2095
+
2096
+ legend_elements = [
2097
+ Patch(
2098
+ facecolor="lightcoral", alpha=0.7, label="Positive % = Valkey is better"
2099
+ ),
2100
+ Patch(
2101
+ facecolor="lightblue", alpha=0.7, label="Negative % = Redis is better"
2102
+ ),
2103
+ ]
2104
+ plt.legend(
2105
+ handles=legend_elements,
2106
+ bbox_to_anchor=(0.5, -0.05),
2107
+ loc="upper center",
2108
+ fontsize=10,
2109
+ ncol=2,
2110
+ )
2111
+
2112
+ # Add statistics text
2113
+ total_commands = len(command_change)
2114
+ total_measurements = sum(len(changes) for changes in command_change.values())
2115
+ plt.figtext(
2116
+ 0.02,
2117
+ 0.02,
2118
+ f"Commands: {total_commands} | Total measurements: {total_measurements}",
2119
+ fontsize=10,
2120
+ style="italic",
2121
+ )
2122
+
2123
+ # Adjust layout and save
2124
+ plt.tight_layout()
2125
+ plt.savefig(output_filename, dpi=300, bbox_inches="tight")
2126
+ plt.close()
2127
+
2128
+ logging.info(f"Box plot saved to {output_filename}")
2129
+
2130
+ # Print summary statistics
2131
+ logging.info("Command performance summary:")
2132
+ for cmd in commands:
2133
+ changes = command_change[cmd]
2134
+ min_change = min(changes)
2135
+ max_change = max(changes)
2136
+ median_change = np.median(changes)
2137
+ q1_change = np.percentile(changes, 25)
2138
+ q3_change = np.percentile(changes, 75)
2139
+ logging.info(
2140
+ f" {cmd}: min={min_change:.3f}%, max={max_change:.3f}%, median={median_change:.3f}% ({len(changes)} measurements)"
2141
+ )
2142
+
2143
+ # Print quartile summary for boxplot readiness
2144
+ logging.info("Command performance quartile summary (boxplot ready):")
2145
+ for cmd in commands:
2146
+ changes = command_change[cmd]
2147
+ min_change = min(changes)
2148
+ q1_change = np.percentile(changes, 25)
2149
+ median_change = np.median(changes)
2150
+ q3_change = np.percentile(changes, 75)
2151
+ max_change = max(changes)
2152
+ logging.info(
2153
+ f" {cmd}: min={min_change:.3f}%, Q1={q1_change:.3f}%, median={median_change:.3f}%, Q3={q3_change:.3f}%, max={max_change:.3f}%"
2154
+ )
2155
+
2156
+ except Exception as e:
2157
+ logging.error(f"Error generating box plot: {e}")
2158
+ import traceback
2159
+
2160
+ traceback.print_exc()
2161
+
2162
+
2163
+ def generate_command_performance_boxplot(comparison_data, output_filename):
2164
+ """Generate box plot showing performance change distribution per command"""
2165
+ if not MATPLOTLIB_AVAILABLE:
2166
+ logging.error("matplotlib not available, cannot generate box plot")
2167
+ return
2168
+
2169
+ try:
2170
+ # Group data by command
2171
+ command_data = {}
2172
+
2173
+ for test_name, pct_change in comparison_data:
2174
+ command = extract_command_from_test_name(test_name)
2175
+ if command not in command_data:
2176
+ command_data[command] = []
2177
+ command_data[command].append(pct_change)
2178
+
2179
+ if not command_data:
2180
+ logging.warning("No command data found for box plot generation")
2181
+ return
2182
+
2183
+ # Filter out commands with insufficient data
2184
+ filtered_command_data = {
2185
+ cmd: changes
2186
+ for cmd, changes in command_data.items()
2187
+ if len(changes) >= 1 and cmd != "UNKNOWN"
2188
+ }
2189
+
2190
+ if not filtered_command_data:
2191
+ logging.warning("No valid command data found for box plot generation")
2192
+ return
2193
+
2194
+ # Use the new function with the filtered data
2195
+ generate_command_performance_boxplot_from_command_data(
2196
+ filtered_command_data, output_filename, command_group_regex=".*"
2197
+ )
2198
+
2199
+ except Exception as e:
2200
+ logging.error(f"Error generating box plot: {e}")
2201
+ import traceback
2202
+
2203
+ traceback.print_exc()
2204
+
2205
+
1623
2206
  def get_line(
1624
2207
  baseline_v_str,
1625
2208
  comparison_v_str,
@@ -83,6 +83,12 @@ def create_client_runner_args(project_name):
83
83
  default=".*",
84
84
  help="Interpret PATTERN as a regular expression to filter test names",
85
85
  )
86
+ parser.add_argument(
87
+ "--commands-regex",
88
+ type=str,
89
+ default=".*",
90
+ help="Filter tests by command using regex. Only tests that include commands matching this regex will be processed (e.g., 'bitcount|bitpos').",
91
+ )
86
92
  parser.add_argument("--db_server_host", type=str, default="localhost")
87
93
  parser.add_argument("--db_server_password", type=str, default=None)
88
94
  parser.add_argument("--db_server_port", type=int, default=6379)
@@ -1390,6 +1390,31 @@ def process_self_contained_coordinator_stream(
1390
1390
  logging.info(f"Exit requested by user. Skipping test {test_name}.")
1391
1391
  break
1392
1392
 
1393
+ # Filter by command regex if specified
1394
+ if hasattr(args, "commands_regex") and args.commands_regex != ".*":
1395
+ if "tested-commands" in benchmark_config:
1396
+ tested_commands = benchmark_config["tested-commands"]
1397
+ command_regex_compiled = re.compile(
1398
+ args.commands_regex, re.IGNORECASE
1399
+ )
1400
+ command_match = False
1401
+ for command in tested_commands:
1402
+ if re.search(command_regex_compiled, command):
1403
+ command_match = True
1404
+ logging.info(
1405
+ f"Including test {test_name} (matches command: {command})"
1406
+ )
1407
+ break
1408
+ if not command_match:
1409
+ logging.info(
1410
+ f"Skipping test {test_name} (commands: {tested_commands} do not match regex: {args.commands_regex})"
1411
+ )
1412
+ continue
1413
+ else:
1414
+ logging.warning(
1415
+ f"Test {test_name} does not contain 'tested-commands' property. Cannot filter by commands."
1416
+ )
1417
+
1393
1418
  if tls_enabled:
1394
1419
  test_name = test_name + "-tls"
1395
1420
  logging.info(
@@ -674,6 +674,13 @@ def process_self_contained_coordinator_stream(
674
674
  f"detected a command groups regexp definition on the streamdata {command_groups_regexp}"
675
675
  )
676
676
 
677
+ command_regexp = None
678
+ if b"command_regexp" in testDetails:
679
+ command_regexp = testDetails[b"command_regexp"].decode()
680
+ logging.info(
681
+ f"detected a command regexp definition on the streamdata {command_regexp}"
682
+ )
683
+
677
684
  skip_test = False
678
685
  if b"platform" in testDetails:
679
686
  platform = testDetails[b"platform"]
@@ -729,6 +736,7 @@ def process_self_contained_coordinator_stream(
729
736
  tests_regexp,
730
737
  testsuite_spec_files,
731
738
  command_groups_regexp,
739
+ command_regexp,
732
740
  )
733
741
 
734
742
  for test_file in filtered_test_files:
@@ -1657,6 +1665,7 @@ def filter_test_files(
1657
1665
  tests_regexp,
1658
1666
  testsuite_spec_files,
1659
1667
  command_groups_regexp=None,
1668
+ command_regexp=None,
1660
1669
  ):
1661
1670
  filtered_test_files = []
1662
1671
  for test_file in testsuite_spec_files:
@@ -1720,6 +1729,29 @@ def filter_test_files(
1720
1729
  f"The file {test_file} (test name = {test_name}) does not contain the property 'tested-groups'. Cannot filter based uppon groups..."
1721
1730
  )
1722
1731
 
1732
+ # Filter by command regex if specified
1733
+ if command_regexp is not None and command_regexp != ".*":
1734
+ if "tested-commands" in benchmark_config:
1735
+ tested_commands = benchmark_config["tested-commands"]
1736
+ command_regex_compiled = re.compile(command_regexp, re.IGNORECASE)
1737
+ found = False
1738
+ for command in tested_commands:
1739
+ if re.search(command_regex_compiled, command):
1740
+ found = True
1741
+ logging.info(
1742
+ f"found the command {command} matching regex {command_regexp}"
1743
+ )
1744
+ break
1745
+ if found is False:
1746
+ logging.info(
1747
+ f"Skipping {test_file} given the following commands: {tested_commands} does not match command regex {command_regexp}"
1748
+ )
1749
+ continue
1750
+ else:
1751
+ logging.warning(
1752
+ f"The file {test_file} (test name = {test_name}) does not contain the property 'tested-commands'. Cannot filter based upon commands..."
1753
+ )
1754
+
1723
1755
  if "priority" in benchmark_config:
1724
1756
  priority = benchmark_config["priority"]
1725
1757
 
@@ -0,0 +1,75 @@
1
+ version: 0.4
2
+ name: memtier_benchmark-1Kkeys-hash-listpack-500-fields-update-20-fields-with-1B-to-64B-values
3
+ description: |
4
+ Runs memtier_benchmark to measure update performance on large Redis hashes stored as
5
+ listpacks. The dataset is preloaded with 1,000 keys (`test_hash:<id>`), each containing
6
+ 500 field–value pairs, where values are small strings ranging from 1 to 64 bytes.
7
+
8
+ The benchmark focuses on multi-field `HSET` updates, where each operation updates 20
9
+ fields in the same hash. This workload stresses Redis's listpack encoding for hashes,
10
+ particularly when performing batched updates inside already large hashes.
11
+
12
+ Since each key already contains 500 fields, the test highlights the cost of inserting
13
+ into dense listpacks, memory reallocation behavior, and the effectiveness of Redis's
14
+ multi-element insertion optimizations
15
+
16
+ dbconfig:
17
+ configuration-parameters:
18
+ save: '""'
19
+ resources:
20
+ requests:
21
+ memory: 1g
22
+ init_lua: |
23
+ local total_keys = 1000
24
+ local total_fields = 500
25
+ local batch_size = 100 -- number of arguments per HSET call (field + value count)
26
+ for k = 1, total_keys do
27
+ local key = "test_hash:" .. k
28
+ redis.call("DEL", key)
29
+ local field_num = 1
30
+ while field_num <= total_fields do
31
+ local args = {key}
32
+ for j = 1, batch_size, 2 do
33
+ if field_num > total_fields then break end
34
+ table.insert(args, "f" .. field_num)
35
+ table.insert(args, "v" .. field_num)
36
+ field_num = field_num + 1
37
+ end
38
+ redis.call("HSET", unpack(args))
39
+ end
40
+ end
41
+ return "OK"
42
+
43
+
44
+ tested-groups:
45
+ - hash
46
+
47
+ tested-commands:
48
+ - hset
49
+
50
+ redis-topologies:
51
+ - oss-standalone
52
+
53
+ build-variants:
54
+ - gcc:15.2.0-amd64-debian-bookworm-default
55
+ - gcc:15.2.0-arm64-debian-bookworm-default
56
+ - dockerhub
57
+
58
+ clientconfig:
59
+ run_image: redislabs/memtier_benchmark:edge
60
+ tool: memtier_benchmark
61
+ arguments: >
62
+ --key-prefix "test_hash:"
63
+ --key-minimum 1
64
+ --key-maximum 1000
65
+ --data-size-range=1-64
66
+ --pipeline=1
67
+ --test-time=120
68
+ --command='HSET __key__ HSET test_hash f1 __data__ f2 __data__ f3 __data__ f4 __data__ f5 __data__ f6 __data__ f7 __data__ f8 __data__ f9 __data__ f10 __data__ f11 __data__ f12 __data__ f13 __data__ f14 __data__ f15 __data__ f16 __data__ f17 __data__ f18 __data__ f19 __data__ f20 __data__'
69
+ --hide-histogram
70
+ resources:
71
+ requests:
72
+ cpus: '4'
73
+ memory: 2g
74
+
75
+ priority: 150
@@ -0,0 +1,40 @@
1
+ version: 0.4
2
+ name: memtier_benchmark-1Mkeys-string-setget200c-1KiB-pipeline-10
3
+ description: Runs memtier_benchmark, for a keyspace of 1M keys with 10% SETs and 90%
4
+ GETs (mixed) with a data size of 1000 Bytes and pipeline 10.
5
+ dbconfig:
6
+ configuration-parameters:
7
+ save: '""'
8
+ check:
9
+ keyspacelen: 1000000
10
+ preload_tool:
11
+ run_image: redislabs/memtier_benchmark:edge
12
+ tool: memtier_benchmark
13
+ arguments: --data-size 1000 --pipeline 50 -n allkeys --ratio 1:0 --key-pattern
14
+ P:P -c 1 -t 4 --hide-histogram --key-minimum 1 --key-maximum 1000000
15
+ resources:
16
+ requests:
17
+ memory: 1g
18
+ dataset_name: 1Mkeys-string-1KiB-size
19
+ dataset_description: This dataset contains 1 million string keys, each with a data
20
+ size of 1 KiB.
21
+ tested-commands:
22
+ - set
23
+ - get
24
+ tested-groups:
25
+ - string
26
+ redis-topologies:
27
+ - oss-standalone
28
+ build-variants:
29
+ - gcc:15.2.0-amd64-debian-bookworm-default
30
+ - gcc:15.2.0-arm64-debian-bookworm-default
31
+ - dockerhub
32
+ clientconfig:
33
+ run_image: redislabs/memtier_benchmark:edge
34
+ tool: memtier_benchmark
35
+ arguments: '"--data-size" "1000" --ratio 1:10 --key-pattern R:R --key-minimum=1 --key-maximum 1000000 --test-time 180 -c 50 -t 4 --hide-histogram --pipeline 10'
36
+ resources:
37
+ requests:
38
+ cpus: '4'
39
+ memory: 2g
40
+ priority: 1
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: redis-benchmarks-specification
3
- Version: 0.1.323
3
+ Version: 0.1.325
4
4
  Summary: The Redis benchmarks specification describes the cross-language/tools requirements and expectations to foster performance and observability standards around redis related technologies. Members from both industry and academia, including organizations and individuals are encouraged to contribute.
5
5
  Author: filipecosta90
6
6
  Author-email: filipecosta.90@gmail.com
@@ -26,6 +26,7 @@ Requires-Dist: pandas (>=2.1.2,<3.0.0)
26
26
  Requires-Dist: psutil (>=5.9.4,<6.0.0)
27
27
  Requires-Dist: redis (>=4.2.0,<5.0.0)
28
28
  Requires-Dist: redisbench-admin (>=0.11.1,<0.12.0)
29
+ Requires-Dist: seaborn (>=0.13.2,<0.14.0)
29
30
  Requires-Dist: semver (>=2.13.0,<3.0.0)
30
31
  Requires-Dist: typed-ast (>=1.5.0,<2.0.0)
31
32
  Description-Content-Type: text/markdown
@@ -4,10 +4,10 @@ redis_benchmarks_specification/__api__/api.py,sha256=k_CMICtMm1z8jY3hByaL0hIr_5v
4
4
  redis_benchmarks_specification/__api__/app.py,sha256=JzQm84DjIVdfLbDO423BJbrds6gFzMbA0syRkHE_aUU,7063
5
5
  redis_benchmarks_specification/__builder__/Readme.md,sha256=O6MV_J3OSgzW-ir2TbukP8Vhkm_LOzQJJndG1Cykqic,111
6
6
  redis_benchmarks_specification/__builder__/__init__.py,sha256=l-G1z-t6twUgi8QLueqoTQLvJmv3hJoEYskGm6H7L6M,83
7
- redis_benchmarks_specification/__builder__/builder.py,sha256=86DQuqf9LhPl1_bpmQK2rkACBxYBz13Wu8fsAnKkm7g,29730
7
+ redis_benchmarks_specification/__builder__/builder.py,sha256=lAoEQ8ab9AWstYcpF2hoixZ_HFmMKf9Icwzc0WV0t_I,29867
8
8
  redis_benchmarks_specification/__builder__/schema.py,sha256=1wcmyVJBcWrBvK58pghN9NCoWLCO3BzPsmdKWYfkVog,584
9
9
  redis_benchmarks_specification/__cli__/__init__.py,sha256=l-G1z-t6twUgi8QLueqoTQLvJmv3hJoEYskGm6H7L6M,83
10
- redis_benchmarks_specification/__cli__/args.py,sha256=C0EdJbq5F6Td6kvEkzN5ZWMhWYuizV_tGzVhkPLKEi0,7207
10
+ redis_benchmarks_specification/__cli__/args.py,sha256=X7VlHJvX3n85ZPUQFoovmaFDnY4t7irUrDLf07QAfaA,7430
11
11
  redis_benchmarks_specification/__cli__/cli.py,sha256=iTjINQ-RV_q2ovq1neSoRCAggpGdeP5mX3_1aFxSScY,22001
12
12
  redis_benchmarks_specification/__cli__/stats.py,sha256=r9JIfwGCSR3maozYbDZfZrkthNFQSs0xIymS86yZ6Iw,55574
13
13
  redis_benchmarks_specification/__common__/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -19,13 +19,13 @@ redis_benchmarks_specification/__common__/runner.py,sha256=M-o1QZVlp3thFW-55PiaW
19
19
  redis_benchmarks_specification/__common__/spec.py,sha256=D_SN48wg6NMthW_-OS1H5bydSDiuZpfd4WPPj7Vfwmc,5760
20
20
  redis_benchmarks_specification/__common__/timeseries.py,sha256=uvS3T2zdrSmW_B2S0MYTekJfHUllqU3RlD0LrF957RQ,52904
21
21
  redis_benchmarks_specification/__compare__/__init__.py,sha256=DtBXRp0Q01XgCFmY-1OIePMyyYihVNAjZ1Y8zwqSDN0,101
22
- redis_benchmarks_specification/__compare__/args.py,sha256=f3ZSs8GzyIzaMzX2h9cx0nOrnlO4aXToO1SBzBlpzKM,7608
23
- redis_benchmarks_specification/__compare__/compare.py,sha256=OrpCpY66rlbP5so6aYCdSF9Sy3sdhKrnzVJK1u3XQno,62912
22
+ redis_benchmarks_specification/__compare__/args.py,sha256=CNtA7pI9CJDTBJPGL2pNVfis7VDdxLautwRyka7oUCI,8911
23
+ redis_benchmarks_specification/__compare__/compare.py,sha256=_AbuV3FZxtUZIdq4qq24LNzPNIdtQQaqrk8bUjn9blk,84327
24
24
  redis_benchmarks_specification/__init__.py,sha256=YQIEx2sLPPA0JR9OuCuMNMNtm-f_gqDKgzvNJnkGNKY,491
25
25
  redis_benchmarks_specification/__runner__/__init__.py,sha256=l-G1z-t6twUgi8QLueqoTQLvJmv3hJoEYskGm6H7L6M,83
26
- redis_benchmarks_specification/__runner__/args.py,sha256=-el2RttOjjc4Y9yOM1P5y9BwIkBPp_Y1k7OsP91P2BI,10651
26
+ redis_benchmarks_specification/__runner__/args.py,sha256=ZzebmTY5JushL-xKoDTkouj5N7um6KLk1aELSPCejcQ,10901
27
27
  redis_benchmarks_specification/__runner__/remote_profiling.py,sha256=R7obNQju8mmY9oKkcndjI4aAuxi84OCLhDSqqaYu1SU,18610
28
- redis_benchmarks_specification/__runner__/runner.py,sha256=13iPDfyg_ivZyiS44ypbe_qViaJGOwp0Baj7kh3UtqE,144861
28
+ redis_benchmarks_specification/__runner__/runner.py,sha256=ou7vWRBi1xdZtM1pLu_JEtZYrZ_yEaGrotXC087LhkU,146139
29
29
  redis_benchmarks_specification/__self_contained_coordinator__/__init__.py,sha256=l-G1z-t6twUgi8QLueqoTQLvJmv3hJoEYskGm6H7L6M,83
30
30
  redis_benchmarks_specification/__self_contained_coordinator__/args.py,sha256=uxBjdQ78klvsVi6lOfGYQVaWIxc8OI-DwYKY16SgvCY,5952
31
31
  redis_benchmarks_specification/__self_contained_coordinator__/artifacts.py,sha256=OVHqJzDgeSSRfUSiKp1ZTAVv14PvSbk-5yJsAAoUfpw,936
@@ -36,7 +36,7 @@ redis_benchmarks_specification/__self_contained_coordinator__/docker.py,sha256=0
36
36
  redis_benchmarks_specification/__self_contained_coordinator__/post_processing.py,sha256=sVLKNnWdAqYY9DjVdqRC5tDaIrVSaI3Ca7w8-DQ-LRM,776
37
37
  redis_benchmarks_specification/__self_contained_coordinator__/prepopulation.py,sha256=1UeFr2T1ZQBcHCSd4W1ZtaWgXyFPfjLyDi_DgDc1eTA,2957
38
38
  redis_benchmarks_specification/__self_contained_coordinator__/runners.py,sha256=noRHn9leTfEm2fa1yHBHQd8TUGhFDoU86QQkHABnWSs,30073
39
- redis_benchmarks_specification/__self_contained_coordinator__/self_contained_coordinator.py,sha256=QcQwPWvhS5C96NNkZFzi0xwMzwLzl6kxYmS1sWTU7s0,82532
39
+ redis_benchmarks_specification/__self_contained_coordinator__/self_contained_coordinator.py,sha256=UXGDzNGOMkHNTo21_ov4U-3PnRFkwgfQ39_TPNO0hSg,84156
40
40
  redis_benchmarks_specification/__setups__/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
41
41
  redis_benchmarks_specification/__setups__/topologies.py,sha256=xQ1IJkcTji_ZjLiJd3vOxZpvbNtBLZw9cPkw5hGJKHU,481
42
42
  redis_benchmarks_specification/__spec__/__init__.py,sha256=l-G1z-t6twUgi8QLueqoTQLvJmv3hJoEYskGm6H7L6M,83
@@ -66,6 +66,7 @@ redis_benchmarks_specification/test-suites/memtier_benchmark-10Mkeys-load-hash-5
66
66
  redis_benchmarks_specification/test-suites/memtier_benchmark-10Mkeys-load-hash-5-fields-with-10B-values-pipeline-10.yml,sha256=Uw54Of2rgpVleRVdKegMddfABYzFJHjNIhkqogfTBXU,1000
67
67
  redis_benchmarks_specification/test-suites/memtier_benchmark-10Mkeys-load-hash-5-fields-with-10B-values.yml,sha256=B-_5Jf3rD8emmZRoKiwxmwgucANpCw4uuts4u8GxoQ0,966
68
68
  redis_benchmarks_specification/test-suites/memtier_benchmark-10Mkeys-string-get-10B-pipeline-100-nokeyprefix.yml,sha256=r2TcEUPqry9UTtRgB5Oevfh6uPJunM52IDLCTCNwD6M,1268
69
+ redis_benchmarks_specification/test-suites/memtier_benchmark-1Kkeys-hash-listpack-500-fields-update-20-fields-with-1B-to-64B-values.yml,sha256=MNvXg-CTghbaGAoHmJZbR8BPL2S67bAkHSyR-KyHsHM,2434
69
70
  redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-100B-expire-use-case.yml,sha256=gGYr-olsucZArDZICL8XMfauUoAJMDxEu97KS2VV6YI,1564
70
71
  redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-10B-expire-use-case.yml,sha256=z31tWYEtOvsuwgY1ZJTGb5oJ_LRel6LjjtiyahTDayU,1582
71
72
  redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-10B-psetex-expire-use-case.yml,sha256=EnIwwMR82wEndvtWBPKZbW2Volk1k7fBbcpThcTeR98,1357
@@ -161,6 +162,7 @@ redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-mixed
161
162
  redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-mixed-50-50-set-get-with-expiration-240B-400_conns.yml,sha256=1YIVXSLRytR9-QIayu6jCxnFd1KJlY8o0rwJYT28Hx8,1549
162
163
  redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-set-with-ex-100B-pipeline-10.yml,sha256=5C2S9LpQDH-_IpjWwYH9tCnK0jvm9pZdlnyGmJMA9gc,1300
163
164
  redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-setex-100B-pipeline-10.yml,sha256=NE5oujnTwuHINrlvHBjMEFSKY_iwKimAvq4twnYfmXI,1297
165
+ redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-setget200c-1KiB-pipeline-10.yml,sha256=VWLwsiTixI1WQEF1KJqPvqL0uTUUHOsi8wZ7GONn4HI,1275
164
166
  redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-setrange-100B-pipeline-10.yml,sha256=h6haEl469vZSnp9LTB3wds12EwGfyNSEm1iqXxh72s8,1329
165
167
  redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-string-setrange-100B.yml,sha256=Wg8joxF6TuCOyQbC4CpW8LTx49PpIAKvNhtwqJpX95k,1303
166
168
  redis_benchmarks_specification/test-suites/memtier_benchmark-1key-100M-bits-bitmap-bitcount.yml,sha256=I4fNkW8P-tVpkkfep8gcPeQ6BgPlZnG4EAytvtxU1hU,1498
@@ -279,8 +281,8 @@ redis_benchmarks_specification/test-suites/memtier_benchmark-playbook-session-st
279
281
  redis_benchmarks_specification/test-suites/memtier_benchmark-playbook-session-storage-1k-sessions.yml,sha256=2egtIxPxCze2jlbAfgsk4v9JSQHNMoPLbDWFEW8olDg,7006
280
282
  redis_benchmarks_specification/test-suites/template.txt,sha256=ezqGiRPOvuSDO0iG7GEf-AGXNfHbgXI89_G0RUEzL88,481
281
283
  redis_benchmarks_specification/vector-search-test-suites/vector_db_benchmark_test.yml,sha256=PD7ow-k4Ll2BkhEC3aIqiaCZt8Hc4aJIp96Lw3J3mcI,791
282
- redis_benchmarks_specification-0.1.323.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
283
- redis_benchmarks_specification-0.1.323.dist-info/METADATA,sha256=KIfE50uAlLrrLWpEoihY9p6EoCqobK4Txg7ZfUSTcHE,22726
284
- redis_benchmarks_specification-0.1.323.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
285
- redis_benchmarks_specification-0.1.323.dist-info/entry_points.txt,sha256=x5WBXCZsnDRTZxV7SBGmC65L2k-ygdDOxV8vuKN00Nk,715
286
- redis_benchmarks_specification-0.1.323.dist-info/RECORD,,
284
+ redis_benchmarks_specification-0.1.325.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
285
+ redis_benchmarks_specification-0.1.325.dist-info/METADATA,sha256=51YuPnuon83Xv4mkQnZ5FT85YhIGiiNtCbohIs5ghpI,22768
286
+ redis_benchmarks_specification-0.1.325.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
287
+ redis_benchmarks_specification-0.1.325.dist-info/entry_points.txt,sha256=x5WBXCZsnDRTZxV7SBGmC65L2k-ygdDOxV8vuKN00Nk,715
288
+ redis_benchmarks_specification-0.1.325.dist-info/RECORD,,