redis-benchmarks-specification 0.1.308__py3-none-any.whl → 0.1.310__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of redis-benchmarks-specification might be problematic.
- redis_benchmarks_specification/__cli__/stats.py +20 -11
- redis_benchmarks_specification/__common__/runner.py +13 -9
- redis_benchmarks_specification/__common__/timeseries.py +3 -3
- redis_benchmarks_specification/__compare__/args.py +1 -3
- redis_benchmarks_specification/__compare__/compare.py +24 -7
- redis_benchmarks_specification/__runner__/args.py +3 -3
- redis_benchmarks_specification/__runner__/remote_profiling.py +3 -1
- redis_benchmarks_specification/__runner__/runner.py +305 -116
- redis_benchmarks_specification/__self_contained_coordinator__/clients.py +10 -0
- redis_benchmarks_specification/__self_contained_coordinator__/docker.py +15 -2
- redis_benchmarks_specification/__self_contained_coordinator__/prepopulation.py +2 -0
- redis_benchmarks_specification/__self_contained_coordinator__/runners.py +11 -2
- redis_benchmarks_specification/__self_contained_coordinator__/self_contained_coordinator.py +12 -4
- redis_benchmarks_specification/test-suites/memtier_benchmark-1Mkeys-100B-expire-use-case.yml +2 -2
- redis_benchmarks_specification/test-suites/memtier_benchmark-playbook-session-storage-100k-sessions.yml +3 -2
- redis_benchmarks_specification/test-suites/memtier_benchmark-playbook-session-storage-1k-sessions.yml +3 -2
- {redis_benchmarks_specification-0.1.308.dist-info → redis_benchmarks_specification-0.1.310.dist-info}/METADATA +3 -2
- {redis_benchmarks_specification-0.1.308.dist-info → redis_benchmarks_specification-0.1.310.dist-info}/RECORD +21 -21
- {redis_benchmarks_specification-0.1.308.dist-info → redis_benchmarks_specification-0.1.310.dist-info}/WHEEL +1 -1
- {redis_benchmarks_specification-0.1.308.dist-info → redis_benchmarks_specification-0.1.310.dist-info}/LICENSE +0 -0
- {redis_benchmarks_specification-0.1.308.dist-info → redis_benchmarks_specification-0.1.310.dist-info}/entry_points.txt +0 -0
@@ -223,6 +223,9 @@ def generate_stats_cli_command_logic(args, project_name, project_version):
     command = command.replace("'", "")
     if "-key-pattern" in command:
         continue
+    # Skip command-ratio and other memtier arguments that start with -
+    if command.startswith("-"):
+        continue
     command = command.lower()
     if command not in tested_commands:
         tested_commands.append(command)

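
The new guard skips memtier option tokens (such as the arguments that follow --command-ratio) so they are not recorded as Redis commands. A minimal sketch of the same filtering idea; the sample tokens are made up and the real parsing of the memtier command line is omitted:

    # Sketch: drop memtier option tokens before recording tested commands.
    tokens = ["SET", "-key-pattern R:R", "GET", "--command-ratio", "-1"]

    tested_commands = []
    for command in tokens:
        command = command.replace("'", "")
        if "-key-pattern" in command:
            continue
        # Anything still starting with "-" is a memtier argument, not a command.
        if command.startswith("-"):
            continue
        command = command.lower()
        if command not in tested_commands:
            tested_commands.append(command)

    print(tested_commands)  # ['set', 'get']
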
@@ -238,14 +241,16 @@ def generate_stats_cli_command_logic(args, project_name, project_version):
     if command not in tracked_commands_json:
         tracked_commands_json[command] = command_json

-
-    if group
+    # Only process if command_json has group information
+    if "group" in command_json:
+        group = command_json["group"]
+        if group not in tested_groups:

-
-
-
-
-
+            tested_groups.append(group)
+        if group not in tracked_groups:
+            tracked_groups.append(group)
+            tracked_groups_hist[group] = 0
+        tracked_groups_hist[group] = tracked_groups_hist[group] + 1

     # Calculate total connections
     total_connections = clients * threads

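
The rewritten block only derives a command group when the command metadata actually carries one, and keeps a per-group histogram. A small self-contained sketch of that logic, assuming command metadata in a commands.json-style schema with an optional "group" field (the sample entries are made up):

    # Sketch: count tested groups only for commands whose metadata has a group.
    tracked_commands_json = {
        "get": {"group": "string"},
        "hset": {"group": "hash"},
        "unknowncmd": {},  # no group information
    }

    tested_groups = []
    tracked_groups = []
    tracked_groups_hist = {}

    for command, command_json in tracked_commands_json.items():
        # Only process if command_json has group information
        if "group" in command_json:
            group = command_json["group"]
            if group not in tested_groups:
                tested_groups.append(group)
            if group not in tracked_groups:
                tracked_groups.append(group)
                tracked_groups_hist[group] = 0
            tracked_groups_hist[group] = tracked_groups_hist[group] + 1

    print(tested_groups)        # ['string', 'hash']
    print(tracked_groups_hist)  # {'string': 1, 'hash': 1}
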
@@ -262,12 +267,14 @@ def generate_stats_cli_command_logic(args, project_name, project_version):
         data_sizes[data_size] = 0
     data_sizes[data_size] = data_sizes[data_size] + 1

-    if tested_commands != origin_tested_commands:
+    if sorted(tested_commands) != sorted(origin_tested_commands):
         requires_override = True
         benchmark_config["tested-commands"] = tested_commands
         logging.warn(
             "there is a difference between specified test-commands in the yaml (name={}) and the ones we've detected {}!={}".format(
-                test_name,
+                test_name,
+                sorted(origin_tested_commands),
+                sorted(tested_commands),
             )
         )

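
This hunk and the tested-groups hunk below switch to comparing sorted copies, so a YAML file that lists the same commands in a different order no longer triggers an override; the warning also now prints both lists. A minimal illustration of why the sorted comparison matters:

    # Sketch: order-insensitive comparison of declared vs. detected commands.
    origin_tested_commands = ["set", "get"]
    tested_commands = ["get", "set"]

    # Plain equality flags a spurious difference for a mere ordering change.
    print(tested_commands != origin_tested_commands)                  # True
    # Sorted comparison only reports real content differences.
    print(sorted(tested_commands) != sorted(origin_tested_commands))  # False
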
@@ -323,12 +330,14 @@ def generate_stats_cli_command_logic(args, project_name, project_version):
             )
         )

-    if tested_groups != origin_tested_groups:
+    if sorted(tested_groups) != sorted(origin_tested_groups):
         tested_groups_match_origin = False
         benchmark_config["tested-groups"] = tested_groups
         logging.warn(
             "there is a difference between specified test-groups in the yaml (name={}) and the ones we've detected {}!={}".format(
-                test_name,
+                test_name,
+                sorted(origin_tested_groups),
+                sorted(tested_groups),
             )
         )

@@ -67,7 +67,9 @@ def execute_init_commands(benchmark_config, r, dbconfig_keyname="dbconfig"):

     for lua_script in lua_scripts:
         try:
-            logging.info(
+            logging.info(
+                "Executing Lua script (length: {} chars)".format(len(lua_script))
+            )
             # Execute the Lua script using EVAL command with 0 keys
             stdout = r.execute_command("EVAL", lua_script, 0)
             logging.info("Lua script result: {}".format(stdout))

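
The added log line records the script length right before the script is submitted with EVAL and 0 keys. A minimal redis-py sketch of the same call pattern, assuming a throwaway local Redis instance and an illustrative script:

    import logging

    import redis

    logging.basicConfig(level=logging.INFO)
    r = redis.Redis(host="localhost", port=6379)  # assumed local test instance

    lua_script = "return redis.call('SET', 'example:key', 'value')"
    logging.info(
        "Executing Lua script (length: {} chars)".format(len(lua_script))
    )
    # EVAL <script> <numkeys>: this script takes no KEYS arguments, hence 0.
    stdout = r.execute_command("EVAL", lua_script, 0)
    logging.info("Lua script result: {}".format(stdout))
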
@@ -137,7 +139,6 @@ def extract_testsuites(args):
     return testsuite_spec_files


-
 def commandstats_latencystats_process_name(
     metric_name, prefix, setup_name, variant_labels_dict
 ):

@@ -235,6 +236,7 @@ def collect_redis_metrics(

     return start_time_ms, res, kv_overall

+
 def export_redis_metrics(
     artifact_version,
     end_time_ms,

@@ -274,9 +276,9 @@ def export_redis_metrics(
             "branch": tf_github_branch
         }
     if git_hash is not None and git_hash != "":
-        by_variants["by.hash/{}".format(git_hash)] = {
-
-
+        by_variants["by.hash/{}".format(git_hash)] = {"hash": git_hash}
+        if artifact_version is not None and artifact_version != "":
+            by_variants["by.hash/{}".format(git_hash)]["version"] = artifact_version
     if artifact_version is not None and artifact_version != "":
         by_variants["by.version/{}".format(artifact_version)] = {
             "version": artifact_version

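
The collapsed assignment attaches the hash label in one statement, and the version label is only added to the by.hash variant when an artifact version is known. A hedged sketch of the resulting structure (variable names follow the hunk, the values are made up):

    # Sketch: per-variant label dictionaries keyed by hash and by version.
    git_hash = "abc1234"       # illustrative values
    artifact_version = "8.0.1"

    by_variants = {}
    if git_hash is not None and git_hash != "":
        by_variants["by.hash/{}".format(git_hash)] = {"hash": git_hash}
        if artifact_version is not None and artifact_version != "":
            by_variants["by.hash/{}".format(git_hash)]["version"] = artifact_version
    if artifact_version is not None and artifact_version != "":
        by_variants["by.version/{}".format(artifact_version)] = {
            "version": artifact_version
        }

    print(by_variants)
    # {'by.hash/abc1234': {'hash': 'abc1234', 'version': '8.0.1'},
    #  'by.version/8.0.1': {'version': '8.0.1'}}
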
@@ -339,8 +341,6 @@ def export_redis_metrics(
     return datapoint_errors, datapoint_inserts


-
-
 def reset_commandstats(redis_conns):
     for pos, redis_conn in enumerate(redis_conns):
         logging.info("Resetting commmandstats for shard {}".format(pos))

@@ -468,7 +468,9 @@ def exporter_datasink_common(

     # Update deployment tracking sets
     deployment_type_and_name = f"{setup_type}_AND_{setup_name}"
-    deployment_type_and_name_and_version =
+    deployment_type_and_name_and_version = (
+        f"{setup_type}_AND_{setup_name}_AND_{git_version}"
+    )

     # Add to deployment-specific set (only if datasink connection is available)
     if datasink_conn is not None:

@@ -483,4 +485,6 @@ def exporter_datasink_common(

     # Add metadata fields to timeseries metadata
     metadata["deployment_type_AND_deployment_name"] = deployment_type_and_name
-    metadata["deployment_type_AND_deployment_name_AND_version"] =
+    metadata["deployment_type_AND_deployment_name_AND_version"] = (
+        deployment_type_and_name_and_version
+    )

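
The two exporter hunks above only re-wrap long assignments so they fit the formatter; the composed identifiers are unchanged. A short sketch of how the combined deployment/version identifier ends up in the time-series metadata (sample values are illustrative):

    # Sketch: compose deployment tracking identifiers used as metadata fields.
    setup_type = "oss-standalone"   # illustrative values
    setup_name = "oss-standalone"
    git_version = "8.0.1"

    deployment_type_and_name = f"{setup_type}_AND_{setup_name}"
    deployment_type_and_name_and_version = (
        f"{setup_type}_AND_{setup_name}_AND_{git_version}"
    )

    metadata = {}
    metadata["deployment_type_AND_deployment_name"] = deployment_type_and_name
    metadata["deployment_type_AND_deployment_name_AND_version"] = (
        deployment_type_and_name_and_version
    )

    print(metadata["deployment_type_AND_deployment_name_AND_version"])
    # oss-standalone_AND_oss-standalone_AND_8.0.1
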
@@ -76,7 +76,7 @@ def get_ts_metric_name(
     else:
         deployment_name = ""
     ts_name = (
-        "ci.benchmarks.
+        "ci.benchmarks.redis/{by}/"
         "{triggering_env}/{github_org}/{github_repo}/"
         "{test_name}/{build_variant_str}{running_platform_str}{deployment_type}{deployment_name}/{by_value}/{metric}".format(
             by=by,

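
This hunk and the two that follow replace the truncated hard-coded prefix so that exported time-series names start with ci.benchmarks.redis/. A reduced sketch of how such a name is assembled; it keeps only a few of the placeholders shown above and all values are made up:

    # Sketch: reduced version of the time-series name template.
    ts_name = (
        "ci.benchmarks.redis/{by}/"
        "{triggering_env}/{github_org}/{github_repo}/"
        "{test_name}/{metric}".format(
            by="by.branch",
            triggering_env="ci",
            github_org="redis",
            github_repo="redis",
            test_name="memtier_benchmark-1Mkeys-100B-expire-use-case",
            metric="ALL_STATS.Totals.Ops/sec",
        )
    )
    print(ts_name)
    # ci.benchmarks.redis/by.branch/ci/redis/redis/memtier_benchmark-1Mkeys-100B-expire-use-case/ALL_STATS.Totals.Ops/sec
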
@@ -323,7 +323,7 @@ def from_metric_kv_to_timeserie(
     }

     original_ts_name = ts_name
-    target_table_keyname = "target_tables:{triggering_env}:ci.benchmarks.
+    target_table_keyname = "target_tables:{triggering_env}:ci.benchmarks.redis/{break_by_key}/{break_by_str}/{tf_github_org}/{tf_github_repo}/{deployment_type}/{deployment_name}/{test_name}/{metric_name}".format(
         triggering_env=tf_triggering_env,
         break_by_key=break_by_key,
         break_by_str=break_by_str,

@@ -679,7 +679,7 @@ def get_overall_dashboard_keynames(
     if running_platform is not None:
         running_platform_str = "/{}".format(running_platform)
     sprefix = (
-        "ci.benchmarks.
+        "ci.benchmarks.redis/"
         + "{triggering_env}/{github_org}/{github_repo}".format(
             triggering_env=tf_triggering_env,
             github_org=tf_github_org,

@@ -76,9 +76,7 @@ def create_compare_arguments(parser):
     parser.add_argument("--baseline_deployment_name", type=str, default="")
     parser.add_argument("--comparison_deployment_name", type=str, default="")
     parser.add_argument("--metric_name", type=str, default="ALL_STATS.Totals.Ops/sec")
-    parser.add_argument(
-        "--running_platform", type=str, default="intel64-ubuntu22.04-redis-icx1"
-    )
+    parser.add_argument("--running_platform", type=str, default=None)
     parser.add_argument(
         "--running_platform_baseline",
         type=str,

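
With the default changed from a fixed platform string to None, no running_platform filter is applied unless the caller passes one explicitly. A minimal argparse sketch of just this option (the other compare arguments are omitted):

    import argparse

    # Sketch: --running_platform now defaults to None instead of a fixed platform.
    parser = argparse.ArgumentParser()
    parser.add_argument("--running_platform", type=str, default=None)

    args = parser.parse_args([])
    # Downstream code can now skip the platform filter entirely.
    if args.running_platform is not None and args.running_platform != "":
        print("filtering by running_platform={}".format(args.running_platform))
    else:
        print("no running_platform filter applied")
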
@@ -59,7 +59,7 @@ def get_overall_dashboard_keynames(
     if running_platform is not None:
         running_platform_str = "/{}".format(running_platform)
     sprefix = (
-        "ci.benchmarks.
+        "ci.benchmarks.redis/"
         + "{triggering_env}/{github_org}/{github_repo}".format(
             triggering_env=tf_triggering_env,
             github_org=tf_github_org,

@@ -128,7 +128,7 @@ def get_start_time_vars(start_time=None):


 def get_project_compare_zsets(triggering_env, org, repo):
-    return "ci.benchmarks.
+    return "ci.benchmarks.redis/{}/{}/{}:compare:pull_requests:zset".format(
         triggering_env, org, repo
     )

@@ -1179,10 +1179,12 @@ def from_rts_to_regression_table(
     if baseline_str != "":
         filters_baseline.append("{}={}".format(by_str_baseline, baseline_str))
     if baseline_deployment_name != "":
-        filters_baseline.append(
+        filters_baseline.append(
+            "deployment_name={}".format(baseline_deployment_name)
+        )
     if baseline_github_org != "":
         filters_baseline.append(f"github_org={baseline_github_org}")
-    if running_platform_baseline is not None:
+    if running_platform_baseline is not None and running_platform_baseline != "":
         filters_baseline.append(
             "running_platform={}".format(running_platform_baseline)
         )

@@ -1195,14 +1197,19 @@ def from_rts_to_regression_table(
     if comparison_str != "":
         filters_comparison.append("{}={}".format(by_str_comparison, comparison_str))
     if comparison_deployment_name != "":
-        filters_comparison.append(
+        filters_comparison.append(
+            "deployment_name={}".format(comparison_deployment_name)
+        )
     if comparison_github_org != "":
         filters_comparison.append(f"github_org={comparison_github_org}")
     if "hash" not in by_str_baseline:
         filters_baseline.append("hash==")
     if "hash" not in by_str_comparison:
         filters_comparison.append("hash==")
-    if
+    if (
+        running_platform_comparison is not None
+        and running_platform_comparison != ""
+    ):
         filters_comparison.append(
             "running_platform={}".format(running_platform_comparison)
         )

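
Together with the baseline hunk above, the comparison side now treats an empty running_platform string the same as None when building label filters. A hedged sketch of that guard; build_platform_filters is a made-up helper and the label=value strings follow the convention visible in the hunks:

    # Sketch: only append the running_platform label filter when it is meaningful.
    def build_platform_filters(deployment_name, running_platform):
        filters = []
        if deployment_name != "":
            filters.append("deployment_name={}".format(deployment_name))
        if running_platform is not None and running_platform != "":
            filters.append("running_platform={}".format(running_platform))
        return filters

    print(build_platform_filters("oss-standalone", None))   # ['deployment_name=oss-standalone']
    print(build_platform_filters("oss-standalone", ""))     # ['deployment_name=oss-standalone']
    print(build_platform_filters("oss-standalone", "x64"))  # ['deployment_name=oss-standalone', 'running_platform=x64']
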
@@ -1538,12 +1545,22 @@ def from_rts_to_regression_table(

 def get_only_Totals(baseline_timeseries):
     logging.warning("\t\tTime-series: {}".format(", ".join(baseline_timeseries)))
-    logging.info(
+    logging.info(
+        f"Checking if Totals will reduce timeseries. initial len={len(baseline_timeseries)}"
+    )
     new_base = []
     for ts_name in baseline_timeseries:
+        if "io-threads" in ts_name:
+            continue
+        if "oss-cluster" in ts_name:
+            continue
         if "Totals" in ts_name:
             new_base.append(ts_name)
     baseline_timeseries = new_base
+    logging.info(
+        f" final len={len(baseline_timeseries)}"
+    )
+
     return baseline_timeseries

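
The updated helper drops io-threads and oss-cluster variants before keeping only the Totals series, and logs how much the list shrank. A short usage sketch against made-up time-series names; the body mirrors the hunk, but the function here is a standalone copy, not the project's own:

    import logging

    logging.basicConfig(level=logging.INFO)

    def get_only_totals(baseline_timeseries):
        # Reduce to plain Totals series, skipping io-threads and oss-cluster variants.
        logging.info(
            f"Checking if Totals will reduce timeseries. initial len={len(baseline_timeseries)}"
        )
        new_base = []
        for ts_name in baseline_timeseries:
            if "io-threads" in ts_name:
                continue
            if "oss-cluster" in ts_name:
                continue
            if "Totals" in ts_name:
                new_base.append(ts_name)
        logging.info(f"final len={len(new_base)}")
        return new_base

    names = [
        "ci.benchmarks.redis/by.branch/oss-standalone/ALL_STATS.Totals.Ops/sec",
        "ci.benchmarks.redis/by.branch/oss-standalone-02-io-threads/ALL_STATS.Totals.Ops/sec",
        "ci.benchmarks.redis/by.branch/oss-cluster-03-primaries/ALL_STATS.Totals.Ops/sec",
    ]
    print(get_only_totals(names))  # keeps only the first name
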
@@ -44,19 +44,19 @@ def create_client_runner_args(project_name):
         "--deployment_type",
         type=str,
         default="oss-standalone",
-        help="Deployment type for the Redis instance (e.g., oss-standalone, oss-cluster, enterprise)"
+        help="Deployment type for the Redis instance (e.g., oss-standalone, oss-cluster, enterprise)",
     )
     parser.add_argument(
         "--deployment_name",
         type=str,
         default="redis",
-        help="Deployment name identifier for the Redis instance"
+        help="Deployment name identifier for the Redis instance",
     )
     parser.add_argument(
         "--core_count",
         type=int,
         default=None,
-        help="Number of CPU cores available to the Redis instance"
+        help="Number of CPU cores available to the Redis instance",
     )
     parser.add_argument("--github_repo", type=str, default="redis")
     parser.add_argument("--github_org", type=str, default="redis")

@@ -238,7 +238,9 @@ def extract_server_metadata_for_timeseries(redis_conn) -> Dict[str, str]:
     else:
         metadata["config_file"] = "none"

-    logging.info(
+    logging.info(
+        f"Extracted {len(metadata)} server metadata fields for timeseries: {list(metadata.keys())}"
+    )

     return metadata
