redis-benchmarks-specification 0.1.322__py3-none-any.whl → 0.1.323__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- redis_benchmarks_specification/__builder__/builder.py +54 -14
- redis_benchmarks_specification/__cli__/args.py +1 -1
- redis_benchmarks_specification/__cli__/cli.py +3 -0
- redis_benchmarks_specification/__cli__/stats.py +589 -1
- redis_benchmarks_specification/__compare__/args.py +0 -6
- redis_benchmarks_specification/__compare__/compare.py +0 -33
- redis_benchmarks_specification/__self_contained_coordinator__/docker.py +2 -1
- redis_benchmarks_specification/__self_contained_coordinator__/self_contained_coordinator.py +69 -7
- redis_benchmarks_specification/test-suites/memtier_benchmark-10Kkeys-load-list-rpush-bulkload-pipeline-50.yml +39 -0
- {redis_benchmarks_specification-0.1.322.dist-info → redis_benchmarks_specification-0.1.323.dist-info}/METADATA +1 -1
- {redis_benchmarks_specification-0.1.322.dist-info → redis_benchmarks_specification-0.1.323.dist-info}/RECORD +14 -13
- {redis_benchmarks_specification-0.1.322.dist-info → redis_benchmarks_specification-0.1.323.dist-info}/LICENSE +0 -0
- {redis_benchmarks_specification-0.1.322.dist-info → redis_benchmarks_specification-0.1.323.dist-info}/WHEEL +0 -0
- {redis_benchmarks_specification-0.1.322.dist-info → redis_benchmarks_specification-0.1.323.dist-info}/entry_points.txt +0 -0
redis_benchmarks_specification/__builder__/builder.py

@@ -73,6 +73,18 @@ def main():
     parser.add_argument(
         "--arch", type=str, default="amd64", help="arch to build artifacts"
     )
+    parser.add_argument(
+        "--builder-group",
+        type=str,
+        default=STREAM_GH_EVENTS_COMMIT_BUILDERS_CG,
+        help="Consumer group name to read from the stream",
+    )
+    parser.add_argument(
+        "--builder-id",
+        type=str,
+        default="1",
+        help="Consumer id to read from the stream",
+    )
     parser.add_argument(
         "--setups-folder",
         type=str,
@@ -149,7 +161,14 @@ def main():
 
     build_spec_image_prefetch(builders_folder, different_build_specs)
 
-
+    builder_group = args.builder_group
+    builder_id = args.builder_id
+    if builder_group is None:
+        builder_group = STREAM_GH_EVENTS_COMMIT_BUILDERS_CG
+    if builder_id is None:
+        builder_id = "1"
+
+    builder_consumer_group_create(conn, builder_group)
     if args.github_token is not None:
         logging.info("detected a github token. will update as much as possible!!! =)")
     previous_id = args.consumer_start_id
@@ -162,28 +181,26 @@ def main():
         args.docker_air_gap,
         arch,
         args.github_token,
+        builder_group,
+        builder_id,
     )
 
 
-def builder_consumer_group_create(
+def builder_consumer_group_create(
+    conn, builder_group=STREAM_GH_EVENTS_COMMIT_BUILDERS_CG, id="$"
+):
     try:
         conn.xgroup_create(
             STREAM_KEYNAME_GH_EVENTS_COMMIT,
-
+            builder_group,
             mkstream=True,
             id=id,
         )
         logging.info(
-            "Created consumer group named {} to distribute work.".format(
-                STREAM_GH_EVENTS_COMMIT_BUILDERS_CG
-            )
+            "Created consumer group named {} to distribute work.".format(builder_group)
         )
     except redis.exceptions.ResponseError:
-        logging.info(
-            "Consumer group named {} already existed.".format(
-                STREAM_GH_EVENTS_COMMIT_BUILDERS_CG
-            )
-        )
+        logging.info("Consumer group named {} already existed.".format(builder_group))
 
 
 def check_benchmark_build_comment(comments):
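
The reworked builder_consumer_group_create/xreadgroup path is the standard Redis Streams consumer-group pattern. Below is a minimal, self-contained sketch of that pattern with redis-py; the stream, group and consumer names are placeholders rather than the constants from the package's env module, and reading with ">" (only new messages) stands in for the previous_id bookkeeping the builder performs.

    import redis

    r = redis.Redis()

    stream = "example:gh_events"                # placeholder for STREAM_KEYNAME_GH_EVENTS_COMMIT
    group = "example-builders-cg"               # what --builder-group now selects
    consumer = "{}-proc#{}".format(group, "2")  # what --builder-id now selects

    # Create the consumer group; a BUSYGROUP ResponseError means it already exists.
    try:
        r.xgroup_create(stream, group, id="$", mkstream=True)
    except redis.exceptions.ResponseError:
        pass

    # Each builder reads under its own consumer name, so several builders can share
    # one group (splitting the work) or use distinct groups (each sees every event).
    r.xadd(stream, {"git_hash": "abc123", "arch": "amd64"})
    work = r.xreadgroup(group, consumer, {stream: ">"}, count=1, block=1000)
    print(work)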
@@ -205,14 +222,22 @@ def builder_process_stream(
     docker_air_gap=False,
     arch="amd64",
     github_token=None,
+    builder_group=None,
+    builder_id=None,
 ):
     new_builds_count = 0
     auto_approve_github_comments = True
     build_stream_fields_arr = []
-
-
+    if builder_group is None:
+        builder_group = STREAM_GH_EVENTS_COMMIT_BUILDERS_CG
+    if builder_id is None:
+        builder_id = "1"
+    consumer_name = "{}-proc#{}".format(builder_group, builder_id)
+    logging.info(
+        f"Entering blocking read waiting for work. building for arch: {arch}. Using consumer id {consumer_name}"
+    )
     newTestInfo = conn.xreadgroup(
-
+        builder_group,
         consumer_name,
         {STREAM_KEYNAME_GH_EVENTS_COMMIT: previous_id},
         count=1,
@@ -230,6 +255,21 @@ def builder_process_stream(
         docker_client = docker.from_env()
         from pathlib import Path
 
+        build_request_arch = None
+        if b"arch" in testDetails:
+            build_request_arch = testDetails[b"arch"].decode()
+        elif b"build_arch" in testDetails:
+            build_request_arch = testDetails[b"build_arch"].decode()
+        else:
+            logging.info("No arch info found on the stream.")
+        if build_request_arch is not None and build_request_arch != arch:
+            logging.info(
+                "skipping build request given requested build arch {}!={}".format(
+                    build_request_arch, arch
+                )
+            )
+            return previous_id, new_builds_count, build_stream_fields_arr
+
        home = str(Path.home())
        if b"git_hash" in testDetails:
            git_hash = testDetails[b"git_hash"]
redis_benchmarks_specification/__cli__/args.py

@@ -138,7 +138,7 @@ def spec_cli_args(parser):
     parser.add_argument("--gh_repo", type=str, default="redis")
     parser.add_argument("--server_name", type=str, default=None)
     parser.add_argument("--run_image", type=str, default="redis")
-    parser.add_argument("--build_arch", type=str, default=
+    parser.add_argument("--build_arch", type=str, default=None)
     parser.add_argument("--id", type=str, default="dockerhub")
     parser.add_argument("--mnt_point", type=str, default="")
     parser.add_argument("--trigger-unstable-commits", type=bool, default=True)
redis_benchmarks_specification/__cli__/cli.py

@@ -432,6 +432,9 @@ def trigger_tests_cli_command_logic(args, project_name, project_version):
     commit_dict["tests_groups_regexp"] = tests_groups_regexp
     commit_dict["github_org"] = args.gh_org
     commit_dict["github_repo"] = args.gh_repo
+    if args.build_arch is not None:
+        commit_dict["build_arch"] = args.build_arch
+        commit_dict["arch"] = args.build_arch
     if args.server_name is not None and args.server_name != "":
         commit_dict["server_name"] = args.server_name
     if args.build_artifacts != "":
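
Taken together with the builder-side check added in builder.py, the three lines above let a trigger request pin a target architecture: the CLI stores it under both build_arch and arch in the stream entry, and a builder skips entries whose declared arch differs from its own. A hypothetical condensation of that filter (field names are taken from the diff; the helper itself is not part of the package):

    def should_skip_build(test_details: dict, builder_arch: str) -> bool:
        """Return True when a stream entry requests a different build arch.

        test_details mirrors the raw XREADGROUP payload, i.e. bytes keys/values.
        """
        requested = None
        if b"arch" in test_details:
            requested = test_details[b"arch"].decode()
        elif b"build_arch" in test_details:
            requested = test_details[b"build_arch"].decode()
        return requested is not None and requested != builder_arch


    # Example: an arm64 request is ignored by an amd64 builder.
    assert should_skip_build({b"arch": b"arm64"}, "amd64") is True
    assert should_skip_build({b"git_hash": b"abc123"}, "amd64") is False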
redis_benchmarks_specification/__cli__/stats.py

@@ -40,6 +40,28 @@ def clean_number(value):
         return 0  # Default to 0 if invalid
 
 
+def clean_percentage(value):
+    """Parse percentage values like '17.810220866%'"""
+    try:
+        value = value.replace("%", "").strip()
+        return float(value)
+    except ValueError:
+        logging.error(f"Skipping invalid percentage value: {value}")
+        return 0.0
+
+
+def format_number_with_suffix(value):
+    """Format large numbers with B/M/K suffixes for readability"""
+    if value >= 1_000_000_000:
+        return f"{value / 1_000_000_000:.1f}B"
+    elif value >= 1_000_000:
+        return f"{value / 1_000_000:.1f}M"
+    elif value >= 1_000:
+        return f"{value / 1_000:.1f}K"
+    else:
+        return str(value)
+
+
 def get_arg_value(args, flag, default):
     """Extract integer values safely from CLI arguments"""
     if flag in args:
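
A short usage sketch of the two helpers added above; the values are illustrative and the import path assumes the functions stay module-level in __cli__/stats.py.

    from redis_benchmarks_specification.__cli__.stats import (
        clean_percentage,
        format_number_with_suffix,
    )

    assert clean_percentage("17.810220866%") == 17.810220866
    assert clean_percentage("not-a-number") == 0.0      # logged and defaulted
    assert format_number_with_suffix(2_500_000_000) == "2.5B"
    assert format_number_with_suffix(17_810) == "17.8K"
    assert format_number_with_suffix(950) == "950"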
@@ -93,6 +115,35 @@ def generate_stats_cli_command_logic(args, project_name, project_version):
         priority_json = json.load(fd)
     tracked_groups = []
     tracked_groups_hist = {}
+
+    # ACL category tracking for benchmark YAML files
+    benchmark_read_commands = {}
+    benchmark_write_commands = {}
+    benchmark_fast_commands = {}
+    benchmark_slow_commands = {}
+    benchmark_total_command_count = 0
+
+    # Group-based read/write tracking for benchmarks
+    benchmark_group_read = {}  # group -> count
+    benchmark_group_write = {}  # group -> count
+    benchmark_group_total = {}  # group -> total count
+
+    # ACL category tracking for commandstats CSV
+    csv_read_commands = {}
+    csv_write_commands = {}
+    csv_fast_commands = {}
+    csv_slow_commands = {}
+    csv_total_command_count = 0
+
+    # Group-based read/write tracking for CSV
+    csv_group_read = {}  # group -> count
+    csv_group_write = {}  # group -> count
+    csv_group_total = {}  # group -> total count
+
+    # Percentage validation tracking
+    csv_provided_percentages = {}  # command -> provided percentage
+    csv_original_counts = {}  # command -> original count from CSV
+
     override_enabled = args.override_tests
     fail_on_required_diff = args.fail_on_required_diff
     overall_result = True
@@ -252,6 +303,75 @@ def generate_stats_cli_command_logic(args, project_name, project_version):
                     tracked_groups_hist[group] = 0
                 tracked_groups_hist[group] = tracked_groups_hist[group] + 1
 
+                # Track ACL categories for read/write and fast/slow analysis
+                if "acl_categories" in command_json:
+                    acl_categories = command_json["acl_categories"]
+                    benchmark_total_command_count += 1
+
+                    # Track total by group (all commands)
+                    if group not in benchmark_group_total:
+                        benchmark_group_total[group] = 0
+                    benchmark_group_total[group] += 1
+
+                    # Track read/write commands
+                    is_read = False
+                    is_write = False
+
+                    if "@read" in acl_categories:
+                        is_read = True
+                    elif "@write" in acl_categories:
+                        is_write = True
+                    elif "_ro" in command.lower():
+                        # Commands with _ro suffix are read-only (like EVALSHA_RO)
+                        is_read = True
+                    elif "@pubsub" in acl_categories:
+                        # Pubsub commands: SUBSCRIBE/UNSUBSCRIBE are read, PUBLISH is write
+                        if command.lower() in [
+                            "subscribe",
+                            "unsubscribe",
+                            "psubscribe",
+                            "punsubscribe",
+                        ]:
+                            is_read = True
+                        else:
+                            is_write = (
+                                True  # PUBLISH and other pubsub commands
+                            )
+                    else:
+                        # Commands without explicit read/write ACL but not _ro are assumed write
+                        # This covers cases like EVALSHA which can modify data
+                        is_write = True
+
+                    if is_read:
+                        if command not in benchmark_read_commands:
+                            benchmark_read_commands[command] = 0
+                        benchmark_read_commands[command] += 1
+
+                        # Track by group
+                        if group not in benchmark_group_read:
+                            benchmark_group_read[group] = 0
+                        benchmark_group_read[group] += 1
+
+                    elif is_write:
+                        if command not in benchmark_write_commands:
+                            benchmark_write_commands[command] = 0
+                        benchmark_write_commands[command] += 1
+
+                        # Track by group
+                        if group not in benchmark_group_write:
+                            benchmark_group_write[group] = 0
+                        benchmark_group_write[group] += 1
+
+                    # Track fast/slow commands
+                    if "@fast" in acl_categories:
+                        if command not in benchmark_fast_commands:
+                            benchmark_fast_commands[command] = 0
+                        benchmark_fast_commands[command] += 1
+                    elif "@slow" in acl_categories:
+                        if command not in benchmark_slow_commands:
+                            benchmark_slow_commands[command] = 0
+                        benchmark_slow_commands[command] += 1
+
         # Calculate total connections
         total_connections = clients * threads
 
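
The read/write bucketing added above reduces to a precedence rule over ACL categories with two special cases (_ro command variants and pub/sub). The hypothetical helper below restates that rule outside the loop for clarity; the category lists in the example calls are illustrative, not an authoritative dump of Redis ACL metadata.

    PUBSUB_READ = {"subscribe", "unsubscribe", "psubscribe", "punsubscribe"}


    def classify_read_write(command: str, acl_categories: list) -> str:
        """Condensed restatement of the classification used in the diff."""
        if "@read" in acl_categories:
            return "read"
        if "@write" in acl_categories:
            return "write"
        if "_ro" in command.lower():
            return "read"          # e.g. EVALSHA_RO style read-only variants
        if "@pubsub" in acl_categories:
            return "read" if command.lower() in PUBSUB_READ else "write"
        return "write"             # no explicit category: assume it may write


    # Illustrative inputs (categories shown are examples only).
    assert classify_read_write("GET", ["@read", "@fast", "@string"]) == "read"
    assert classify_read_write("SUBSCRIBE", ["@pubsub", "@fast"]) == "read"
    assert classify_read_write("PUBLISH", ["@pubsub", "@fast"]) == "write"
    assert classify_read_write("EVALSHA_RO", ["@slow", "@scripting"]) == "read"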
@@ -428,6 +548,15 @@ def generate_stats_cli_command_logic(args, project_name, project_version):
             if len(row) > 2:
                 usecs = clean_number(row[2])
                 total_usecs += usecs
+
+            # Parse percentage and original count if available
+            provided_percentage = None
+            original_count = None
+            if len(row) > 3:
+                provided_percentage = clean_percentage(row[3])
+            if len(row) > 4:
+                original_count = clean_number(row[4])
+
             if count == 0:
                 continue
             tracked = False
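
The new columns read here extend each commandstats CSV row. Only indices 2 through 4 (usecs, percentage, original count) are established by the diff, so the row layout in the sketch below, including the first two columns, is an assumption made for illustration.

    from redis_benchmarks_specification.__cli__.stats import clean_number, clean_percentage

    # Hypothetical commandstats row layout: [cmdstat, count, usecs, percentage, original_count].
    row = ["cmdstat_get", "1780000000", "950000000", "17.810220866%", "1780000000"]

    usecs = clean_number(row[2])
    provided_percentage = clean_percentage(row[3]) if len(row) > 3 else None
    original_count = clean_number(row[4]) if len(row) > 4 else None
    print(usecs, provided_percentage, original_count)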
@@ -452,11 +581,97 @@ def generate_stats_cli_command_logic(args, project_name, project_version):
                 if "deprecated_since" in command_json:
                     deprecated = True
 
+                # Track ACL categories for commandstats CSV data
+                if "acl_categories" in command_json:
+                    acl_categories = command_json["acl_categories"]
+
+                    # Use original count if available, otherwise use parsed count
+                    tracking_count = (
+                        original_count if original_count is not None else count
+                    )
+                    csv_total_command_count += tracking_count
+
+                    # Track total by group (all commands)
+                    if group not in csv_group_total:
+                        csv_group_total[group] = 0
+                    csv_group_total[group] += tracking_count
+
+                    # Track read/write commands
+                    is_read = False
+                    is_write = False
+
+                    if "@read" in acl_categories:
+                        is_read = True
+                    elif "@write" in acl_categories:
+                        is_write = True
+                    elif "_ro" in cmd.lower():
+                        # Commands with _ro suffix are read-only (like EVALSHA_RO)
+                        is_read = True
+                    elif "@pubsub" in acl_categories:
+                        # Pubsub commands: SUBSCRIBE/UNSUBSCRIBE are read, PUBLISH is write
+                        if cmd.lower() in [
+                            "subscribe",
+                            "unsubscribe",
+                            "psubscribe",
+                            "punsubscribe",
+                        ]:
+                            is_read = True
+                        else:
+                            is_write = True  # PUBLISH and other pubsub commands
+                    else:
+                        # Commands without explicit read/write ACL but not _ro are assumed write
+                        # This covers cases like EVALSHA which can modify data
+                        is_write = True
+
+                    if is_read:
+                        if cmd.lower() not in csv_read_commands:
+                            csv_read_commands[cmd.lower()] = 0
+                        csv_read_commands[cmd.lower()] += tracking_count
+
+                        # Track by group
+                        if group not in csv_group_read:
+                            csv_group_read[group] = 0
+                        csv_group_read[group] += tracking_count
+
+                    elif is_write:
+                        if cmd.lower() not in csv_write_commands:
+                            csv_write_commands[cmd.lower()] = 0
+                        csv_write_commands[cmd.lower()] += tracking_count
+
+                        # Track by group
+                        if group not in csv_group_write:
+                            csv_group_write[group] = 0
+                        csv_group_write[group] += tracking_count
+
+                    # Track fast/slow commands
+                    if "@fast" in acl_categories:
+                        if cmd.lower() not in csv_fast_commands:
+                            csv_fast_commands[cmd.lower()] = 0
+                        csv_fast_commands[cmd.lower()] += tracking_count
+                    elif "@slow" in acl_categories:
+                        if cmd.lower() not in csv_slow_commands:
+                            csv_slow_commands[cmd.lower()] = 0
+                        csv_slow_commands[cmd.lower()] += tracking_count
+
                 if module is False or include_modules:
-
+                    # Use original count if available and different from parsed count
+                    final_count = count
+                    if original_count is not None and original_count != count:
+                        logging.warning(
+                            f"Using original count for {cmd}: {original_count:,} instead of parsed {count:,}"
+                        )
+                        final_count = original_count
+
+                    priority[cmd.lower()] = final_count
                 if type(usecs) == int:
                     priority_usecs[cmd.lower()] = usecs
 
+                # Store percentage and original count for validation
+                if provided_percentage is not None:
+                    csv_provided_percentages[cmd.lower()] = provided_percentage
+                if original_count is not None:
+                    csv_original_counts[cmd.lower()] = original_count
+
                 if cmdstat in tracked_commands_json:
                     tracked = True
                     if module is False or include_modules:
@@ -651,6 +866,249 @@ def generate_stats_cli_command_logic(args, project_name, project_version):
     logging.info(
         f"There is a total of : {len(list(tracked_commands_json.keys()))} tracked commands."
     )
+
+    # ACL Category Analysis Summary
+    logging.info("=" * 80)
+    logging.info("ACL CATEGORY ANALYSIS SUMMARY")
+    logging.info("=" * 80)
+
+    # Benchmark YAML files analysis
+    if benchmark_total_command_count > 0:
+        logging.info("BENCHMARK TEST SUITES ANALYSIS (from YAML files):")
+        logging.info("-" * 50)
+
+        # Calculate read/write percentages for benchmarks
+        benchmark_read_count = sum(benchmark_read_commands.values())
+        benchmark_write_count = sum(benchmark_write_commands.values())
+        benchmark_rw_count = benchmark_read_count + benchmark_write_count
+
+        if benchmark_rw_count > 0:
+            read_percentage = (benchmark_read_count / benchmark_rw_count) * 100
+            write_percentage = (benchmark_write_count / benchmark_rw_count) * 100
+
+            logging.info(f"READ/WRITE COMMAND DISTRIBUTION:")
+            logging.info(
+                f" Read commands: {benchmark_read_count:6d} ({read_percentage:5.1f}%)"
+            )
+            logging.info(
+                f" Write commands: {benchmark_write_count:6d} ({write_percentage:5.1f}%)"
+            )
+            logging.info(f" Total R/W: {benchmark_rw_count:6d} (100.0%)")
+        else:
+            logging.info("No read/write commands detected in benchmark ACL categories")
+
+        # Calculate fast/slow percentages for benchmarks
+        benchmark_fast_count = sum(benchmark_fast_commands.values())
+        benchmark_slow_count = sum(benchmark_slow_commands.values())
+        benchmark_fs_count = benchmark_fast_count + benchmark_slow_count
+
+        if benchmark_fs_count > 0:
+            fast_percentage = (benchmark_fast_count / benchmark_fs_count) * 100
+            slow_percentage = (benchmark_slow_count / benchmark_fs_count) * 100
+
+            logging.info(f"")
+            logging.info(f"FAST/SLOW COMMAND DISTRIBUTION:")
+            logging.info(
+                f" Fast commands: {benchmark_fast_count:6d} ({fast_percentage:5.1f}%)"
+            )
+            logging.info(
+                f" Slow commands: {benchmark_slow_count:6d} ({slow_percentage:5.1f}%)"
+            )
+            logging.info(f" Total F/S: {benchmark_fs_count:6d} (100.0%)")
+        else:
+            logging.info("No fast/slow commands detected in benchmark ACL categories")
+
+        # Group breakdown for benchmarks
+        if benchmark_group_total:
+            logging.info("")
+            logging.info("READ/WRITE BREAKDOWN BY COMMAND GROUP:")
+
+            # Calculate total calls across all groups
+            total_all_calls = sum(benchmark_group_total.values())
+
+            # Create list of groups with their total calls for sorting
+            group_data = []
+            for group, total_group in benchmark_group_total.items():
+                read_count = benchmark_group_read.get(group, 0)
+                write_count = benchmark_group_write.get(group, 0)
+                group_data.append((group, read_count, write_count, total_group))
+
+            # Sort by total calls (descending)
+            group_data.sort(key=lambda x: x[3], reverse=True)
+
+            total_read_all = 0
+            total_write_all = 0
+
+            for group, read_count, write_count, total_group in group_data:
+                group_pct = (total_group / total_all_calls) * 100
+                read_pct = (read_count / total_group) * 100 if total_group > 0 else 0
+                write_pct = (write_count / total_group) * 100 if total_group > 0 else 0
+
+                read_formatted = format_number_with_suffix(read_count)
+                write_formatted = format_number_with_suffix(write_count)
+
+                logging.info(
+                    f" {group.upper():>12} ({group_pct:4.1f}%): {read_formatted:>8} read ({read_pct:5.1f}%), {write_formatted:>8} write ({write_pct:5.1f}%)"
+                )
+
+                total_read_all += read_count
+                total_write_all += write_count
+
+            # Add total row
+            if group_data:
+                total_read_pct = (total_read_all / total_all_calls) * 100
+                total_write_pct = (total_write_all / total_all_calls) * 100
+                total_read_formatted = format_number_with_suffix(total_read_all)
+                total_write_formatted = format_number_with_suffix(total_write_all)
+
+                logging.info(
+                    f" {'TOTAL':>12} (100.0%): {total_read_formatted:>8} read ({total_read_pct:5.1f}%), {total_write_formatted:>8} write ({total_write_pct:5.1f}%)"
+                )
+    else:
+        logging.info(
+            "BENCHMARK TEST SUITES ANALYSIS: No commands with ACL categories found"
+        )
+
+    # CommandStats CSV analysis
+    if csv_total_command_count > 0:
+        logging.info("")
+        logging.info("COMMANDSTATS CSV ANALYSIS (actual Redis usage):")
+        logging.info("-" * 50)
+
+        # Calculate read/write percentages for CSV data
+        csv_read_count = sum(csv_read_commands.values())
+        csv_write_count = sum(csv_write_commands.values())
+        csv_rw_count = csv_read_count + csv_write_count
+
+        if csv_rw_count > 0:
+            read_percentage = (csv_read_count / csv_rw_count) * 100
+            write_percentage = (csv_write_count / csv_rw_count) * 100
+
+            logging.info(f"READ/WRITE COMMAND DISTRIBUTION:")
+            logging.info(
+                f" Read commands: {csv_read_count:8d} ({read_percentage:5.1f}%)"
+            )
+            logging.info(
+                f" Write commands: {csv_write_count:8d} ({write_percentage:5.1f}%)"
+            )
+            logging.info(f" Total R/W: {csv_rw_count:8d} (100.0%)")
+        else:
+            logging.info("No read/write commands detected in CSV ACL categories")
+
+        # Calculate fast/slow percentages for CSV data
+        csv_fast_count = sum(csv_fast_commands.values())
+        csv_slow_count = sum(csv_slow_commands.values())
+        csv_fs_count = csv_fast_count + csv_slow_count
+
+        if csv_fs_count > 0:
+            fast_percentage = (csv_fast_count / csv_fs_count) * 100
+            slow_percentage = (csv_slow_count / csv_fs_count) * 100
+
+            logging.info(f"")
+            logging.info(f"FAST/SLOW COMMAND DISTRIBUTION:")
+            logging.info(
+                f" Fast commands: {csv_fast_count:8d} ({fast_percentage:5.1f}%)"
+            )
+            logging.info(
+                f" Slow commands: {csv_slow_count:8d} ({slow_percentage:5.1f}%)"
+            )
+            logging.info(f" Total F/S: {csv_fs_count:8d} (100.0%)")
+        else:
+            logging.info("No fast/slow commands detected in CSV ACL categories")
+
+        # Group breakdown for CSV data
+        if csv_group_total:
+            logging.info("")
+            logging.info("READ/WRITE BREAKDOWN BY COMMAND GROUP:")
+
+            # Calculate total calls across all groups
+            total_all_calls = sum(csv_group_total.values())
+
+            # Create list of groups with their total calls for sorting
+            group_data = []
+            for group, total_group in csv_group_total.items():
+                read_count = csv_group_read.get(group, 0)
+                write_count = csv_group_write.get(group, 0)
+                group_data.append((group, read_count, write_count, total_group))
+
+            # Sort by total calls (descending)
+            group_data.sort(key=lambda x: x[3], reverse=True)
+
+            total_read_all = 0
+            total_write_all = 0
+
+            for group, read_count, write_count, total_group in group_data:
+                group_pct = (total_group / total_all_calls) * 100
+                read_pct = (read_count / total_group) * 100 if total_group > 0 else 0
+                write_pct = (write_count / total_group) * 100 if total_group > 0 else 0
+
+                read_formatted = format_number_with_suffix(read_count)
+                write_formatted = format_number_with_suffix(write_count)
+
+                logging.info(
+                    f" {group.upper():>12} ({group_pct:4.1f}%): {read_formatted:>8} read ({read_pct:5.1f}%), {write_formatted:>8} write ({write_pct:5.1f}%)"
+                )
+
+                total_read_all += read_count
+                total_write_all += write_count
+
+            # Add total row
+            if group_data:
+                total_read_pct = (total_read_all / total_all_calls) * 100
+                total_write_pct = (total_write_all / total_all_calls) * 100
+                total_read_formatted = format_number_with_suffix(total_read_all)
+                total_write_formatted = format_number_with_suffix(total_write_all)
+
+                logging.info(
+                    f" {'TOTAL':>12} (100.0%): {total_read_formatted:>8} read ({total_read_pct:5.1f}%), {total_write_formatted:>8} write ({total_write_pct:5.1f}%)"
+                )
+
+        # Validate parsing accuracy by comparing with provided percentages
+        if csv_provided_percentages and csv_original_counts:
+            logging.info("")
+            logging.info("PARSING VALIDATION:")
+            logging.info("-" * 30)
+
+            # Calculate total from original counts
+            total_original = sum(csv_original_counts.values())
+            total_provided_percentage = sum(csv_provided_percentages.values())
+
+            logging.info(f"Total original count: {total_original:,}")
+            logging.info(
+                f"Sum of provided percentages: {total_provided_percentage:.6f}%"
+            )
+
+            # Check if our billion parsing matches original counts
+            parsing_errors = 0
+            for cmd in csv_original_counts:
+                if cmd in priority:  # priority contains our parsed values
+                    parsed_value = priority[cmd]
+                    original_value = csv_original_counts[cmd]
+                    if parsed_value != original_value:
+                        parsing_errors += 1
+                        logging.warning(
+                            f"Parsing mismatch for {cmd}: parsed={parsed_value:,} vs original={original_value:,}"
+                        )
+
+            if parsing_errors == 0:
+                logging.info("✓ All billion/million/thousand parsing is accurate")
+            else:
+                logging.warning(f"✗ Found {parsing_errors} parsing errors")
+
+            # Validate percentage calculation
+            if abs(total_provided_percentage - 100.0) < 0.001:
+                logging.info("✓ Provided percentages sum to 100%")
+            else:
+                logging.warning(
+                    f"✗ Provided percentages sum to {total_provided_percentage:.6f}% (not 100%)"
+                )
+    else:
+        logging.info("")
+        logging.info(
+            "COMMANDSTATS CSV ANALYSIS: No CSV file provided or no commands found"
+        )
+
+    logging.info("=" * 80)
     # Save pipeline count to CSV
     csv_filename = "memtier_pipeline_count.csv"
     with open(csv_filename, "w", newline="") as csvfile:
@@ -714,3 +1172,133 @@ def generate_stats_cli_command_logic(args, project_name, project_version):
     )
 
     logging.info(f"Sorted command groups count data saved to {csv_filename}")
+
+    # Save ACL category data to CSV files
+
+    # Benchmark data CSV files
+    csv_filename = "benchmark_acl_read_write_commands.csv"
+    with open(csv_filename, "w", newline="") as csvfile:
+        fieldnames = ["command", "type", "count"]
+        writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
+        writer.writeheader()
+
+        for command, count in sorted(benchmark_read_commands.items()):
+            writer.writerow({"command": command, "type": "read", "count": count})
+        for command, count in sorted(benchmark_write_commands.items()):
+            writer.writerow({"command": command, "type": "write", "count": count})
+
+    logging.info(f"Benchmark ACL read/write commands data saved to {csv_filename}")
+
+    csv_filename = "benchmark_acl_fast_slow_commands.csv"
+    with open(csv_filename, "w", newline="") as csvfile:
+        fieldnames = ["command", "type", "count"]
+        writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
+        writer.writeheader()
+
+        for command, count in sorted(benchmark_fast_commands.items()):
+            writer.writerow({"command": command, "type": "fast", "count": count})
+        for command, count in sorted(benchmark_slow_commands.items()):
+            writer.writerow({"command": command, "type": "slow", "count": count})
+
+    logging.info(f"Benchmark ACL fast/slow commands data saved to {csv_filename}")
+
+    # CommandStats CSV data files
+    csv_filename = "commandstats_acl_read_write_commands.csv"
+    with open(csv_filename, "w", newline="") as csvfile:
+        fieldnames = ["command", "type", "count"]
+        writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
+        writer.writeheader()
+
+        for command, count in sorted(csv_read_commands.items()):
+            writer.writerow({"command": command, "type": "read", "count": count})
+        for command, count in sorted(csv_write_commands.items()):
+            writer.writerow({"command": command, "type": "write", "count": count})
+
+    logging.info(f"CommandStats ACL read/write commands data saved to {csv_filename}")
+
+    csv_filename = "commandstats_acl_fast_slow_commands.csv"
+    with open(csv_filename, "w", newline="") as csvfile:
+        fieldnames = ["command", "type", "count"]
+        writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
+        writer.writeheader()
+
+        for command, count in sorted(csv_fast_commands.items()):
+            writer.writerow({"command": command, "type": "fast", "count": count})
+        for command, count in sorted(csv_slow_commands.items()):
+            writer.writerow({"command": command, "type": "slow", "count": count})
+
+    logging.info(f"CommandStats ACL fast/slow commands data saved to {csv_filename}")
+
+    # Save group breakdown data to CSV files
+
+    # Benchmark group breakdown
+    csv_filename = "benchmark_group_read_write_breakdown.csv"
+    with open(csv_filename, "w", newline="") as csvfile:
+        fieldnames = [
+            "group",
+            "read_count",
+            "write_count",
+            "total_count",
+            "read_percentage",
+            "write_percentage",
+        ]
+        writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
+        writer.writeheader()
+
+        all_groups = set(benchmark_group_read.keys()) | set(
+            benchmark_group_write.keys()
+        )
+        for group in sorted(all_groups):
+            read_count = benchmark_group_read.get(group, 0)
+            write_count = benchmark_group_write.get(group, 0)
+            total_count = read_count + write_count
+            read_pct = (read_count / total_count * 100) if total_count > 0 else 0
+            write_pct = (write_count / total_count * 100) if total_count > 0 else 0
+
+            writer.writerow(
+                {
+                    "group": group,
+                    "read_count": read_count,
+                    "write_count": write_count,
+                    "total_count": total_count,
+                    "read_percentage": round(read_pct, 2),
+                    "write_percentage": round(write_pct, 2),
+                }
+            )
+
+    logging.info(f"Benchmark group read/write breakdown saved to {csv_filename}")
+
+    # CommandStats group breakdown
+    csv_filename = "commandstats_group_read_write_breakdown.csv"
+    with open(csv_filename, "w", newline="") as csvfile:
+        fieldnames = [
+            "group",
+            "read_count",
+            "write_count",
+            "total_count",
+            "read_percentage",
+            "write_percentage",
+        ]
+        writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
+        writer.writeheader()
+
+        all_groups = set(csv_group_read.keys()) | set(csv_group_write.keys())
+        for group in sorted(all_groups):
+            read_count = csv_group_read.get(group, 0)
+            write_count = csv_group_write.get(group, 0)
+            total_count = read_count + write_count
+            read_pct = (read_count / total_count * 100) if total_count > 0 else 0
+            write_pct = (write_count / total_count * 100) if total_count > 0 else 0
+
+            writer.writerow(
+                {
+                    "group": group,
+                    "read_count": read_count,
+                    "write_count": write_count,
+                    "total_count": total_count,
+                    "read_percentage": round(read_pct, 2),
+                    "write_percentage": round(write_pct, 2),
+                }
+            )
+
+    logging.info(f"CommandStats group read/write breakdown saved to {csv_filename}")
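
Every CSV emitted above uses a flat header, so the files can be read back directly with csv.DictReader; a small sketch for the group breakdown file (filename and fieldnames as written by the diff):

    import csv

    with open("benchmark_group_read_write_breakdown.csv", newline="") as fd:
        for row in csv.DictReader(fd):
            print(
                row["group"],
                row["read_count"],
                row["write_count"],
                row["read_percentage"] + "% read",
            )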
redis_benchmarks_specification/__compare__/args.py

@@ -46,12 +46,6 @@ def create_compare_arguments(parser):
         default="",
         help="specify a test (or a comma separated list of tests) to use for comparison. If none is specified by default will use all of them.",
     )
-    parser.add_argument(
-        "--use-test-suites-folder",
-        action="store_true",
-        default=False,
-        help="Use test names from YAML files in test-suites folder instead of database",
-    )
     parser.add_argument(
         "--defaults_filename",
         type=str,
redis_benchmarks_specification/__compare__/compare.py

@@ -399,8 +399,6 @@ def compare_command_logic(args, project_name, project_version):
         args.regression_str,
         args.improvement_str,
         tests_with_config,
-        args.use_test_suites_folder,
-        testsuites_folder,
     )
     total_regressions = len(regressions_list)
     total_improvements = len(improvements_list)
@@ -688,8 +686,6 @@ def compute_regression_table(
     regression_str="REGRESSION",
     improvement_str="IMPROVEMENT",
     tests_with_config={},
-    use_test_suites_folder=False,
-    test_suites_folder=None,
 ):
     START_TIME_NOW_UTC, _, _ = get_start_time_vars()
     START_TIME_LAST_MONTH_UTC = START_TIME_NOW_UTC - datetime.timedelta(days=31)
@@ -750,10 +746,6 @@ def compute_regression_table(
     if test != "":
         test_names = test.split(",")
         logging.info("Using test name {}".format(test_names))
-    elif use_test_suites_folder:
-        test_names = get_test_names_from_yaml_files(
-            test_suites_folder, tags_regex_string
-        )
     else:
         test_names = get_test_names_from_db(
             rts, tags_regex_string, test_names, used_key
@@ -1628,31 +1620,6 @@ def get_test_names_from_db(rts, tags_regex_string, test_names, used_key):
     return test_names
 
 
-def get_test_names_from_yaml_files(test_suites_folder, tags_regex_string):
-    """Get test names from YAML files in test-suites folder"""
-    from redis_benchmarks_specification.__common__.runner import get_benchmark_specs
-
-    # Get all YAML files
-    yaml_files = get_benchmark_specs(test_suites_folder, test="", test_regex=".*")
-
-    # Extract test names (remove path and .yml extension)
-    test_names = []
-    for yaml_file in yaml_files:
-        test_name = os.path.basename(yaml_file).replace(".yml", "")
-        # Apply regex filtering like database version
-        match_obj = re.search(tags_regex_string, test_name)
-        if match_obj is not None:
-            test_names.append(test_name)
-
-    test_names.sort()
-    logging.info(
-        "Based on test-suites folder ({}) we have {} comparison points: {}".format(
-            test_suites_folder, len(test_names), test_names
-        )
-    )
-    return test_names
-
-
 def get_line(
     baseline_v_str,
     comparison_v_str,
redis_benchmarks_specification/__self_contained_coordinator__/docker.py

@@ -15,7 +15,7 @@ def generate_standalone_redis_server_args(
     redis_arguments="",
     password=None,
 ):
-    added_params = ["port", "protected-mode", "dir", "requirepass"]
+    added_params = ["port", "protected-mode", "dir", "requirepass", "logfile"]
     # start redis-server
     command = [
         binary,
@@ -31,6 +31,7 @@ def generate_standalone_redis_server_args(
         logging.info("Redis server will be started with password authentication")
     if dbdir != "":
         command.extend(["--dir", dbdir])
+        command.extend(["--logfile", f"{dbdir}redis.log"])
     if configuration_parameters is not None:
         for parameter, parameter_value in configuration_parameters.items():
             if parameter not in added_params:
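
With these two docker.py changes, a standalone server started with a non-empty dbdir also gets an explicit --logfile inside that directory, and any user-supplied logfile override is skipped because "logfile" joined added_params. The fragment below illustrates the resulting argument list for an assumed dbdir of /tmp/run1/; it is a sketch, not the full command the function builds.

    # Illustrative fragment of the generated redis-server command for dbdir="/tmp/run1/":
    command = [
        "redis-server",
        "--port", "6379",
        "--dir", "/tmp/run1/",
        "--logfile", "/tmp/run1/redis.log",   # f"{dbdir}redis.log" from the diff
    ]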
redis_benchmarks_specification/__self_contained_coordinator__/self_contained_coordinator.py

@@ -107,6 +107,43 @@ from redis_benchmarks_specification.__self_contained_coordinator__.artifacts imp
 from redis_benchmarks_specification.__self_contained_coordinator__.build_info import (
     extract_build_info_from_streamdata,
 )
+
+
+def print_directory_logs(directory_path, description=""):
+    """Print all .log files in a directory for debugging purposes."""
+    if not os.path.exists(directory_path):
+        logging.warning(f"Directory {directory_path} does not exist")
+        return
+
+    logging.info(
+        f"Printing all .log files in {description} directory: {directory_path}"
+    )
+    try:
+        for root, dirs, files in os.walk(directory_path):
+            for file in files:
+                # Only process .log files
+                if not file.endswith(".log"):
+                    continue
+
+                file_path = os.path.join(root, file)
+                logging.info(f"Found log file: {file_path}")
+                try:
+                    # Try to read and print the log file content
+                    with open(file_path, "r", encoding="utf-8", errors="ignore") as f:
+                        content = f.read()
+                        if content.strip():  # Only print non-empty files
+                            logging.info(f"Content of {file_path}:")
+                            logging.info("-" * 40)
+                            logging.info(content)
+                            logging.info("-" * 40)
+                        else:
+                            logging.info(f"Log file {file_path} is empty")
+                except Exception as e:
+                    logging.warning(f"Could not read log file {file_path}: {e}")
+    except Exception as e:
+        logging.error(f"Error walking directory {directory_path}: {e}")
+
+
 from redis_benchmarks_specification.__self_contained_coordinator__.cpuset import (
     extract_db_cpu_limit,
     generate_cpuset_cpus,
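
Usage note: the helper takes a directory plus a human-readable label and only dumps files ending in .log; a short sketch with illustrative paths:

    # Dump any redis.log (or other *.log) files left in a test's temporary directories.
    print_directory_logs("/tmp/tmpabc123", "Redis server")
    print_directory_logs("/tmp/tmpdef456", "Client")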
@@ -1319,6 +1356,18 @@ def process_self_contained_coordinator_stream(
 
                        print("-" * 60)
 
+                        # Print all log files in the temporary directories for debugging
+                        logging.critical(
+                            "Printing all files in temporary directories for debugging..."
+                        )
+                        try:
+                            print_directory_logs(temporary_dir, "Redis server")
+                            print_directory_logs(temporary_dir_client, "Client")
+                        except Exception as log_error:
+                            logging.error(
+                                f"Failed to print directory logs: {log_error}"
+                            )
+
                        test_result = False
                        # tear-down
                        logging.info("Tearing down setup")
@@ -1349,14 +1398,27 @@ def process_self_contained_coordinator_stream(
                            )
                        )
                        pass
-                    logging.info(
-                        "Removing temporary dirs {} and {}".format(
-                            temporary_dir, temporary_dir_client
-                        )
-                    )
 
-
-
+                    # Only remove temporary directories if test passed
+                    if test_result:
+                        logging.info(
+                            "Test passed. Removing temporary dirs {} and {}".format(
+                                temporary_dir, temporary_dir_client
+                            )
+                        )
+                        shutil.rmtree(temporary_dir, ignore_errors=True)
+                        shutil.rmtree(
+                            temporary_dir_client, ignore_errors=True
+                        )
+                    else:
+                        logging.warning(
+                            "Test failed. Preserving temporary dirs for debugging: {} and {}".format(
+                                temporary_dir, temporary_dir_client
+                            )
+                        )
+                        # Print all log files in the temporary directories for debugging
+                        print_directory_logs(temporary_dir, "Redis server")
+                        print_directory_logs(temporary_dir_client, "Client")
 
                    overall_result &= test_result
 
redis_benchmarks_specification/test-suites/memtier_benchmark-10Kkeys-load-list-rpush-bulkload-pipeline-50.yml (new file)

@@ -0,0 +1,39 @@
+version: 0.4
+name: memtier_benchmark-10Kkeys-load-list-rpush-bulkload-pipeline-50
+description: |
+  Runs memtier_benchmark to stress multi-element RPUSH on a single LIST key using
+  high pipelining and concurrency. This targets quicklist multi-insert behavior and
+  bulk argument parsing performance (context: Redis PR #13860).
+dbconfig:
+  configuration-parameters:
+    save: '""'
+  check:
+    keyspacelen: 0
+  resources:
+    requests:
+      memory: 1g
+tested-groups:
+  - list
+tested-commands:
+  - rpush
+redis-topologies:
+  - oss-standalone
+build-variants:
+  - gcc:15.2.0-amd64-debian-bookworm-default
+  - gcc:15.2.0-arm64-debian-bookworm-default
+  - dockerhub
+clientconfig:
+  run_image: redislabs/memtier_benchmark:edge
+  tool: memtier_benchmark
+  arguments: >-
+    --pipeline 50
+    --command "RPUSH __key__ a b c d e f g h i j k l m n o p q r s t u v w x y z
+    aa bb cc dd ee ff gg hh ii jj kk ll mm nn oo pp qq rr ss tt uu vv ww xx yy zz
+    ab bc cd de ef fg gh hi ij jk kl lm mn no op pq qr rs st tu uv vw wx xy yz za
+    ac bd ce df eg fh gi hj ik jl km ln mo np oq pr qs rt su tv uw vx" --distinct-client-seed
+    --test-time 120 -c 50 -t 4 --hide-histogram --key-minimum=1 --key-maximum 10000
+  resources:
+    requests:
+      cpus: '4'
+      memory: 2g
+priority: 39
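
For reproducing this workload outside the coordinator, the clientconfig arguments map onto a direct memtier_benchmark invocation. The sketch below copies the arguments from the YAML; the target host/port flags and the server address are assumptions.

    import subprocess

    rpush_values = (
        "a b c d e f g h i j k l m n o p q r s t u v w x y z "
        "aa bb cc dd ee ff gg hh ii jj kk ll mm nn oo pp qq rr ss tt uu vv ww xx yy zz "
        "ab bc cd de ef fg gh hi ij jk kl lm mn no op pq qr rs st tu uv vw wx xy yz za "
        "ac bd ce df eg fh gi hj ik jl km ln mo np oq pr qs rt su tv uw vx"
    )

    subprocess.run(
        [
            "memtier_benchmark",
            "-s", "127.0.0.1", "-p", "6379",   # assumed target server
            "--pipeline", "50",
            "--command", f"RPUSH __key__ {rpush_values}",
            "--distinct-client-seed",
            "--test-time", "120",
            "-c", "50", "-t", "4",
            "--hide-histogram",
            "--key-minimum=1", "--key-maximum", "10000",
        ],
        check=True,
    )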
{redis_benchmarks_specification-0.1.322.dist-info → redis_benchmarks_specification-0.1.323.dist-info}/METADATA

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: redis-benchmarks-specification
-Version: 0.1.
+Version: 0.1.323
 Summary: The Redis benchmarks specification describes the cross-language/tools requirements and expectations to foster performance and observability standards around redis related technologies. Members from both industry and academia, including organizations and individuals are encouraged to contribute.
 Author: filipecosta90
 Author-email: filipecosta.90@gmail.com
{redis_benchmarks_specification-0.1.322.dist-info → redis_benchmarks_specification-0.1.323.dist-info}/RECORD

@@ -4,12 +4,12 @@ redis_benchmarks_specification/__api__/api.py,sha256=k_CMICtMm1z8jY3hByaL0hIr_5v
 redis_benchmarks_specification/__api__/app.py,sha256=JzQm84DjIVdfLbDO423BJbrds6gFzMbA0syRkHE_aUU,7063
 redis_benchmarks_specification/__builder__/Readme.md,sha256=O6MV_J3OSgzW-ir2TbukP8Vhkm_LOzQJJndG1Cykqic,111
 redis_benchmarks_specification/__builder__/__init__.py,sha256=l-G1z-t6twUgi8QLueqoTQLvJmv3hJoEYskGm6H7L6M,83
-redis_benchmarks_specification/__builder__/builder.py,sha256=
+redis_benchmarks_specification/__builder__/builder.py,sha256=86DQuqf9LhPl1_bpmQK2rkACBxYBz13Wu8fsAnKkm7g,29730
 redis_benchmarks_specification/__builder__/schema.py,sha256=1wcmyVJBcWrBvK58pghN9NCoWLCO3BzPsmdKWYfkVog,584
 redis_benchmarks_specification/__cli__/__init__.py,sha256=l-G1z-t6twUgi8QLueqoTQLvJmv3hJoEYskGm6H7L6M,83
-redis_benchmarks_specification/__cli__/args.py,sha256=
-redis_benchmarks_specification/__cli__/cli.py,sha256=
-redis_benchmarks_specification/__cli__/stats.py,sha256=
+redis_benchmarks_specification/__cli__/args.py,sha256=C0EdJbq5F6Td6kvEkzN5ZWMhWYuizV_tGzVhkPLKEi0,7207
+redis_benchmarks_specification/__cli__/cli.py,sha256=iTjINQ-RV_q2ovq1neSoRCAggpGdeP5mX3_1aFxSScY,22001
+redis_benchmarks_specification/__cli__/stats.py,sha256=r9JIfwGCSR3maozYbDZfZrkthNFQSs0xIymS86yZ6Iw,55574
 redis_benchmarks_specification/__common__/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 redis_benchmarks_specification/__common__/builder_schema.py,sha256=kfDpRIk7NkJrb5qj9jzsBhLVNO7K_W2Clumj4pxrkG8,5938
 redis_benchmarks_specification/__common__/env.py,sha256=kvJ8Ll-fvI_Tc0vynrzUEr22TqnJizzvJ4Lu9RjNr_M,3119
@@ -19,8 +19,8 @@ redis_benchmarks_specification/__common__/runner.py,sha256=M-o1QZVlp3thFW-55PiaW
 redis_benchmarks_specification/__common__/spec.py,sha256=D_SN48wg6NMthW_-OS1H5bydSDiuZpfd4WPPj7Vfwmc,5760
 redis_benchmarks_specification/__common__/timeseries.py,sha256=uvS3T2zdrSmW_B2S0MYTekJfHUllqU3RlD0LrF957RQ,52904
 redis_benchmarks_specification/__compare__/__init__.py,sha256=DtBXRp0Q01XgCFmY-1OIePMyyYihVNAjZ1Y8zwqSDN0,101
-redis_benchmarks_specification/__compare__/args.py,sha256
-redis_benchmarks_specification/__compare__/compare.py,sha256=
+redis_benchmarks_specification/__compare__/args.py,sha256=f3ZSs8GzyIzaMzX2h9cx0nOrnlO4aXToO1SBzBlpzKM,7608
+redis_benchmarks_specification/__compare__/compare.py,sha256=OrpCpY66rlbP5so6aYCdSF9Sy3sdhKrnzVJK1u3XQno,62912
 redis_benchmarks_specification/__init__.py,sha256=YQIEx2sLPPA0JR9OuCuMNMNtm-f_gqDKgzvNJnkGNKY,491
 redis_benchmarks_specification/__runner__/__init__.py,sha256=l-G1z-t6twUgi8QLueqoTQLvJmv3hJoEYskGm6H7L6M,83
 redis_benchmarks_specification/__runner__/args.py,sha256=-el2RttOjjc4Y9yOM1P5y9BwIkBPp_Y1k7OsP91P2BI,10651
@@ -32,11 +32,11 @@ redis_benchmarks_specification/__self_contained_coordinator__/artifacts.py,sha25
 redis_benchmarks_specification/__self_contained_coordinator__/build_info.py,sha256=vlg8H8Rxu2falW8xp1GvL1SV1fyBguSbz6Apxc7A2yM,2282
 redis_benchmarks_specification/__self_contained_coordinator__/clients.py,sha256=EL1V4-i-tTav1mcF_CUosqPF3Q1qi9BZL0zFajEk70c,1878
 redis_benchmarks_specification/__self_contained_coordinator__/cpuset.py,sha256=sRvtoJIitppcOpm3R5LbVmSfPEAqPumOqVATnF5Wbek,594
-redis_benchmarks_specification/__self_contained_coordinator__/docker.py,sha256=
+redis_benchmarks_specification/__self_contained_coordinator__/docker.py,sha256=09SyAfqlzs1KG9ZAajClNWtiNk4Jqzd--4-m3n1rLjU,3156
 redis_benchmarks_specification/__self_contained_coordinator__/post_processing.py,sha256=sVLKNnWdAqYY9DjVdqRC5tDaIrVSaI3Ca7w8-DQ-LRM,776
 redis_benchmarks_specification/__self_contained_coordinator__/prepopulation.py,sha256=1UeFr2T1ZQBcHCSd4W1ZtaWgXyFPfjLyDi_DgDc1eTA,2957
 redis_benchmarks_specification/__self_contained_coordinator__/runners.py,sha256=noRHn9leTfEm2fa1yHBHQd8TUGhFDoU86QQkHABnWSs,30073
-redis_benchmarks_specification/__self_contained_coordinator__/self_contained_coordinator.py,sha256=
+redis_benchmarks_specification/__self_contained_coordinator__/self_contained_coordinator.py,sha256=QcQwPWvhS5C96NNkZFzi0xwMzwLzl6kxYmS1sWTU7s0,82532
 redis_benchmarks_specification/__setups__/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 redis_benchmarks_specification/__setups__/topologies.py,sha256=xQ1IJkcTji_ZjLiJd3vOxZpvbNtBLZw9cPkw5hGJKHU,481
 redis_benchmarks_specification/__spec__/__init__.py,sha256=l-G1z-t6twUgi8QLueqoTQLvJmv3hJoEYskGm6H7L6M,83
@@ -59,6 +59,7 @@ redis_benchmarks_specification/test-suites/memtier_benchmark-100Kkeys-load-hash-
 redis_benchmarks_specification/test-suites/memtier_benchmark-100Kkeys-load-hash-50-fields-with-100B-values.yml,sha256=QcXCcTdWyaNW5M0qHNC7nUmevzCxwuQVoOMbFU0DmkE,1849
 redis_benchmarks_specification/test-suites/memtier_benchmark-100Kkeys-load-hash-50-fields-with-10B-values.yml,sha256=WIdn1sGgeeYsCC9FMggHBxSsR_2fQR_q5hpPTs0iXw0,1842
 redis_benchmarks_specification/test-suites/memtier_benchmark-10Kkeys-load-hash-50-fields-with-10000B-values.yml,sha256=QVVn2S5wtCvjHnBuSTU3U1S9nCTvA0HMHnwoKCX4Quo,1852
+redis_benchmarks_specification/test-suites/memtier_benchmark-10Kkeys-load-list-rpush-bulkload-pipeline-50.yml,sha256=Grvp5OgJ9dMpB8kTKZ6PBjWnPR6tcu9_AvQHV5tyR9o,1262
 redis_benchmarks_specification/test-suites/memtier_benchmark-10Kkeys-load-list-with-10B-values-pipeline-50.yml,sha256=MOvXrPrkdkmIyFaFaEBQbl5UlFSaS4VoQIjl3Ue-Yl4,894
 redis_benchmarks_specification/test-suites/memtier_benchmark-10Mkeys-load-hash-5-fields-with-100B-values-pipeline-10.yml,sha256=yQDHjk7C1wxxGgY3wiw0ebtX19VeQYAZ6U9nZj7jii0,1003
 redis_benchmarks_specification/test-suites/memtier_benchmark-10Mkeys-load-hash-5-fields-with-100B-values.yml,sha256=r_8UlvEcYm49UND5-Kt4EY7-0XOeH-DtmhvJNiGVu5Q,969
@@ -278,8 +279,8 @@ redis_benchmarks_specification/test-suites/memtier_benchmark-playbook-session-st
 redis_benchmarks_specification/test-suites/memtier_benchmark-playbook-session-storage-1k-sessions.yml,sha256=2egtIxPxCze2jlbAfgsk4v9JSQHNMoPLbDWFEW8olDg,7006
 redis_benchmarks_specification/test-suites/template.txt,sha256=ezqGiRPOvuSDO0iG7GEf-AGXNfHbgXI89_G0RUEzL88,481
 redis_benchmarks_specification/vector-search-test-suites/vector_db_benchmark_test.yml,sha256=PD7ow-k4Ll2BkhEC3aIqiaCZt8Hc4aJIp96Lw3J3mcI,791
-redis_benchmarks_specification-0.1.
-redis_benchmarks_specification-0.1.
-redis_benchmarks_specification-0.1.
-redis_benchmarks_specification-0.1.
-redis_benchmarks_specification-0.1.
+redis_benchmarks_specification-0.1.323.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+redis_benchmarks_specification-0.1.323.dist-info/METADATA,sha256=KIfE50uAlLrrLWpEoihY9p6EoCqobK4Txg7ZfUSTcHE,22726
+redis_benchmarks_specification-0.1.323.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
+redis_benchmarks_specification-0.1.323.dist-info/entry_points.txt,sha256=x5WBXCZsnDRTZxV7SBGmC65L2k-ygdDOxV8vuKN00Nk,715
+redis_benchmarks_specification-0.1.323.dist-info/RECORD,,

Files without changes: {redis_benchmarks_specification-0.1.322.dist-info → redis_benchmarks_specification-0.1.323.dist-info}/LICENSE, /WHEEL, /entry_points.txt.