redis-benchmarks-specification 0.1.94-py3-none-any.whl → 0.1.96-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of redis-benchmarks-specification might be problematic.

--- redis_benchmarks_specification/__common__/github.py (0.1.94)
+++ redis_benchmarks_specification/__common__/github.py (0.1.96)
@@ -2,6 +2,17 @@ import logging
 from github import Github


+def check_regression_comment(comments):
+    res = False
+    pos = -1
+    for n, comment in enumerate(comments):
+        body = comment.body
+        if "Comparison between" in body and "Time Period from" in body:
+            res = True
+            pos = n
+    return res, pos
+
+
 def generate_build_started_pr_comment(
     build_datetime,
     commit_datetime,
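The helper added above scans a pull request's existing comments for a previously posted performance comparison so it can be updated in place rather than duplicated. A minimal usage sketch, assuming PyGithub-style objects exposing a .body attribute; the SimpleNamespace stand-ins are hypothetical test doubles, not part of the package:

from types import SimpleNamespace

comments = [
    SimpleNamespace(body="CI build log"),
    SimpleNamespace(body="Comparison between unstable and my-branch\nTime Period from 6 months ago"),
]
found, pos = check_regression_comment(comments)
# found is True and pos == 1, so the caller can edit comments[pos]
# instead of posting a second regression comment.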
@@ -21,6 +32,8 @@ def generate_build_started_pr_comment(
     )
     comment_body += f"Started building at {build_datetime}\n"
     comment_body += "You can check each build/benchmark progress in grafana:\n"
+    if not isinstance(git_hash, str):
+        git_hash = git_hash.decode()
     comment_body += f" - git hash: {git_hash}\n"
     comment_body += f" - git branch: {git_branch}\n"
     comment_body += f" - commit date and time: {commit_datetime}\n"
@@ -57,6 +70,8 @@ def generate_build_finished_pr_comment(
     )
     comment_body += f"Started building at {build_start_datetime} and took {build_duration_seconds} seconds.\n"
     comment_body += "You can check each build/benchmark progress in grafana:\n"
+    if not isinstance(git_hash, str):
+        git_hash = git_hash.decode()
     comment_body += f" - git hash: {git_hash}\n"
     comment_body += f" - git branch: {git_branch}\n"
     comment_body += f" - commit date and time: {commit_datetime}\n"
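Both guards above normalize git_hash values that may arrive as raw bytes (the coordinator reads the event stream with decode_responses=False) before interpolating them into the PR comment. A small illustrative sketch of the same pattern, not code from the package:

def ensure_str(value):
    # Redis values read with decode_responses=False come back as bytes;
    # comment bodies need plain str, so decode only when needed.
    if not isinstance(value, str):
        value = value.decode()
    return value

assert ensure_str(b"3f8e1c2") == "3f8e1c2"
assert ensure_str("3f8e1c2") == "3f8e1c2"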
@@ -237,7 +252,7 @@ def generate_benchmark_started_pr_comment(
     )

     comment_body += f"Started benchmark suite at {benchmark_suite_start_datetime} and took {benchmark_suite_duration_secs} seconds "
-    if total_pending == 0:
+    if total_pending > 0:
         comment_body += "up until now.\n"
     else:
         comment_body += "to finish.\n"
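The inverted comparison fixed above controls the wording of the progress comment: while tests are still pending, the elapsed time is reported "up until now", and only a drained queue reads "to finish". A tiny sketch of the corrected logic, illustrative only:

def suite_duration_suffix(total_pending):
    # Pending work means the reported duration is partial.
    return "up until now.\n" if total_pending > 0 else "to finish.\n"

assert suite_duration_suffix(3) == "up until now.\n"
assert suite_duration_suffix(0) == "to finish.\n"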
--- redis_benchmarks_specification/__compare__/compare.py (0.1.94)
+++ redis_benchmarks_specification/__compare__/compare.py (0.1.96)
@@ -20,6 +20,7 @@ from redis_benchmarks_specification.__common__.github import (
     update_comment_if_needed,
     create_new_pr_comment,
     check_github_available_and_actionable,
+    check_regression_comment,
 )
 from redis_benchmarks_specification.__compare__.args import create_compare_arguments

@@ -165,45 +166,9 @@ def compare_command_logic(args, project_name, project_version):
         username=args.redistimeseries_user,
     )
     rts.ping()
-    default_baseline_branch = None
-    default_metrics_str = ""
-    if args.defaults_filename != "" and os.path.exists(args.defaults_filename):
-        logging.info(
-            "Loading configuration from defaults file: {}".format(
-                args.defaults_filename
-            )
-        )
-        with open(args.defaults_filename) as yaml_fd:
-            defaults_dict = yaml.safe_load(yaml_fd)
-            if "exporter" in defaults_dict:
-                exporter_dict = defaults_dict["exporter"]
-                if "comparison" in exporter_dict:
-                    comparison_dict = exporter_dict["comparison"]
-                    if "metrics" in comparison_dict:
-                        metrics = comparison_dict["metrics"]
-                        logging.info("Detected defaults metrics info. reading metrics")
-                        default_metrics = []
-
-                        for metric in metrics:
-                            if metric.startswith("$."):
-                                metric = metric[2:]
-                            logging.info("Will use metric: {}".format(metric))
-                            default_metrics.append(metric)
-                        if len(default_metrics) == 1:
-                            default_metrics_str = default_metrics[0]
-                        if len(default_metrics) > 1:
-                            default_metrics_str = "({})".format(
-                                ",".join(default_metrics)
-                            )
-                        logging.info("Default metrics: {}".format(default_metrics_str))
-
-                    if "baseline-branch" in comparison_dict:
-                        default_baseline_branch = comparison_dict["baseline-branch"]
-                        logging.info(
-                            "Detected baseline branch in defaults file. {}".format(
-                                default_baseline_branch
-                            )
-                        )
+    default_baseline_branch, default_metrics_str = extract_default_branch_and_metric(
+        args.defaults_filename
+    )

     tf_github_org = args.github_org
     tf_github_repo = args.github_repo
@@ -300,25 +265,7 @@ def compare_command_logic(args, project_name, project_version):
     ) = check_github_available_and_actionable(
         fn, github_token, pull_request, tf_github_org, tf_github_repo, verbose
     )
-
-    grafana_dashboards_uids = {
-        "redisgraph": "SH9_rQYGz",
-        "redisbloom": "q4-5sRR7k",
-        "redisearch": "3Ejv2wZnk",
-        "redisjson": "UErSC0jGk",
-        "redistimeseries": "2WMw61UGz",
-    }
-    uid = None
-    if tf_github_repo.lower() in grafana_dashboards_uids:
-        uid = grafana_dashboards_uids[tf_github_repo.lower()]
-    grafana_link_base = None
-    if uid is not None:
-        grafana_link_base = "{}/{}".format(grafana_base_dashboard, uid)
-        logging.info(
-            "There is a grafana dashboard for this repo. Base link: {}".format(
-                grafana_link_base
-            )
-        )
+    grafana_link_base = "https://benchmarksredisio.grafana.net/d/1fWbtb7nz/experimental-oss-spec-benchmarks"

     (
         detected_regressions,
@@ -358,7 +305,71 @@ def compare_command_logic(args, project_name, project_version):
         use_metric_context_path,
         running_platform,
     )
-    comment_body = ""
+    prepare_regression_comment(
+        auto_approve,
+        baseline_branch,
+        baseline_tag,
+        comparison_branch,
+        comparison_tag,
+        contains_regression_comment,
+        github_pr,
+        grafana_link_base,
+        is_actionable_pr,
+        old_regression_comment_body,
+        pr_link,
+        regression_comment,
+        rts,
+        running_platform,
+        table_output,
+        tf_github_org,
+        tf_github_repo,
+        tf_triggering_env,
+        total_comparison_points,
+        total_improvements,
+        total_regressions,
+        total_stable,
+        total_unstable,
+        verbose,
+        args.regressions_percent_lower_limit,
+    )
+    return (
+        detected_regressions,
+        "",
+        total_improvements,
+        total_regressions,
+        total_stable,
+        total_unstable,
+        total_comparison_points,
+    )
+
+
+def prepare_regression_comment(
+    auto_approve,
+    baseline_branch,
+    baseline_tag,
+    comparison_branch,
+    comparison_tag,
+    contains_regression_comment,
+    github_pr,
+    grafana_link_base,
+    is_actionable_pr,
+    old_regression_comment_body,
+    pr_link,
+    regression_comment,
+    rts,
+    running_platform,
+    table_output,
+    tf_github_org,
+    tf_github_repo,
+    tf_triggering_env,
+    total_comparison_points,
+    total_improvements,
+    total_regressions,
+    total_stable,
+    total_unstable,
+    verbose,
+    regressions_percent_lower_limit,
+):
     if total_comparison_points > 0:
         comment_body = "### Automated performance analysis summary\n\n"
         comment_body += "This comment was automatically generated given there is performance data available.\n\n"
@@ -386,7 +397,7 @@ def compare_command_logic(args, project_name, project_version):
             )
         if total_regressions > 0:
             comparison_summary += "- Detected a total of {} regressions bellow the regression water line {}.\n".format(
-                total_regressions, args.regressions_percent_lower_limit
+                total_regressions, regressions_percent_lower_limit
            )

         comment_body += comparison_summary
@@ -430,19 +441,6 @@ def compare_command_logic(args, project_name, project_version):
                     zset_project_pull_request, comparison_branch, res
                 )
             )
-            user_input = "n"
-            html_url = "n/a"
-            (
-                baseline_str,
-                by_str_baseline,
-                comparison_str,
-                by_str_comparison,
-            ) = get_by_strings(
-                baseline_branch,
-                comparison_branch,
-                baseline_tag,
-                comparison_tag,
-            )

             if contains_regression_comment:
                 update_comment_if_needed(
@@ -457,26 +455,47 @@ def compare_command_logic(args, project_name, project_version):

     else:
         logging.error("There was no comparison points to produce a table...")
-    return (
-        detected_regressions,
-        comment_body,
-        total_improvements,
-        total_regressions,
-        total_stable,
-        total_unstable,
-        total_comparison_points,
-    )


-def check_regression_comment(comments):
-    res = False
-    pos = -1
-    for n, comment in enumerate(comments):
-        body = comment.body
-        if "Comparison between" in body and "Time Period from" in body:
-            res = True
-            pos = n
-    return res, pos
+def extract_default_branch_and_metric(defaults_filename):
+    default_baseline_branch = None
+    default_metrics_str = ""
+    if defaults_filename != "" and os.path.exists(defaults_filename):
+        logging.info(
+            "Loading configuration from defaults file: {}".format(defaults_filename)
+        )
+        with open(defaults_filename) as yaml_fd:
+            defaults_dict = yaml.safe_load(yaml_fd)
+            if "exporter" in defaults_dict:
+                exporter_dict = defaults_dict["exporter"]
+                if "comparison" in exporter_dict:
+                    comparison_dict = exporter_dict["comparison"]
+                    if "metrics" in comparison_dict:
+                        metrics = comparison_dict["metrics"]
+                        logging.info("Detected defaults metrics info. reading metrics")
+                        default_metrics = []
+
+                        for metric in metrics:
+                            if metric.startswith("$."):
+                                metric = metric[2:]
+                            logging.info("Will use metric: {}".format(metric))
+                            default_metrics.append(metric)
+                        if len(default_metrics) == 1:
+                            default_metrics_str = default_metrics[0]
+                        if len(default_metrics) > 1:
+                            default_metrics_str = "({})".format(
+                                ",".join(default_metrics)
+                            )
+                        logging.info("Default metrics: {}".format(default_metrics_str))
+
+                    if "baseline-branch" in comparison_dict:
+                        default_baseline_branch = comparison_dict["baseline-branch"]
+                        logging.info(
+                            "Detected baseline branch in defaults file. {}".format(
+                                default_baseline_branch
+                            )
+                        )
+    return default_baseline_branch, default_metrics_str


 def compute_regression_table(
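The new extract_default_branch_and_metric helper centralizes parsing of the exporter defaults file so both the compare command and the self-contained coordinator can share it. For reference, a sketch of the file shape it expects; the keys mirror the parsing code above, while the concrete metric and branch values are only examples:

import yaml

example_defaults = """
exporter:
  comparison:
    metrics:
      - "$.ALL_STATS.Totals.Ops/sec"
    baseline-branch: unstable
"""
defaults_dict = yaml.safe_load(example_defaults)
comparison = defaults_dict["exporter"]["comparison"]
# The helper strips the leading "$." from each metric, so a single entry yields
# "ALL_STATS.Totals.Ops/sec" and multiple entries are joined as "(m1,m2)".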
--- redis_benchmarks_specification/__self_contained_coordinator__/args.py (0.1.94)
+++ redis_benchmarks_specification/__self_contained_coordinator__/args.py (0.1.96)
@@ -1,4 +1,5 @@
 import argparse
+import datetime
 import os
 from redis_benchmarks_specification.__common__.env import (
     MACHINE_CPU_COUNT,
@@ -19,6 +20,10 @@ from redis_benchmarks_specification.__common__.env import (
     PROFILERS_DEFAULT,
     ALLOWED_PROFILERS,
 )
+from redis_benchmarks_specification.__compare__.args import (
+    START_TIME_NOW_UTC,
+    START_TIME_LAST_SIX_MONTHS_UTC,
+)

 PERFORMANCE_GH_TOKEN = os.getenv("PERFORMANCE_GH_TOKEN", None)

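The coordinator's argument module now reuses the comparison time-window constants from the compare tool. A hedged sketch of equivalent definitions, assuming they follow the same 180-day lookback the coordinator applies below; the real values live in redis_benchmarks_specification/__compare__/args.py and may differ in detail:

import datetime

# Assumed equivalents of the imported constants: "now" plus a six-month lookback,
# mirroring the utcnow()/timedelta(days=180) window used for the automatic comparison.
START_TIME_NOW_UTC = datetime.datetime.utcnow()
START_TIME_LAST_SIX_MONTHS_UTC = START_TIME_NOW_UTC - datetime.timedelta(days=180)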
--- redis_benchmarks_specification/__self_contained_coordinator__/self_contained_coordinator.py (0.1.94)
+++ redis_benchmarks_specification/__self_contained_coordinator__/self_contained_coordinator.py (0.1.96)
@@ -31,6 +31,7 @@ from redis_benchmarks_specification.__common__.github import (
     update_comment_if_needed,
     create_new_pr_comment,
     generate_benchmark_started_pr_comment,
+    check_regression_comment,
 )
 from redis_benchmarks_specification.__common__.package import (
     get_version_string,
@@ -42,6 +43,11 @@ from redis_benchmarks_specification.__common__.runner import (
     exporter_datasink_common,
     execute_init_commands,
 )
+from redis_benchmarks_specification.__compare__.compare import (
+    compute_regression_table,
+    prepare_regression_comment,
+    extract_default_branch_and_metric,
+)
 from redis_benchmarks_specification.__runner__.runner import (
     print_results_table_stdout,
 )
@@ -145,7 +151,7 @@ def main():
         )
     )
     try:
-        conn = redis.StrictRedis(
+        gh_event_conn = redis.StrictRedis(
             host=args.event_stream_host,
             port=args.event_stream_port,
             decode_responses=False,  # dont decode due to binary archives
@@ -155,7 +161,7 @@
             socket_connect_timeout=REDIS_SOCKET_TIMEOUT,
             socket_keepalive=True,
         )
-        conn.ping()
+        gh_event_conn.ping()
     except redis.exceptions.ConnectionError as e:
         logging.error(
             "Unable to connect to redis available at: {}:{} to read the event streams".format(
@@ -195,7 +201,7 @@

     logging.info("checking build spec requirements")
     running_platform = args.platform_name
-    build_runners_consumer_group_create(conn, running_platform)
+    build_runners_consumer_group_create(gh_event_conn, running_platform)
     stream_id = None
     docker_client = docker.from_env()
     home = str(Path.home())
@@ -211,6 +217,10 @@
         f"Using priority for test filters [{priority_lower_limit},{priority_upper_limit}]"
     )

+    default_baseline_branch, default_metrics_str = extract_default_branch_and_metric(
+        args.defaults_filename
+    )
+
     # TODO: confirm we do have enough cores to run the spec
     # availabe_cpus = args.cpu_count
     datasink_push_results_redistimeseries = args.datasink_push_results_redistimeseries
@@ -270,7 +280,7 @@
         stream_id = args.consumer_start_id
     while True:
         _, stream_id, _, _ = self_contained_coordinator_blocking_read(
-            conn,
+            gh_event_conn,
             datasink_push_results_redistimeseries,
             docker_client,
             home,
@@ -292,11 +302,13 @@
             github_token,
             priority_lower_limit,
             priority_upper_limit,
+            default_baseline_branch,
+            default_metrics_str,
         )


 def self_contained_coordinator_blocking_read(
-    conn,
+    github_event_conn,
     datasink_push_results_redistimeseries,
     docker_client,
     home,
@@ -318,6 +330,8 @@ def self_contained_coordinator_blocking_read(
     github_token=None,
     priority_lower_limit=0,
     priority_upper_limit=10000,
+    default_baseline_branch="unstable",
+    default_metrics_str="ALL_STATS.Totals.Ops/sec",
 ):
     num_process_streams = 0
     num_process_test_suites = 0
@@ -330,7 +344,7 @@
             get_runners_consumer_group_name(platform_name), consumer_name
         )
     )
-    newTestInfo = conn.xreadgroup(
+    newTestInfo = github_event_conn.xreadgroup(
         get_runners_consumer_group_name(platform_name),
         consumer_name,
         {STREAM_KEYNAME_NEW_BUILD_EVENTS: stream_id},
@@ -345,7 +359,7 @@
         overall_result,
         total_test_suite_runs,
     ) = process_self_contained_coordinator_stream(
-        conn,
+        github_event_conn,
         datasink_push_results_redistimeseries,
         docker_client,
         home,
@@ -367,11 +381,13 @@
         github_token,
         priority_lower_limit,
         priority_upper_limit,
+        default_baseline_branch,
+        default_metrics_str,
     )
     num_process_streams = num_process_streams + 1
     num_process_test_suites = num_process_test_suites + total_test_suite_runs
     if overall_result is True:
-        ack_reply = conn.xack(
+        ack_reply = github_event_conn.xack(
             STREAM_KEYNAME_NEW_BUILD_EVENTS,
             get_runners_consumer_group_name(platform_name),
             stream_id,
@@ -420,7 +436,7 @@ def prepare_memtier_benchmark_parameters(


 def process_self_contained_coordinator_stream(
-    conn,
+    github_event_conn,
     datasink_push_results_redistimeseries,
     docker_client,
     home,
@@ -442,15 +458,17 @@
     github_token=None,
     priority_lower_limit=0,
     priority_upper_limit=10000,
+    default_baseline_branch="unstable",
+    default_metrics_str="ALL_STATS.Totals.Ops/sec",
 ):
     stream_id = "n/a"
     overall_result = False
     total_test_suite_runs = 0
     # github updates
     is_actionable_pr = False
-    contains_regression_comment = False
+    contains_benchmark_run_comment = False
     github_pr = None
-    old_regression_comment_body = ""
+    old_benchmark_run_comment_body = ""
     pr_link = ""
     regression_comment = None
     pull_request = None
@@ -500,12 +518,12 @@
             verbose = True
             fn = check_benchmark_running_comment
             (
-                contains_regression_comment,
+                contains_benchmark_run_comment,
                 github_pr,
                 is_actionable_pr,
-                old_regression_comment_body,
+                old_benchmark_run_comment_body,
                 pr_link,
-                regression_comment,
+                benchmark_run_comment,
             ) = check_github_available_and_actionable(
                 fn, github_token, pull_request, "redis", "redis", verbose
             )
@@ -546,13 +564,13 @@
                         run_image, airgap_key
                    )
                )
-                airgap_docker_image_bin = conn.get(airgap_key)
+                airgap_docker_image_bin = github_event_conn.get(airgap_key)
                 images_loaded = docker_client.images.load(airgap_docker_image_bin)
                 logging.info("Successfully loaded images {}".format(images_loaded))

             stream_time_ms = stream_id.split("-")[0]
             zset_running_platform_benchmarks = f"ci.benchmarks.redis/ci/redis/redis:benchmarks:{running_platform}:zset"
-            res = conn.zadd(
+            res = github_event_conn.zadd(
                 zset_running_platform_benchmarks,
                 {stream_id: stream_time_ms},
             )
@@ -580,8 +598,10 @@
                     benchmark_config,
                     test_name,
                 ) = get_final_benchmark_config(None, stream, "")
-                conn.lpush(stream_test_list_pending, test_name)
-                conn.expire(stream_test_list_pending, REDIS_BINS_EXPIRE_SECS)
+                github_event_conn.lpush(stream_test_list_pending, test_name)
+                github_event_conn.expire(
+                    stream_test_list_pending, REDIS_BINS_EXPIRE_SECS
+                )
                 logging.info(
                     f"Added test named {test_name} to the pending test list in key {stream_test_list_pending}"
                 )
@@ -599,16 +619,16 @@
                 )
                 # update on github if needed
                 if is_actionable_pr:
-                    if contains_regression_comment:
+                    if contains_benchmark_run_comment:
                         update_comment_if_needed(
                             auto_approve_github,
                             comment_body,
-                            old_regression_comment_body,
-                            regression_comment,
+                            old_benchmark_run_comment_body,
+                            benchmark_run_comment,
                             verbose,
                         )
                     else:
-                        regression_comment = create_new_pr_comment(
+                        benchmark_run_comment = create_new_pr_comment(
                             auto_approve_github, comment_body, github_pr, pr_link
                         )

@@ -621,9 +641,11 @@
                     benchmark_config,
                     test_name,
                 ) = get_final_benchmark_config(None, stream, "")
-                conn.lrem(stream_test_list_pending, 1, test_name)
-                conn.lpush(stream_test_list_running, test_name)
-                conn.expire(stream_test_list_running, REDIS_BINS_EXPIRE_SECS)
+                github_event_conn.lrem(stream_test_list_pending, 1, test_name)
+                github_event_conn.lpush(stream_test_list_running, test_name)
+                github_event_conn.expire(
+                    stream_test_list_running, REDIS_BINS_EXPIRE_SECS
+                )
                 logging.info(
                     f"Added test named {test_name} to the pending test list in key {stream_test_list_running}"
                 )
@@ -703,7 +725,10 @@
                    )

                     restore_build_artifacts_from_test_details(
-                        build_artifacts, conn, temporary_dir, testDetails
+                        build_artifacts,
+                        github_event_conn,
+                        temporary_dir,
+                        testDetails,
                     )
                     mnt_point = "/mnt/redis/"
                     command = generate_standalone_redis_server_args(
@@ -915,7 +940,7 @@
                             tf_github_repo,
                             git_hash,
                             overall_tabular_data_map,
-                            conn,
+                            github_event_conn,
                             setup_name,
                             start_time_ms,
                             start_time_str,
@@ -1143,11 +1168,13 @@

                     overall_result &= test_result

-                    conn.lrem(stream_test_list_running, 1, test_name)
-                    conn.lpush(stream_test_list_completed, test_name)
-                    conn.expire(stream_test_list_completed, REDIS_BINS_EXPIRE_SECS)
+                    github_event_conn.lrem(stream_test_list_running, 1, test_name)
+                    github_event_conn.lpush(stream_test_list_completed, test_name)
+                    github_event_conn.expire(
+                        stream_test_list_completed, REDIS_BINS_EXPIRE_SECS
+                    )
                     if test_result is False:
-                        conn.lpush(stream_test_list_failed, test_name)
+                        github_event_conn.lpush(stream_test_list_failed, test_name)
                         failed_tests = failed_tests + 1
                         logging.warning(
                             f"updating key {stream_test_list_failed} with the failed test: {test_name}. Total failed tests {failed_tests}."
@@ -1175,12 +1202,126 @@
                         update_comment_if_needed(
                             auto_approve_github,
                             comment_body,
+                            old_benchmark_run_comment_body,
+                            benchmark_run_comment,
+                            verbose,
+                        )
+                        logging.info(
+                            f"Updated github comment with latest test info {regression_comment.html_url}"
+                        )
+
+                        ###########################
+                        # regression part
+                        ###########################
+                        fn = check_regression_comment
+                        (
+                            contains_regression_comment,
+                            github_pr,
+                            is_actionable_pr,
                             old_regression_comment_body,
+                            pr_link,
                             regression_comment,
+                        ) = check_github_available_and_actionable(
+                            fn,
+                            github_token,
+                            pull_request,
+                            tf_github_org,
+                            tf_github_repo,
                             verbose,
                         )
                         logging.info(
-                            f"Updated github comment with latest test info {regression_comment.html_url}"
+                            f"Preparing regression info for the data available"
+                        )
+                        print_improvements_only = False
+                        print_regressions_only = False
+                        skip_unstable = False
+                        regressions_percent_lower_limit = 10.0
+                        simplify_table = False
+                        testname_regex = ""
+                        test = ""
+                        last_n_baseline = 1
+                        last_n_comparison = 31
+                        use_metric_context_path = False
+                        baseline_tag = None
+                        baseline_deployment_name = "oss-standalone"
+                        comparison_deployment_name = "oss-standalone"
+                        metric_name = "ALL_STATS.Totals.Ops/sec"
+                        metric_mode = "higher-better"
+                        to_date = datetime.datetime.utcnow()
+                        from_date = to_date - datetime.timedelta(days=180)
+                        baseline_branch = default_baseline_branch
+                        comparison_tag = git_branch
+                        comparison_branch = git_version
+                        to_ts_ms = None
+                        from_ts_ms = None
+
+                        (
+                            detected_regressions,
+                            table_output,
+                            total_improvements,
+                            total_regressions,
+                            total_stable,
+                            total_unstable,
+                            total_comparison_points,
+                        ) = compute_regression_table(
+                            datasink_conn,
+                            tf_github_org,
+                            tf_github_repo,
+                            tf_triggering_env,
+                            metric_name,
+                            comparison_branch,
+                            baseline_branch,
+                            baseline_tag,
+                            comparison_tag,
+                            baseline_deployment_name,
+                            comparison_deployment_name,
+                            print_improvements_only,
+                            print_regressions_only,
+                            skip_unstable,
+                            regressions_percent_lower_limit,
+                            simplify_table,
+                            test,
+                            testname_regex,
+                            verbose,
+                            last_n_baseline,
+                            last_n_comparison,
+                            metric_mode,
+                            from_date,
+                            from_ts_ms,
+                            to_date,
+                            to_ts_ms,
+                            use_metric_context_path,
+                            running_platform,
+                        )
+                        auto_approve = True
+                        grafana_link_base = "https://benchmarksredisio.grafana.net/d/1fWbtb7nz/experimental-oss-spec-benchmarks"
+
+                        prepare_regression_comment(
+                            auto_approve,
+                            baseline_branch,
+                            baseline_tag,
+                            comparison_branch,
+                            comparison_tag,
+                            contains_regression_comment,
+                            github_pr,
+                            grafana_link_base,
+                            is_actionable_pr,
+                            old_regression_comment_body,
+                            pr_link,
+                            regression_comment,
+                            datasink_conn,
+                            running_platform,
+                            table_output,
+                            tf_github_org,
+                            tf_github_repo,
+                            tf_triggering_env,
+                            total_comparison_points,
+                            total_improvements,
+                            total_regressions,
+                            total_stable,
+                            total_unstable,
+                            verbose,
+                            regressions_percent_lower_limit,
                         )
                         logging.info(
                             f"Added test named {test_name} to the completed test list in key {stream_test_list_completed}"
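Taken together, the coordinator changes above rename the event-stream connection to github_event_conn and, after each benchmark run on an actionable PR, trigger an automatic comparison: hardcoded defaults feed compute_regression_table over the datasink connection, and prepare_regression_comment then creates or updates the regression comment. A hedged summary of those fixed settings; this dict is illustrative only and is not a structure used by the package:

import datetime

AUTO_COMPARE_DEFAULTS = {
    "baseline_deployment_name": "oss-standalone",
    "comparison_deployment_name": "oss-standalone",
    "metric_name": "ALL_STATS.Totals.Ops/sec",
    "metric_mode": "higher-better",
    "regressions_percent_lower_limit": 10.0,  # percent
    "last_n_baseline": 1,
    "last_n_comparison": 31,
    # Comparison window: the last 180 days up to "now".
    "from_date": datetime.datetime.utcnow() - datetime.timedelta(days=180),
    "to_date": datetime.datetime.utcnow(),
}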
--- redis_benchmarks_specification-0.1.94.dist-info/METADATA
+++ redis_benchmarks_specification-0.1.96.dist-info/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: redis-benchmarks-specification
-Version: 0.1.94
+Version: 0.1.96
 Summary: The Redis benchmarks specification describes the cross-language/tools requirements and expectations to foster performance and observability standards around redis related technologies. Members from both industry and academia, including organizations and individuals are encouraged to contribute.
 Author: filipecosta90
 Author-email: filipecosta.90@gmail.com
--- redis_benchmarks_specification-0.1.94.dist-info/RECORD
+++ redis_benchmarks_specification-0.1.96.dist-info/RECORD
@@ -13,19 +13,19 @@ redis_benchmarks_specification/__cli__/stats.py,sha256=wahzZRbpfokv8dQU8O4BH5JFr
 redis_benchmarks_specification/__common__/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 redis_benchmarks_specification/__common__/builder_schema.py,sha256=LW00BSz_LXa83wbgRFylOCyRMMH4-3YpWgYh2hevOFM,5693
 redis_benchmarks_specification/__common__/env.py,sha256=l58AH5LC2jQcyPRJA4ue_4kMloOewcqnLvkLWfzph_A,3119
-redis_benchmarks_specification/__common__/github.py,sha256=OcEzo87jcQsKnRX5qjlgu19katM_IrmktF95OMrwRnA,9975
+redis_benchmarks_specification/__common__/github.py,sha256=_1Cwzzgm7dcWgUwUCKkSXn0MIU5QM9z7MMeMoFI73Js,10401
 redis_benchmarks_specification/__common__/package.py,sha256=4uVt1BAZ999LV2rZkq--Tk6otAVIf9YR3g3KGeUpiW4,834
 redis_benchmarks_specification/__common__/runner.py,sha256=6x1L8UAo-gmxLMcsUU4FGQ5OLV08fqygDnoVZ1rVUVQ,6642
 redis_benchmarks_specification/__common__/spec.py,sha256=3hvfAb7RuAsqB_PNEo_-iuOtgz1ZCWe3ouMwS5Mw54A,1002
 redis_benchmarks_specification/__compare__/__init__.py,sha256=DtBXRp0Q01XgCFmY-1OIePMyyYihVNAjZ1Y8zwqSDN0,101
 redis_benchmarks_specification/__compare__/args.py,sha256=71-pYjlbTQNAXQMbAiet898yhWRIplBBNU5USQqFar4,5341
-redis_benchmarks_specification/__compare__/compare.py,sha256=6LuWtVxfjwNgCOeS5tuLexp9ap5bF1hP092Pfts1wZE,36584
+redis_benchmarks_specification/__compare__/compare.py,sha256=ClVHnKZtKS0cEccQiZzxBtthY9xe7DSnFUOgT6ZhtsE,36838
 redis_benchmarks_specification/__init__.py,sha256=YQIEx2sLPPA0JR9OuCuMNMNtm-f_gqDKgzvNJnkGNKY,491
 redis_benchmarks_specification/__runner__/__init__.py,sha256=l-G1z-t6twUgi8QLueqoTQLvJmv3hJoEYskGm6H7L6M,83
 redis_benchmarks_specification/__runner__/args.py,sha256=lYvbPd_3ppHZv4f2sRwXcF-fcBrwRSn3H2RMmNVkojY,7221
 redis_benchmarks_specification/__runner__/runner.py,sha256=ty-SzkeW4JGJD3p5DBsTTqKtY5iIOGan311stCCCjsA,47859
 redis_benchmarks_specification/__self_contained_coordinator__/__init__.py,sha256=l-G1z-t6twUgi8QLueqoTQLvJmv3hJoEYskGm6H7L6M,83
-redis_benchmarks_specification/__self_contained_coordinator__/args.py,sha256=VHjWWjZ0bs05rcQaeZYSFxf1d_0t02PRoXgfrhfF5nU,5770
+redis_benchmarks_specification/__self_contained_coordinator__/args.py,sha256=unQxatbVlkQmIQwDnoVmruNdmbhTkxDJs0FETc67Ovg,5910
 redis_benchmarks_specification/__self_contained_coordinator__/artifacts.py,sha256=rTVzBzPNDz7myIkiI6ToEh29Jo53Bi5a_zNALscSvKI,840
 redis_benchmarks_specification/__self_contained_coordinator__/build_info.py,sha256=psZrbt7hdAm3jyOKLr-UqaXllu2-Pzd8r6rjRja8CTU,2211
 redis_benchmarks_specification/__self_contained_coordinator__/clients.py,sha256=voL6zP3RenpZ1A7JKGVkvEWVXI9KYwmnSgVJr6l8o-4,710
@@ -33,7 +33,7 @@ redis_benchmarks_specification/__self_contained_coordinator__/cpuset.py,sha256=s
 redis_benchmarks_specification/__self_contained_coordinator__/docker.py,sha256=iivxZ55vL2kVHHkqVbXY2ftvxvceqH_Zw079KLCv9N8,2507
 redis_benchmarks_specification/__self_contained_coordinator__/prepopulation.py,sha256=ajhpzxsBy6tiHrO79gEIKQYxZR-Us6B4rC6NYg1EZjM,2875
 redis_benchmarks_specification/__self_contained_coordinator__/runners.py,sha256=Ul8UoxvWRxCVWmyaCBadpLMDOVEoNSp-A9KMPtPmUwM,28483
-redis_benchmarks_specification/__self_contained_coordinator__/self_contained_coordinator.py,sha256=HKqhpLTwu7z-nifwT4JyjU6jeUjLcyYEqConGncjGg8,59697
+redis_benchmarks_specification/__self_contained_coordinator__/self_contained_coordinator.py,sha256=iTzdcHSVWiqR9jNkOVkCJwC5ldV2xvW0yXgVOKOy8cI,66167
 redis_benchmarks_specification/__setups__/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 redis_benchmarks_specification/__setups__/topologies.py,sha256=xQ1IJkcTji_ZjLiJd3vOxZpvbNtBLZw9cPkw5hGJKHU,481
 redis_benchmarks_specification/__spec__/__init__.py,sha256=l-G1z-t6twUgi8QLueqoTQLvJmv3hJoEYskGm6H7L6M,83
@@ -150,8 +150,8 @@ redis_benchmarks_specification/test-suites/memtier_benchmark-2keys-set-10-100-el
 redis_benchmarks_specification/test-suites/memtier_benchmark-2keys-stream-5-entries-xread-all-entries-pipeline-10.yml,sha256=RSkNgV5SsjdkXhM0mifi2GlwIxtiHR8N3u-ieI23BoQ,1126
 redis_benchmarks_specification/test-suites/memtier_benchmark-2keys-stream-5-entries-xread-all-entries.yml,sha256=w7-dOIU-eATHXCvJbSeih6Vt54oygtkXKskQdzCll3o,1100
 redis_benchmarks_specification/test-suites/template.txt,sha256=qrci_94QV9bPUJe0cL8lsUaQmX5Woz-jT-pDF0629AE,423
-redis_benchmarks_specification-0.1.94.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
-redis_benchmarks_specification-0.1.94.dist-info/METADATA,sha256=9MR8g3J9HkACHZDPgxLHBx8QfYaIVZW1oq3KbjdJa_g,22482
-redis_benchmarks_specification-0.1.94.dist-info/WHEEL,sha256=7Z8_27uaHI_UZAc4Uox4PpBhQ9Y5_modZXWMxtUi4NU,88
-redis_benchmarks_specification-0.1.94.dist-info/entry_points.txt,sha256=x5WBXCZsnDRTZxV7SBGmC65L2k-ygdDOxV8vuKN00Nk,715
-redis_benchmarks_specification-0.1.94.dist-info/RECORD,,
+redis_benchmarks_specification-0.1.96.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+redis_benchmarks_specification-0.1.96.dist-info/METADATA,sha256=qcGWW-g8BH50PhrWlOpY5IrUXMVyNCb-wLp6y-Htzew,22482
+redis_benchmarks_specification-0.1.96.dist-info/WHEEL,sha256=7Z8_27uaHI_UZAc4Uox4PpBhQ9Y5_modZXWMxtUi4NU,88
+redis_benchmarks_specification-0.1.96.dist-info/entry_points.txt,sha256=x5WBXCZsnDRTZxV7SBGmC65L2k-ygdDOxV8vuKN00Nk,715
+redis_benchmarks_specification-0.1.96.dist-info/RECORD,,