redisbench-admin 0.11.39__py3-none-any.whl → 0.11.41__py3-none-any.whl

This diff covers publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between those versions as they appear in their respective public registries.
@@ -131,6 +131,7 @@ def create_compare_arguments(parser):
     parser.add_argument("--simple-table", type=bool, default=False)
     parser.add_argument("--use_metric_context_path", type=bool, default=False)
     parser.add_argument("--testname_regex", type=str, default=".*", required=False)
+    parser.add_argument("--test-regex", type=str, default=".*", required=False)
     parser.add_argument(
         "--regressions-percent-lower-limit",
         type=float,
@@ -161,6 +162,10 @@ def create_compare_arguments(parser):
         type=str,
         default="https://benchmarksrediscom.grafana.net/d/",
     )
+    parser.add_argument(
+        "--grafana_uid",
+        default=None,
+    )
     parser.add_argument(
         "--auto-approve",
         required=False,
@@ -251,9 +251,14 @@ def compare_command_logic(args, project_name, project_version):
     }
     baseline_architecture = args.baseline_architecture
     comparison_architecture = args.comparison_architecture
-    uid = None
-    if tf_github_repo.lower() in grafana_dashboards_uids:
+    uid = args.grafana_uid
+    if tf_github_repo.lower() in grafana_dashboards_uids and uid is None:
         uid = grafana_dashboards_uids[tf_github_repo.lower()]
+        logging.info(
+            f"Using uid from grafana_dashboards_uids. {grafana_dashboards_uids}. uid={uid}"
+        )
+    else:
+        logging.info(f"Using uid from args. uid={uid}")
     grafana_link_base = None
     if uid is not None:
         grafana_link_base = "{}/{}".format(grafana_base_dashboard, uid)
@@ -321,9 +326,7 @@ def compare_command_logic(args, project_name, project_version):
     comparison_summary = "In summary:\n"
     if total_stable > 0:
         comparison_summary += (
-            "- Detected a total of {} stable tests between versions.\n".format(
-                total_stable,
-            )
+            f"- Detected a total of {total_stable} stable tests between versions.\n"
         )

     if total_unstable > 0:
@@ -385,13 +388,10 @@ def compare_command_logic(args, project_name, project_version):

         comparison_summary += f" - {test_display_name}: {', '.join(commands_info)}{confidence_indicator}\n"
     if total_improvements > 0:
-        comparison_summary += "- Detected a total of {} improvements above the improvement water line.\n".format(
-            total_improvements
-        )
+        comparison_summary += f"- Detected a total of {total_improvements} improvements above the improvement water line.\n"
     if total_regressions > 0:
-        comparison_summary += "- Detected a total of {} regressions bellow the regression water line {}.\n".format(
-            total_regressions, args.regressions_percent_lower_limit
-        )
+        comparison_summary += f"- Detected a total of {total_regressions} regressions bellow the regression water line {args.regressions_percent_lower_limit}%.\n"
+    comparison_summary += "\n"

     comment_body += comparison_summary
     comment_body += "\n"
@@ -1216,9 +1216,9 @@ def from_rts_to_regression_table(
             f"Blocking regression confirmation for '{test_name}' due to unstable latency data"
         )
         if server_has_unstable:
-            logging.info(f" Server-side latency data is unstable")
+            logging.info(" Server-side latency data is unstable")
         if client_has_unstable:
-            logging.info(f" Client-side latency data is unstable")
+            logging.info(" Client-side latency data is unstable")
     else:
         both_confirm_regression = (
             server_confirms_regression and client_confirms_regression
@@ -1244,12 +1244,12 @@ def from_rts_to_regression_table(
             server_regression_details or client_regression_details
         )
         if combined_regression_details:
-            combined_regression_details[
-                "server_side"
-            ] = server_confirms_regression
-            combined_regression_details[
-                "client_side"
-            ] = client_confirms_regression
+            combined_regression_details["server_side"] = (
+                server_confirms_regression
+            )
+            combined_regression_details["client_side"] = (
+                client_confirms_regression
+            )

         # 2nd level confirmation is sufficient - always add to confirmed regressions
         logging.info(
@@ -1291,17 +1291,17 @@ def from_rts_to_regression_table(
                 f"Confidence analysis for '{test_name}': {confidence_note}"
             )
             # Use 3rd level confidence if available
-            combined_regression_details[
-                "high_confidence"
-            ] = high_confidence
+            combined_regression_details["high_confidence"] = (
+                high_confidence
+            )
         else:
             # No 3rd level data available - default to moderate confidence since 2nd level confirmed
             logging.info(
                 f"No 3rd level data available for '{test_name}' - using 2nd level confirmation"
             )
-            combined_regression_details[
-                "high_confidence"
-            ] = True  # 2nd level confirmation is reliable
+            combined_regression_details["high_confidence"] = (
+                True  # 2nd level confirmation is reliable
+            )

         # Always add to confirmed regressions when 2nd level confirms
         latency_confirmed_regression_details.append(
@@ -73,7 +73,15 @@ def deploy_command_logic(args, project_name, project_version):
     tf_triggering_env = "redisbench-admin-deploy"
     logging.info("Setting an infra timeout of {} secs".format(infra_timeout_secs))
     if args.destroy is False:
-        (tf_return_code, _, _, _, _, _, _,) = setup_remote_environment(
+        (
+            tf_return_code,
+            _,
+            _,
+            _,
+            _,
+            _,
+            _,
+        ) = setup_remote_environment(
             tf,
             tf_github_sha,
             tf_github_actor,
@@ -89,6 +89,20 @@ def generate_meet_cmds(shard_count, shard_host, start_port):
 def setup_oss_cluster_from_conns(meet_cmds, redis_conns, shard_count):
     status = False
     try:
+        # Pre-setup validation: check uptime and cluster mode
+        for primary_pos, redis_conn in enumerate(redis_conns):
+            redis_conn.ping()
+
+            server_info = redis_conn.info("server")
+            uptime = server_info.get("uptime_in_seconds", 0)
+            cluster_enabled = server_info.get("cluster_enabled", 0)
+            tcp_port = server_info.get("tcp_port", "n/a")
+
+            logging.info(
+                f"Node {primary_pos} ({tcp_port}): uptime={uptime}s cluster_enabled={cluster_enabled}"
+            )
+
+        # Send meet commands
         for primary_pos, redis_conn in enumerate(redis_conns):
             logging.info(
                 "Sending to primary #{} a total of {} MEET commands".format(
@@ -138,6 +152,29 @@ def setup_oss_cluster_from_conns(meet_cmds, redis_conns, shard_count):
                )
                logging.info("Node {}: cluster_state {}".format(n, cluster_state_ok))
                sleep(1)
+
+        # Post-setup validation: check uptime and cluster mode
+        sleep(10)
+        for primary_pos, redis_conn in enumerate(redis_conns):
+            redis_conn.ping()
+
+            server_info = redis_conn.info("server")
+            uptime = server_info.get("uptime_in_seconds", 0)
+            server_info = redis_conn.info("cluster")
+            cluster_enabled = server_info.get("cluster_enabled", -1)
+            tcp_port = server_info.get("tcp_port", "n/a")
+
+            logging.info(
+                f"Node {primary_pos} ({tcp_port}): uptime={uptime}s cluster_enabled={cluster_enabled}"
+            )
+
+            if cluster_enabled != 1:
+                logging.error(
+                    "Node {}: cluster mode is not enabled (cluster_enabled={})".format(
+                        primary_pos, cluster_enabled
+                    )
+                )
+                return False
         status = True
     except redis.exceptions.RedisError as e:
         logging.warning("Received an error {}".format(e.__str__()))
@@ -42,7 +42,13 @@ def export_command_logic(args, project_name, project_version):
     deployment_name = args.deployment_name
     deployment_type = args.deployment_type
     results_format = args.results_format
-    (_, github_branch, github_org, github_repo, _,) = git_vars_crosscheck(
+    (
+        _,
+        github_branch,
+        github_org,
+        github_repo,
+        _,
+    ) = git_vars_crosscheck(
         None, args.github_branch, args.github_org, args.github_repo, None
     )
     exporter_timemetric_path = None
@@ -400,9 +400,9 @@ class Perf:
             "Main THREAD Flame Graph: " + use_case, details
         )
         if artifact_result is True:
-            outputs[
-                "Main THREAD Flame Graph {}".format(identifier)
-            ] = flame_graph_output
+            outputs["Main THREAD Flame Graph {}".format(identifier)] = (
+                flame_graph_output
+            )
         result &= artifact_result

         tid = self.pid
@@ -440,9 +440,9 @@ class Perf:
         )

         if artifact_result is True:
-            outputs[
-                "perf report per dso,sym {}".format(identifier)
-            ] = perf_report_artifact
+            outputs["perf report per dso,sym {}".format(identifier)] = (
+                perf_report_artifact
+            )
         result &= artifact_result

         # generate perf report per dso,sym
@@ -460,9 +460,9 @@ class Perf:
         )

         if artifact_result is True:
-            outputs[
-                "perf report per dso,sym with callgraph {}".format(identifier)
-            ] = perf_report_artifact
+            outputs["perf report per dso,sym with callgraph {}".format(identifier)] = (
+                perf_report_artifact
+            )
         result &= artifact_result

         # generate perf report per dso,sym,srcline
@@ -487,9 +487,9 @@ class Perf:
         )

         if artifact_result is True:
-            outputs[
-                "perf report per dso,sym,srcline {}".format(identifier)
-            ] = perf_report_artifact
+            outputs["perf report per dso,sym,srcline {}".format(identifier)] = (
+                perf_report_artifact
+            )
         result &= artifact_result

         self.logger.info(
@@ -527,9 +527,9 @@ class Perf:
         )

         if artifact_result is True:
-            outputs[
-                "perf report top self-cpu {}".format(identifier)
-            ] = perf_report_artifact
+            outputs["perf report top self-cpu {}".format(identifier)] = (
+                perf_report_artifact
+            )
         result &= artifact_result

         # generate perf report --stdio report
@@ -546,9 +546,9 @@ class Perf:
         )

         if artifact_result is True:
-            outputs[
-                "perf report top self-cpu (dso={})".format(binary)
-            ] = perf_report_artifact
+            outputs["perf report top self-cpu (dso={})".format(binary)] = (
+                perf_report_artifact
+            )
         result &= artifact_result

         if self.callgraph_mode == "dwarf":
@@ -590,9 +590,9 @@ class Perf:
         )
         result &= artifact_result
         if artifact_result is True:
-            outputs[
-                "Top entries in text form by LOC"
-            ] = pprof_artifact_text_output
+            outputs["Top entries in text form by LOC"] = (
+                pprof_artifact_text_output
+            )
             tabular_data_map["text-lines"] = tabular_data
         self.logger.info("Generating pprof png output")
         pprof_png_output = self.output + ".pprof.png"
@@ -604,9 +604,9 @@ class Perf:
             self.output,
         )
         if artifact_result is True:
-            outputs[
-                "Output graph image in PNG format"
-            ] = pprof_artifact_png_output
+            outputs["Output graph image in PNG format"] = (
+                pprof_artifact_png_output
+            )
         result &= artifact_result

         # save stack collapsed
@@ -206,7 +206,10 @@ def prepare_benchmark_parameters_specif_tooling(
         if isremote is True:
             benchmark_tool = "/tmp/{}".format(benchmark_tool)
             input_data_file = "/tmp/input.data"
-        (command_arr, command_str,) = prepare_tsbs_benchmark_command(
+        (
+            command_arr,
+            command_str,
+        ) = prepare_tsbs_benchmark_command(
             benchmark_tool,
             server_private_ip,
             server_plaintext_port,
@@ -218,7 +221,10 @@ def prepare_benchmark_parameters_specif_tooling(
             cluster_api_enabled,
         )
     if "memtier_benchmark" in benchmark_tool:
-        (command_arr, command_str,) = prepare_memtier_benchmark_command(
+        (
+            command_arr,
+            command_str,
+        ) = prepare_memtier_benchmark_command(
             benchmark_tool,
             server_private_ip,
             server_plaintext_port,
@@ -236,7 +242,10 @@ def prepare_benchmark_parameters_specif_tooling(
         ann_path = stdout[0].strip() + "/run/ann/pkg/multirun.py"
         logging.info("Remote ann-benchmark path: {}".format(ann_path))

-        (command_arr, command_str,) = prepare_ann_benchmark_command(
+        (
+            command_arr,
+            command_str,
+        ) = prepare_ann_benchmark_command(
             server_private_ip,
             server_plaintext_port,
             cluster_api_enabled,
@@ -250,7 +259,10 @@ def prepare_benchmark_parameters_specif_tooling(
         if isremote is True:
             benchmark_tool = "/tmp/{}".format(benchmark_tool)
             input_data_file = "/tmp/input.data"
-        (command_arr, command_str,) = prepare_ftsb_benchmark_command(
+        (
+            command_arr,
+            command_str,
+        ) = prepare_ftsb_benchmark_command(
             benchmark_tool,
             server_private_ip,
             server_plaintext_port,
@@ -267,7 +279,10 @@ def prepare_benchmark_parameters_specif_tooling(
         if isremote is True:
             benchmark_tool = "/tmp/{}".format(benchmark_tool)
             input_data_file = "/tmp/input.data"
-        (command_arr, command_str,) = prepare_aibench_benchmark_command(
+        (
+            command_arr,
+            command_str,
+        ) = prepare_aibench_benchmark_command(
             benchmark_tool,
             server_private_ip,
             server_plaintext_port,
@@ -772,7 +787,10 @@ def print_results_table_stdout(
     metric_names=[],
 ):
     # check which metrics to extract
-    (_, metrics,) = merge_default_and_config_metrics(
+    (
+        _,
+        metrics,
+    ) = merge_default_and_config_metrics(
         benchmark_config,
         default_metrics,
         None,
@@ -188,8 +188,6 @@ BENCHMARK_CPU_STATS_GLOBAL = {}


 def collect_cpu_data(redis_conns=[], delta_secs: float = 5.0, delay_start: float = 1.0):
-    global BENCHMARK_CPU_STATS_GLOBAL
-    global BENCHMARK_RUNNING_GLOBAL
     import time

     counter = 0
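Dropping the two `global` statements is behavior-preserving only if collect_cpu_data mutates the module-level objects in place (item assignment, .append, and the like) rather than rebinding the names; the rest of the function body is not shown in this diff, so that is an assumption. A minimal illustration of the distinction (helper names are hypothetical):

    BENCHMARK_CPU_STATS_GLOBAL = {}

    def record_sample(ts, value):
        # In-place mutation resolves the module-level dict without `global`.
        BENCHMARK_CPU_STATS_GLOBAL[ts] = value

    def reset_samples():
        # Rebinding the name, by contrast, still requires `global`,
        # otherwise the assignment would create a local variable.
        global BENCHMARK_CPU_STATS_GLOBAL
        BENCHMARK_CPU_STATS_GLOBAL = {}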
@@ -114,7 +114,11 @@ class TerraformClass:
     def async_runner_setup(
         self,
     ):
-        (remote_setup, deployment_type, remote_id,) = fetch_remote_setup_from_config(
+        (
+            remote_setup,
+            deployment_type,
+            remote_id,
+        ) = fetch_remote_setup_from_config(
             [{"type": "async", "setup": "runner"}],
             "https://github.com/RedisLabsModules/testing-infrastructure.git",
             "master",
@@ -229,7 +233,11 @@ def terraform_spin_or_reuse_env(
     tf_override_name=None,
     tf_folder_path=None,
 ):
-    (remote_setup, deployment_type, remote_id,) = fetch_remote_setup_from_config(
+    (
+        remote_setup,
+        deployment_type,
+        remote_id,
+    ) = fetch_remote_setup_from_config(
         benchmark_config["remote"],
         "https://github.com/RedisLabsModules/testing-infrastructure.git",
         "master",
@@ -28,9 +28,9 @@ WantedBy=multi-user.target
         argv.append("--private_key")
         argv.append("/home/ubuntu/work_dir/tests/benchmarks/benchmarks.redislabs.pem")
     else:
-        argv[
-            argv.index(args.private_key)
-        ] = "/home/ubuntu/work_dir/tests/benchmarks/benchmarks.redislabs.pem"
+        argv[argv.index(args.private_key)] = (
+            "/home/ubuntu/work_dir/tests/benchmarks/benchmarks.redislabs.pem"
+        )
     if len(args.module_path) != 0:
         argv[argv.index(args.module_path[0])] = (
             "/home/ubuntu/work_dir/tests/benchmarks/"
@@ -40,4 +40,16 @@ def create_run_local_arguments(parser):
         default=IGNORE_KEYSPACE_ERRORS,
         help="Ignore keyspace check errors. Will still log them as errors",
     )
+    parser.add_argument(
+        "--dry-run",
+        default=False,
+        action="store_true",
+        help="Setup environment and test connectivity without running benchmarks",
+    )
+    parser.add_argument(
+        "--dry-run-with-preload",
+        default=False,
+        action="store_true",
+        help="Setup environment, preload data, and test connectivity without running benchmarks",
+    )
     return parser
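A sketch of how the new dry-run modes might be exercised (the `run-local` subcommand name is inferred from create_run_local_arguments; other required options are omitted for brevity):

    # Set up the environment and check connectivity without running any benchmark
    redisbench-admin run-local --dry-run

    # Additionally preload the dataset before stopping short of the benchmark step
    redisbench-admin run-local --dry-run-with-preload

Both flags use action="store_true" with a False default, so simply passing them on the command line enables the corresponding mode.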