redisbench-admin 0.11.39__py3-none-any.whl → 0.11.40__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -161,6 +161,10 @@ def create_compare_arguments(parser):
  type=str,
  default="https://benchmarksrediscom.grafana.net/d/",
  )
+ parser.add_argument(
+ "--grafana_uid",
+ default=None,
+ )
  parser.add_argument(
  "--auto-approve",
  required=False,
@@ -251,9 +251,12 @@ def compare_command_logic(args, project_name, project_version):
  }
  baseline_architecture = args.baseline_architecture
  comparison_architecture = args.comparison_architecture
- uid = None
- if tf_github_repo.lower() in grafana_dashboards_uids:
+ uid = args.grafana_uid
+ if tf_github_repo.lower() in grafana_dashboards_uids and uid is None:
  uid = grafana_dashboards_uids[tf_github_repo.lower()]
+ logging.info(f"Using uid from grafana_dashboards_uids. {grafana_dashboards_uids}. uid={uid}")
+ else:
+ logging.info(f"Using uid from args. uid={uid}")
  grafana_link_base = None
  if uid is not None:
  grafana_link_base = "{}/{}".format(grafana_base_dashboard, uid)
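Note on the hunk above: compare now prefers a UID passed via the new --grafana_uid flag and only falls back to the built-in grafana_dashboards_uids lookup when the flag is absent. A minimal standalone sketch of that precedence (the mapping value below is a made-up placeholder, not a real dashboard UID or the package's real registry):

```python
# Hypothetical stand-in for the package's dashboard UID registry.
grafana_dashboards_uids = {"redis/redis": "example-uid"}


def resolve_uid(cli_uid, tf_github_repo):
    uid = cli_uid  # value of --grafana_uid, or None when the flag is not given
    if tf_github_repo.lower() in grafana_dashboards_uids and uid is None:
        uid = grafana_dashboards_uids[tf_github_repo.lower()]
    return uid


assert resolve_uid("custom-uid", "redis/redis") == "custom-uid"
assert resolve_uid(None, "redis/redis") == "example-uid"
```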
@@ -1216,9 +1219,9 @@ def from_rts_to_regression_table(
  f"Blocking regression confirmation for '{test_name}' due to unstable latency data"
  )
  if server_has_unstable:
- logging.info(f" Server-side latency data is unstable")
+ logging.info(" Server-side latency data is unstable")
  if client_has_unstable:
- logging.info(f" Client-side latency data is unstable")
+ logging.info(" Client-side latency data is unstable")
  else:
  both_confirm_regression = (
  server_confirms_regression and client_confirms_regression
@@ -1244,12 +1247,12 @@ def from_rts_to_regression_table(
  server_regression_details or client_regression_details
  )
  if combined_regression_details:
- combined_regression_details[
- "server_side"
- ] = server_confirms_regression
- combined_regression_details[
- "client_side"
- ] = client_confirms_regression
+ combined_regression_details["server_side"] = (
+ server_confirms_regression
+ )
+ combined_regression_details["client_side"] = (
+ client_confirms_regression
+ )

  # 2nd level confirmation is sufficient - always add to confirmed regressions
  logging.info(
@@ -1291,17 +1294,17 @@ def from_rts_to_regression_table(
  f"Confidence analysis for '{test_name}': {confidence_note}"
  )
  # Use 3rd level confidence if available
- combined_regression_details[
- "high_confidence"
- ] = high_confidence
+ combined_regression_details["high_confidence"] = (
+ high_confidence
+ )
  else:
  # No 3rd level data available - default to moderate confidence since 2nd level confirmed
  logging.info(
  f"No 3rd level data available for '{test_name}' - using 2nd level confirmation"
  )
- combined_regression_details[
- "high_confidence"
- ] = True # 2nd level confirmation is reliable
+ combined_regression_details["high_confidence"] = (
+ True # 2nd level confirmation is reliable
+ )

  # Always add to confirmed regressions when 2nd level confirms
  latency_confirmed_regression_details.append(
@@ -73,7 +73,15 @@ def deploy_command_logic(args, project_name, project_version):
  tf_triggering_env = "redisbench-admin-deploy"
  logging.info("Setting an infra timeout of {} secs".format(infra_timeout_secs))
  if args.destroy is False:
- (tf_return_code, _, _, _, _, _, _,) = setup_remote_environment(
+ (
+ tf_return_code,
+ _,
+ _,
+ _,
+ _,
+ _,
+ _,
+ ) = setup_remote_environment(
  tf,
  tf_github_sha,
  tf_github_actor,
@@ -89,6 +89,20 @@ def generate_meet_cmds(shard_count, shard_host, start_port):
  def setup_oss_cluster_from_conns(meet_cmds, redis_conns, shard_count):
  status = False
  try:
+ # Pre-setup validation: check uptime and cluster mode
+ for primary_pos, redis_conn in enumerate(redis_conns):
+ redis_conn.ping()
+
+ server_info = redis_conn.info("server")
+ uptime = server_info.get("uptime_in_seconds", 0)
+ cluster_enabled = server_info.get("cluster_enabled", 0)
+ tcp_port = server_info.get("tcp_port", "n/a")
+
+ logging.info(
+ f"Node {primary_pos} ({tcp_port}): uptime={uptime}s cluster_enabled={cluster_enabled}"
+ )
+
+ # Send meet commands
  for primary_pos, redis_conn in enumerate(redis_conns):
  logging.info(
  "Sending to primary #{} a total of {} MEET commands".format(
@@ -138,6 +152,29 @@ def setup_oss_cluster_from_conns(meet_cmds, redis_conns, shard_count):
  )
  logging.info("Node {}: cluster_state {}".format(n, cluster_state_ok))
  sleep(1)
+
+ # Post-setup validation: check uptime and cluster mode
+ sleep(10)
+ for primary_pos, redis_conn in enumerate(redis_conns):
+ redis_conn.ping()
+
+ server_info = redis_conn.info("server")
+ uptime = server_info.get("uptime_in_seconds", 0)
+ server_info = redis_conn.info("cluster")
+ cluster_enabled = server_info.get("cluster_enabled", -1)
+ tcp_port = server_info.get("tcp_port", "n/a")
+
+ logging.info(
+ f"Node {primary_pos} ({tcp_port}): uptime={uptime}s cluster_enabled={cluster_enabled}"
+ )
+
+ if cluster_enabled != 1:
+ logging.error(
+ "Node {}: cluster mode is not enabled (cluster_enabled={})".format(
+ primary_pos, cluster_enabled
+ )
+ )
+ return False
  status = True
  except redis.exceptions.RedisError as e:
  logging.warning("Received an error {}".format(e.__str__()))
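The post-setup validation above keys off the cluster_enabled field that Redis reports via INFO. A small standalone probe along the same lines with redis-py (host and port below are assumptions for illustration, not values used by this package):

```python
import redis

# Hypothetical node address; point this at one of your own primaries.
conn = redis.Redis(host="127.0.0.1", port=6379)
conn.ping()

cluster_section = conn.info("cluster")  # e.g. {"cluster_enabled": 1}
if cluster_section.get("cluster_enabled", -1) != 1:
    print("cluster mode is not enabled on this node")
else:
    print("cluster mode is enabled")
```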
@@ -42,7 +42,13 @@ def export_command_logic(args, project_name, project_version):
  deployment_name = args.deployment_name
  deployment_type = args.deployment_type
  results_format = args.results_format
- (_, github_branch, github_org, github_repo, _,) = git_vars_crosscheck(
+ (
+ _,
+ github_branch,
+ github_org,
+ github_repo,
+ _,
+ ) = git_vars_crosscheck(
  None, args.github_branch, args.github_org, args.github_repo, None
  )
  exporter_timemetric_path = None
@@ -400,9 +400,9 @@ class Perf:
  "Main THREAD Flame Graph: " + use_case, details
  )
  if artifact_result is True:
- outputs[
- "Main THREAD Flame Graph {}".format(identifier)
- ] = flame_graph_output
+ outputs["Main THREAD Flame Graph {}".format(identifier)] = (
+ flame_graph_output
+ )
  result &= artifact_result

  tid = self.pid
@@ -440,9 +440,9 @@ class Perf:
  )

  if artifact_result is True:
- outputs[
- "perf report per dso,sym {}".format(identifier)
- ] = perf_report_artifact
+ outputs["perf report per dso,sym {}".format(identifier)] = (
+ perf_report_artifact
+ )
  result &= artifact_result

  # generate perf report per dso,sym
@@ -460,9 +460,9 @@ class Perf:
  )

  if artifact_result is True:
- outputs[
- "perf report per dso,sym with callgraph {}".format(identifier)
- ] = perf_report_artifact
+ outputs["perf report per dso,sym with callgraph {}".format(identifier)] = (
+ perf_report_artifact
+ )
  result &= artifact_result

  # generate perf report per dso,sym,srcline
@@ -487,9 +487,9 @@ class Perf:
  )

  if artifact_result is True:
- outputs[
- "perf report per dso,sym,srcline {}".format(identifier)
- ] = perf_report_artifact
+ outputs["perf report per dso,sym,srcline {}".format(identifier)] = (
+ perf_report_artifact
+ )
  result &= artifact_result

  self.logger.info(
@@ -527,9 +527,9 @@ class Perf:
  )

  if artifact_result is True:
- outputs[
- "perf report top self-cpu {}".format(identifier)
- ] = perf_report_artifact
+ outputs["perf report top self-cpu {}".format(identifier)] = (
+ perf_report_artifact
+ )
  result &= artifact_result

  # generate perf report --stdio report
@@ -546,9 +546,9 @@ class Perf:
  )

  if artifact_result is True:
- outputs[
- "perf report top self-cpu (dso={})".format(binary)
- ] = perf_report_artifact
+ outputs["perf report top self-cpu (dso={})".format(binary)] = (
+ perf_report_artifact
+ )
  result &= artifact_result

  if self.callgraph_mode == "dwarf":
@@ -590,9 +590,9 @@ class Perf:
  )
  result &= artifact_result
  if artifact_result is True:
- outputs[
- "Top entries in text form by LOC"
- ] = pprof_artifact_text_output
+ outputs["Top entries in text form by LOC"] = (
+ pprof_artifact_text_output
+ )
  tabular_data_map["text-lines"] = tabular_data
  self.logger.info("Generating pprof png output")
  pprof_png_output = self.output + ".pprof.png"
@@ -604,9 +604,9 @@ class Perf:
  self.output,
  )
  if artifact_result is True:
- outputs[
- "Output graph image in PNG format"
- ] = pprof_artifact_png_output
+ outputs["Output graph image in PNG format"] = (
+ pprof_artifact_png_output
+ )
  result &= artifact_result

  # save stack collapsed
@@ -206,7 +206,10 @@ def prepare_benchmark_parameters_specif_tooling(
  if isremote is True:
  benchmark_tool = "/tmp/{}".format(benchmark_tool)
  input_data_file = "/tmp/input.data"
- (command_arr, command_str,) = prepare_tsbs_benchmark_command(
+ (
+ command_arr,
+ command_str,
+ ) = prepare_tsbs_benchmark_command(
  benchmark_tool,
  server_private_ip,
  server_plaintext_port,
@@ -218,7 +221,10 @@ def prepare_benchmark_parameters_specif_tooling(
  cluster_api_enabled,
  )
  if "memtier_benchmark" in benchmark_tool:
- (command_arr, command_str,) = prepare_memtier_benchmark_command(
+ (
+ command_arr,
+ command_str,
+ ) = prepare_memtier_benchmark_command(
  benchmark_tool,
  server_private_ip,
  server_plaintext_port,
@@ -236,7 +242,10 @@ def prepare_benchmark_parameters_specif_tooling(
  ann_path = stdout[0].strip() + "/run/ann/pkg/multirun.py"
  logging.info("Remote ann-benchmark path: {}".format(ann_path))

- (command_arr, command_str,) = prepare_ann_benchmark_command(
+ (
+ command_arr,
+ command_str,
+ ) = prepare_ann_benchmark_command(
  server_private_ip,
  server_plaintext_port,
  cluster_api_enabled,
@@ -250,7 +259,10 @@ def prepare_benchmark_parameters_specif_tooling(
  if isremote is True:
  benchmark_tool = "/tmp/{}".format(benchmark_tool)
  input_data_file = "/tmp/input.data"
- (command_arr, command_str,) = prepare_ftsb_benchmark_command(
+ (
+ command_arr,
+ command_str,
+ ) = prepare_ftsb_benchmark_command(
  benchmark_tool,
  server_private_ip,
  server_plaintext_port,
@@ -267,7 +279,10 @@ def prepare_benchmark_parameters_specif_tooling(
  if isremote is True:
  benchmark_tool = "/tmp/{}".format(benchmark_tool)
  input_data_file = "/tmp/input.data"
- (command_arr, command_str,) = prepare_aibench_benchmark_command(
+ (
+ command_arr,
+ command_str,
+ ) = prepare_aibench_benchmark_command(
  benchmark_tool,
  server_private_ip,
  server_plaintext_port,
@@ -772,7 +787,10 @@ def print_results_table_stdout(
  metric_names=[],
  ):
  # check which metrics to extract
- (_, metrics,) = merge_default_and_config_metrics(
+ (
+ _,
+ metrics,
+ ) = merge_default_and_config_metrics(
  benchmark_config,
  default_metrics,
  None,
@@ -188,8 +188,6 @@ BENCHMARK_CPU_STATS_GLOBAL = {}


  def collect_cpu_data(redis_conns=[], delta_secs: float = 5.0, delay_start: float = 1.0):
- global BENCHMARK_CPU_STATS_GLOBAL
- global BENCHMARK_RUNNING_GLOBAL
  import time

  counter = 0
@@ -114,7 +114,11 @@ class TerraformClass:
  def async_runner_setup(
  self,
  ):
- (remote_setup, deployment_type, remote_id,) = fetch_remote_setup_from_config(
+ (
+ remote_setup,
+ deployment_type,
+ remote_id,
+ ) = fetch_remote_setup_from_config(
  [{"type": "async", "setup": "runner"}],
  "https://github.com/RedisLabsModules/testing-infrastructure.git",
  "master",
@@ -229,7 +233,11 @@ def terraform_spin_or_reuse_env(
  tf_override_name=None,
  tf_folder_path=None,
  ):
- (remote_setup, deployment_type, remote_id,) = fetch_remote_setup_from_config(
+ (
+ remote_setup,
+ deployment_type,
+ remote_id,
+ ) = fetch_remote_setup_from_config(
  benchmark_config["remote"],
  "https://github.com/RedisLabsModules/testing-infrastructure.git",
  "master",
@@ -28,9 +28,9 @@ WantedBy=multi-user.target
  argv.append("--private_key")
  argv.append("/home/ubuntu/work_dir/tests/benchmarks/benchmarks.redislabs.pem")
  else:
- argv[
- argv.index(args.private_key)
- ] = "/home/ubuntu/work_dir/tests/benchmarks/benchmarks.redislabs.pem"
+ argv[argv.index(args.private_key)] = (
+ "/home/ubuntu/work_dir/tests/benchmarks/benchmarks.redislabs.pem"
+ )
  if len(args.module_path) != 0:
  argv[argv.index(args.module_path[0])] = (
  "/home/ubuntu/work_dir/tests/benchmarks/"
@@ -40,4 +40,16 @@ def create_run_local_arguments(parser):
  default=IGNORE_KEYSPACE_ERRORS,
  help="Ignore keyspace check errors. Will still log them as errors",
  )
+ parser.add_argument(
+ "--dry-run",
+ default=False,
+ action="store_true",
+ help="Setup environment and test connectivity without running benchmarks",
+ )
+ parser.add_argument(
+ "--dry-run-with-preload",
+ default=False,
+ action="store_true",
+ help="Setup environment, preload data, and test connectivity without running benchmarks",
+ )
  return parser
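The two flags added above (also added to the run-remote arguments later in this diff) are plain store_true options. A quick standalone argparse sketch of how they parse; the parser here is a local example, not the package's real CLI wiring:

```python
import argparse

# Mirrors the two arguments added above, in isolation.
parser = argparse.ArgumentParser()
parser.add_argument("--dry-run", default=False, action="store_true")
parser.add_argument("--dry-run-with-preload", default=False, action="store_true")

args = parser.parse_args(["--dry-run"])
assert args.dry_run is True
assert args.dry_run_with_preload is False
```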
@@ -16,6 +16,7 @@ from redisbench_admin.run.git import git_vars_crosscheck
  from redisbench_admin.utils.remote import (
  get_project_ts_tags,
  push_data_to_redistimeseries,
+ perform_connectivity_test,
  )

  import redisbench_admin.run.metrics
@@ -326,6 +327,7 @@ def run_local_command_logic(args, project_name, project_version):
  )

  # run the benchmark
+
  cpu_stats_thread = threading.Thread(
  target=collect_cpu_data,
  args=(redis_conns, 5.0, 1.0),
@@ -335,9 +337,60 @@ def run_local_command_logic(args, project_name, project_version):
  )
  cpu_stats_thread.start()
  benchmark_start_time = datetime.datetime.now()
- stdout, stderr = run_local_benchmark(
- benchmark_tool, command
+ logging.info(
+ "Running benchmark command: {}".format(command)
  )
+ # Handle dry-run modes
+ if args.dry_run or args.dry_run_with_preload:
+ logging.info(
+ "🏃 Dry-run mode detected - performing connectivity tests"
+ )
+
+ # Test basic connectivity after setup
+ connectivity_success = perform_connectivity_test(
+ redis_conns, "after local environment setup"
+ )
+
+ if args.dry_run_with_preload:
+ logging.info(
+ "📦 Dry-run with preload - data loading already completed during setup"
+ )
+ # Test connectivity after preload (data was loaded during local_db_spin)
+ connectivity_success = (
+ perform_connectivity_test(
+ redis_conns, "after data preloading"
+ )
+ and connectivity_success
+ )
+
+ # Print dry-run summary
+ logging.info("=" * 50)
+ logging.info("🎯 DRY-RUN SUMMARY")
+ logging.info("=" * 50)
+ logging.info(
+ f"✅ Database: {setup_type} ({'cluster' if cluster_api_enabled else 'standalone'}) started locally"
+ )
+ logging.info(
+ f"✅ Client tools: {benchmark_tool} available"
+ )
+ logging.info(
+ f"{'✅' if connectivity_success else '❌'} Connectivity: {len(redis_conns)} connection(s) tested"
+ )
+ if args.dry_run_with_preload:
+ logging.info(
+ "✅ Data preload: Completed during setup"
+ )
+ logging.info("🏁 Dry-run completed successfully")
+ logging.info(
+ "⏭️ Benchmark execution skipped (dry-run mode)"
+ )
+ logging.info("=" * 50)
+
+ # Skip benchmark execution and continue to next test
+ else:
+ stdout, stderr = run_local_benchmark(
+ benchmark_tool, command
+ )
  benchmark_end_time = datetime.datetime.now()
  redisbench_admin.run.metrics.BENCHMARK_RUNNING_GLOBAL = (
  False
@@ -364,10 +417,10 @@ def run_local_command_logic(args, project_name, project_version):
  benchmark_end_time, benchmark_start_time
  )
  )
-
- logging.info("Extracting the benchmark results")
- logging.info("stdout: {}".format(stdout))
- logging.info("stderr: {}".format(stderr))
+ if args.dry_run is False:
+ logging.info("Extracting the benchmark results")
+ logging.info("stdout: {}".format(stdout))
+ logging.info("stderr: {}".format(stderr))

  (
  _,
@@ -420,53 +473,54 @@ def run_local_command_logic(args, project_name, project_version):
  test_name,
  tf_triggering_env,
  )
-
- post_process_benchmark_results(
- benchmark_tool,
- local_benchmark_output_filename,
- start_time_ms,
- start_time_str,
- stdout,
- )
  results_dict = {}
- with open(
- local_benchmark_output_filename, "r"
- ) as json_file:
- results_dict = json.load(json_file)
- print_results_table_stdout(
- benchmark_config,
- default_metrics,
- results_dict,
- setup_name,
- setup_type,
- test_name,
- total_shards_cpu_usage,
- overall_end_time_metrics,
- [
- "memory_used_memory",
- "memory_used_memory_dataset",
- ],
- )
- export_redis_metrics(
- artifact_version,
- end_time_ms,
- overall_end_time_metrics,
- rts,
- setup_name,
- setup_type,
- test_name,
- tf_github_branch,
- tf_github_org,
- tf_github_repo,
- tf_triggering_env,
- {"metric-type": "redis-metrics"},
- 0,
+ if args.dry_run is False:
+ post_process_benchmark_results(
+ benchmark_tool,
+ local_benchmark_output_filename,
+ start_time_ms,
+ start_time_str,
+ stdout,
  )

- # check KPIs
- return_code = results_dict_kpi_check(
- benchmark_config, results_dict, return_code
- )
+ with open(
+ local_benchmark_output_filename, "r"
+ ) as json_file:
+ results_dict = json.load(json_file)
+ print_results_table_stdout(
+ benchmark_config,
+ default_metrics,
+ results_dict,
+ setup_name,
+ setup_type,
+ test_name,
+ total_shards_cpu_usage,
+ overall_end_time_metrics,
+ [
+ "memory_used_memory",
+ "memory_used_memory_dataset",
+ ],
+ )
+ export_redis_metrics(
+ artifact_version,
+ end_time_ms,
+ overall_end_time_metrics,
+ rts,
+ setup_name,
+ setup_type,
+ test_name,
+ tf_github_branch,
+ tf_github_org,
+ tf_github_repo,
+ tf_triggering_env,
+ {"metric-type": "redis-metrics"},
+ 0,
+ )
+
+ # check KPIs
+ return_code = results_dict_kpi_check(
+ benchmark_config, results_dict, return_code
+ )

  metadata_tags = get_metadata_tags(benchmark_config)
  (
@@ -508,7 +562,10 @@ def run_local_command_logic(args, project_name, project_version):
  "Some unexpected exception was caught "
  "during local work. Failing test...."
  )
- logging.critical(sys.exc_info()[0])
+ if len(sys.exc_info()) > 0:
+ logging.critical(sys.exc_info()[0])
+ else:
+ logging.critical(sys.exc_info())
  print("-" * 60)
  traceback.print_exc(file=sys.stdout)
  print("-" * 60)
@@ -687,17 +744,17 @@ def commandstats_latencystats_process_name(
  branch = variant_labels_dict["branch"]

  if version is not None:
- variant_labels_dict[
- "command_and_metric_and_version"
- ] = "{} - {} - {}".format(command, metric, version)
- variant_labels_dict[
- "command_and_metric_and_setup_and_version"
- ] = "{} - {} - {} - {}".format(command, metric, setup_name, version)
+ variant_labels_dict["command_and_metric_and_version"] = (
+ "{} - {} - {}".format(command, metric, version)
+ )
+ variant_labels_dict["command_and_metric_and_setup_and_version"] = (
+ "{} - {} - {} - {}".format(command, metric, setup_name, version)
+ )

  if branch is not None:
- variant_labels_dict[
- "command_and_metric_and_branch"
- ] = "{} - {} - {}".format(command, metric, branch)
- variant_labels_dict[
- "command_and_metric_and_setup_and_branch"
- ] = "{} - {} - {} - {}".format(command, metric, setup_name, branch)
+ variant_labels_dict["command_and_metric_and_branch"] = (
+ "{} - {} - {}".format(command, metric, branch)
+ )
+ variant_labels_dict["command_and_metric_and_setup_and_branch"] = (
+ "{} - {} - {} - {}".format(command, metric, setup_name, branch)
+ )
@@ -112,6 +112,18 @@ def create_run_remote_arguments(parser):
  action="store_true",
  help="skip environment variables check",
  )
+ parser.add_argument(
+ "--dry-run",
+ default=False,
+ action="store_true",
+ help="Setup environment and test connectivity without running benchmarks",
+ )
+ parser.add_argument(
+ "--dry-run-with-preload",
+ default=False,
+ action="store_true",
+ help="Setup environment, preload data, and test connectivity without running benchmarks",
+ )
  parser.add_argument(
  "--continue-on-module-check-error",
  default=False,
@@ -106,7 +106,11 @@ def remote_tool_pre_bench_step(
  )

  if "ftsb_" in benchmark_tool:
- (queries_file_link, remote_tool_link, tool_link,) = extract_ftsb_extra_links(
+ (
+ queries_file_link,
+ remote_tool_link,
+ tool_link,
+ ) = extract_ftsb_extra_links(
  benchmark_config, benchmark_tool, config_key, architecture
  )
  logging.info(
@@ -69,6 +69,7 @@ from redisbench_admin.utils.remote import (
  get_project_ts_tags,
  push_data_to_redistimeseries,
  fetch_remote_id_from_config,
+ perform_connectivity_test,
  )

  from redisbench_admin.utils.utils import (
@@ -695,6 +696,62 @@ def run_remote_command_logic(args, project_name, project_version):
  )
  )

+ # Handle dry-run modes
+ if args.dry_run or args.dry_run_with_preload:
+ logging.info(
+ "🏃 Dry-run mode detected - performing connectivity tests"
+ )
+
+ # Test basic connectivity after setup
+ connectivity_success = (
+ perform_connectivity_test(
+ redis_conns, "after environment setup"
+ )
+ )
+
+ if args.dry_run_with_preload:
+ logging.info(
+ "📦 Dry-run with preload - data loading already completed during setup"
+ )
+ # Test connectivity after preload (data was loaded during remote_db_spin)
+ connectivity_success = (
+ perform_connectivity_test(
+ redis_conns, "after data preloading"
+ )
+ and connectivity_success
+ )
+
+ # Print dry-run summary
+ logging.info("=" * 50)
+ logging.info("🎯 DRY-RUN SUMMARY")
+ logging.info("=" * 50)
+ logging.info(
+ f"✅ Infrastructure: {'Deployed' if args.inventory is None else 'Using existing'}"
+ )
+ logging.info(
+ f"✅ Database: {setup_type} ({'cluster' if cluster_enabled else 'standalone'}) started"
+ )
+ logging.info(
+ f"✅ Client tools: Setup completed on {client_public_ip}"
+ )
+ logging.info(
+ f"{'✅' if connectivity_success else '❌'} Connectivity: {len(redis_conns)} connection(s) tested"
+ )
+ if args.dry_run_with_preload:
+ logging.info(
+ "✅ Data preload: Completed during setup"
+ )
+ logging.info(
+ "🏁 Dry-run completed successfully"
+ )
+ logging.info(
+ "⏭️ Benchmark execution skipped (dry-run mode)"
+ )
+ logging.info("=" * 50)
+
+ # Skip benchmark execution and continue to next test
+ continue
+
  logging.info(
  "Will store benchmark json output to local file {}".format(
  local_bench_fname
@@ -1429,20 +1486,20 @@ def commandstats_latencystats_process_name(
  branch = variant_labels_dict["branch"]

  if version is not None:
- variant_labels_dict[
- "command_and_metric_and_version"
- ] = "{} - {} - {}".format(command, metric, version)
- variant_labels_dict[
- "command_and_metric_and_setup_and_version"
- ] = "{} - {} - {} - {}".format(command, metric, setup_name, version)
+ variant_labels_dict["command_and_metric_and_version"] = (
+ "{} - {} - {}".format(command, metric, version)
+ )
+ variant_labels_dict["command_and_metric_and_setup_and_version"] = (
+ "{} - {} - {} - {}".format(command, metric, setup_name, version)
+ )

  if branch is not None:
- variant_labels_dict[
- "command_and_metric_and_branch"
- ] = "{} - {} - {}".format(command, metric, branch)
- variant_labels_dict[
- "command_and_metric_and_setup_and_branch"
- ] = "{} - {} - {} - {}".format(command, metric, setup_name, branch)
+ variant_labels_dict["command_and_metric_and_branch"] = (
+ "{} - {} - {}".format(command, metric, branch)
+ )
+ variant_labels_dict["command_and_metric_and_setup_and_branch"] = (
+ "{} - {} - {} - {}".format(command, metric, setup_name, branch)
+ )


  def shutdown_remote_redis(redis_conns, ssh_tunnel):
@@ -32,7 +32,11 @@ def terraform_spin_or_reuse_env(
  tf_folder_path=None,
  architecture="x86_64",
  ):
- (remote_setup, deployment_type, remote_id,) = fetch_remote_setup_from_config(
+ (
+ remote_setup,
+ deployment_type,
+ remote_id,
+ ) = fetch_remote_setup_from_config(
  benchmark_config["remote"],
  "https://github.com/redis-performance/testing-infrastructure.git",
  "master",
@@ -771,7 +771,10 @@ def extract_perversion_timeseries_from_results(
  ):
  break_by_key = "version"
  break_by_str = "by.{}".format(break_by_key)
- (branch_time_series_dict, target_tables,) = common_timeseries_extraction(
+ (
+ branch_time_series_dict,
+ target_tables,
+ ) = common_timeseries_extraction(
  break_by_key,
  break_by_str,
  datapoints_timestamp,
@@ -942,9 +945,9 @@ def from_metric_kv_to_timeserie(

  target_table_dict[target_name] = target_value

- target_table_dict[
- "{}:percent {}".format(target_name, comparison_type)
- ] = target_value_pct_str
+ target_table_dict["{}:percent {}".format(target_name, comparison_type)] = (
+ target_value_pct_str
+ )
  return target_table_keyname, target_table_dict

@@ -1186,3 +1189,31 @@ def check_ec2_env():
  logging.error(error_message)

  return status, error_message
+
+
+ def perform_connectivity_test(redis_conns, test_description=""):
+ """Perform PING test on all Redis connections"""
+ logging.info(f"🔍 Performing connectivity test: {test_description}")
+
+ success_count = 0
+ total_count = len(redis_conns)
+
+ for i, conn in enumerate(redis_conns):
+ try:
+ result = conn.ping()
+ if result:
+ logging.info(f"✅ Connection {i}: PING successful")
+ success_count += 1
+ else:
+ logging.error(f"❌ Connection {i}: PING returned False")
+ except Exception as e:
+ logging.error(f"❌ Connection {i}: PING failed - {e}")
+
+ if success_count == total_count:
+ logging.info(f"🎉 All {total_count} connectivity tests passed!")
+ return True
+ else:
+ logging.error(
+ f"💥 {total_count - success_count}/{total_count} connectivity tests failed!"
+ )
+ return False
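Both runners import this new helper from redisbench_admin.utils.remote (see the import hunks earlier in this diff) and call it with their list of Redis connections. A minimal usage sketch; the connection below is an assumption for illustration, not a value used by the package:

```python
import logging

import redis
from redisbench_admin.utils.remote import perform_connectivity_test

logging.basicConfig(level=logging.INFO)

# Hypothetical local node; in the runners this list comes from the DB setup step.
redis_conns = [redis.Redis(host="127.0.0.1", port=6379)]

ok = perform_connectivity_test(redis_conns, "after local environment setup")
print("connectivity ok:", ok)
```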
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: redisbench-admin
- Version: 0.11.39
+ Version: 0.11.40
  Summary: Redis benchmark run helper. A wrapper around Redis and Redis Modules benchmark tools ( ftsb_redisearch, memtier_benchmark, redis-benchmark, aibench, etc... ).
  Author: filipecosta90
  Author-email: filipecosta.90@gmail.com
@@ -154,7 +154,13 @@ $ tox

  To run a specific test:
  ```sh
- $ tox -- tests/test_redistimeseries.py
+ $ tox -- tests/test_defaults_purpose_built_env.py
+ ```
+
+ To run a specific test and persist the docker container used for timeseries:
+
+ ```
+ tox --docker-dont-stop=rts_datasink -- -vv --log-cli-level=INFO tests/test_defaults_purpose_built_env.py
  ```
  To run a specific test with verbose logging:
  ```sh
@@ -3,19 +3,19 @@ redisbench_admin/cli.py,sha256=LAS5qnqScXKhxHYfXWB0mvAYaUYrSurIwadhexEa9g4,7740
  redisbench_admin/commands/__init__.py,sha256=mzVrEtqefFdopyzR-W6xx3How95dyZfToGKm1-_YzeY,95
  redisbench_admin/commands/commands.json.py,sha256=mzVrEtqefFdopyzR-W6xx3How95dyZfToGKm1-_YzeY,95
  redisbench_admin/compare/__init__.py,sha256=DtBXRp0Q01XgCFmY-1OIePMyyYihVNAjZ1Y8zwqSDN0,101
- redisbench_admin/compare/args.py,sha256=Wy0An7bAcqUO06a2_cNeAS2ighL1zFRqbgGZIK1eKHs,6178
- redisbench_admin/compare/compare.py,sha256=KJ8jBiQ8WSlcFeal82xEg3-FkhY5SeXQa_ubuyC7zmw,103180
+ redisbench_admin/compare/args.py,sha256=qyOgo0CBE3NdDRTa5dqgNV_0eZJ0jkGBliGTasHuWls,6256
+ redisbench_admin/compare/compare.py,sha256=ZE6j5T8SIiyW6mpUCkVWRvo5xT4Tkf-SnOnYSSQ9zt4,103382
  redisbench_admin/deploy/__init__.py,sha256=DtBXRp0Q01XgCFmY-1OIePMyyYihVNAjZ1Y8zwqSDN0,101
  redisbench_admin/deploy/args.py,sha256=neLUcQqI__HkJItkQg2C293hl5g3yHG40t171r7-E5Y,1732
- redisbench_admin/deploy/deploy.py,sha256=c1srxDMaUHuyh6wGdgLqzTz3ljZFtGqiumtAmguVyuk,3791
+ redisbench_admin/deploy/deploy.py,sha256=MtfJbsL97DLrbBYut6zRCzyEMebX4xWoZE-m4-JDRB8,3885
  redisbench_admin/environments/__init__.py,sha256=cD7zfXt0VEmy0b7452HvcAxX_9kVj6Vm213yNdUHP20,95
- redisbench_admin/environments/oss_cluster.py,sha256=lUOG6oN8VXAnDXFK7Xns-ag-hSOSxxxL8jZ4Mh03hQY,6681
+ redisbench_admin/environments/oss_cluster.py,sha256=yYOvDRcEJxG9dkzd-lvaKTrt71MNeMdAV0G6TjHxsX8,8142
  redisbench_admin/environments/oss_standalone.py,sha256=Sl38rUpwJ3wNOl9zn38iK8q2iJi2pRFmaJAZJbuT_SQ,2474
  redisbench_admin/export/__init__.py,sha256=DtBXRp0Q01XgCFmY-1OIePMyyYihVNAjZ1Y8zwqSDN0,101
  redisbench_admin/export/args.py,sha256=v_WjJCNz_LeIFMNwSN6XwRmvSx1K2ys8XS1gK50EM_4,3508
  redisbench_admin/export/common/__init__.py,sha256=DtBXRp0Q01XgCFmY-1OIePMyyYihVNAjZ1Y8zwqSDN0,101
  redisbench_admin/export/common/common.py,sha256=LnvXjMLlJRzMTxiFIjrfRFfDx9JJm88OZHu7lnTOpFA,4331
- redisbench_admin/export/export.py,sha256=ozt3WmCMbttJoY6Ac2CAdA_5DuhKNaImun4BEdf_1f0,11228
+ redisbench_admin/export/export.py,sha256=u00NjaCbWhCJ319leVlP4ZkqiqZt5FN4Gbag4Poo23M,11274
  redisbench_admin/export/google_benchmark/__init__.py,sha256=DtBXRp0Q01XgCFmY-1OIePMyyYihVNAjZ1Y8zwqSDN0,101
  redisbench_admin/export/google_benchmark/google_benchmark_json_format.py,sha256=OuMaMmmma5VvXA0rcLIQSMxIq81oa5I3xYDFhbWj-IA,1804
  redisbench_admin/export/memtier_benchmark/__init__.py,sha256=DtBXRp0Q01XgCFmY-1OIePMyyYihVNAjZ1Y8zwqSDN0,101
@@ -35,7 +35,7 @@ redisbench_admin/grafana_api/grafana_api.py,sha256=dG17GCYmWRILmy7h3-OiBeGzuNGnR
  redisbench_admin/profilers/__init__.py,sha256=DtBXRp0Q01XgCFmY-1OIePMyyYihVNAjZ1Y8zwqSDN0,101
  redisbench_admin/profilers/daemon.py,sha256=Y4ZbbH-cRHJk9cvpsb60UZFq_HVHWXtatb7T2vtlRKo,12973
  redisbench_admin/profilers/flamegraph.pl,sha256=Za5XE-1gb_U-nzqwoyRwfe1TB182c64gITa-2klWTTA,35898
- redisbench_admin/profilers/perf.py,sha256=OjHI9iKqCx9vZDqRqz0xOGoeKLTRps-ikBKVxSJqwC4,27756
+ redisbench_admin/profilers/perf.py,sha256=HtzzMVsXEJa1H7tOAfKlbFYDn2KnxAG_IU9yKPKZB7w,27772
  redisbench_admin/profilers/perf_daemon_caller.py,sha256=nD97cXmX3JytyafvNMmhUBq40uYrf6vtjdJ1TXZbvVY,4948
  redisbench_admin/profilers/pprof.py,sha256=g7oNC3AtNDTUOBIh_mIi5bFl_b0mL8tqBu6qKvAOrKw,3949
  redisbench_admin/profilers/profilers.py,sha256=4C1xaPyLoPydJ3eBAxW7IlSHG-3qj3A3BAKejiZXEK0,510
@@ -176,14 +176,14 @@ redisbench_admin/run/ann/pkg/test/test-jaccard.py,sha256=oIhaQCQKrQokwv3fvgLSwPl
  redisbench_admin/run/ann/pkg/test/test-metrics.py,sha256=vJdS8Kuk8bAnpB65Uqb-9rUUI35XrHwaO3cNwKX5gxc,3057
  redisbench_admin/run/args.py,sha256=tevHZrezJ4RreHp6K-MGHko3e1Gi_IdsS2Q0jD2ZSoU,8173
  redisbench_admin/run/cluster.py,sha256=_Y6a8Dbu1cJ7OxhgymKQSZcCmV8cZ3UpGEWL6b6O84Y,6363
- redisbench_admin/run/common.py,sha256=u1XM87EsqAa4pE8IMY_Pq1DaiwvcTG79xcv9IVaYMj0,28218
+ redisbench_admin/run/common.py,sha256=5TAxmHbUti7S3nzPzn7f-L4ls6gpStU2C4yvkiii7m4,28410
  redisbench_admin/run/ftsb/__init__.py,sha256=DtBXRp0Q01XgCFmY-1OIePMyyYihVNAjZ1Y8zwqSDN0,101
  redisbench_admin/run/ftsb/ftsb.py,sha256=NP-K_hCEagmX5ayN0pQVtOdQxDTwgxKrnzz9_MLT9qQ,2492
  redisbench_admin/run/git.py,sha256=6UYGcTN0MPzf4QDVoJnFkou0yZasLF6jLG7f0zoySq8,3064
  redisbench_admin/run/grafana.py,sha256=iMDgMyJKinpZMTD43rZ1IcRGkadjFjCxaB48mYWkvG4,9421
  redisbench_admin/run/memtier_benchmark/__init__.py,sha256=DtBXRp0Q01XgCFmY-1OIePMyyYihVNAjZ1Y8zwqSDN0,101
  redisbench_admin/run/memtier_benchmark/memtier_benchmark.py,sha256=wTd2olovvFBZ98mOSr6DM5BJsdaiuPteEZzBqeSgbkE,4246
- redisbench_admin/run/metrics.py,sha256=uOOwOC9vTC97y2hxSKwt_RJqxpkLJmSR3oOv_WWnS9o,7630
+ redisbench_admin/run/metrics.py,sha256=8EQdcZbCiFB_kIR1WtUQNOPV8y74bZ8Dj51Cv0aR4nk,7556
  redisbench_admin/run/modules.py,sha256=9To85oDw2tmUNmTDxOgvKls_46oZRcd2cCt6xNjIWiA,1691
  redisbench_admin/run/redis_benchmark/__init__.py,sha256=DtBXRp0Q01XgCFmY-1OIePMyyYihVNAjZ1Y8zwqSDN0,101
  redisbench_admin/run/redis_benchmark/redis_benchmark.py,sha256=e-Az2uTlt3z2W4uzlUsdxeT8GITpxpGb-Mjb6JxrSWc,6848
@@ -199,19 +199,19 @@ redisbench_admin/run/ycsb/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJW
  redisbench_admin/run/ycsb/ycsb.py,sha256=cs5saVH7C4YpDvzhoa15PwEho59qTVR1E90v_FYjMVw,6873
  redisbench_admin/run_async/__init__.py,sha256=DtBXRp0Q01XgCFmY-1OIePMyyYihVNAjZ1Y8zwqSDN0,101
  redisbench_admin/run_async/async_env.py,sha256=tE1turaaZNHfOaSpGxh62EJWp88zoQFUf3sMbaS7JRA,2408
- redisbench_admin/run_async/async_terraform.py,sha256=fRzhUq9kIPs0EjQxrpctkoNiI2Q4fKTdNlVbBDpG1Ys,11223
+ redisbench_admin/run_async/async_terraform.py,sha256=ngOQnECUuC20pZwiJItaiBnzlwT2DiKciPTHtqLURe4,11299
  redisbench_admin/run_async/benchmark.py,sha256=S-dsaWGjgsPQxj8sXAACnbtNw5zlJnRFoo53ULbrMEY,1630
  redisbench_admin/run_async/log.py,sha256=cD7zfXt0VEmy0b7452HvcAxX_9kVj6Vm213yNdUHP20,95
- redisbench_admin/run_async/render_files.py,sha256=OMPy3-GnU14tQ4HNlF5utOnmzpRAXURwG_h8UDkTmYs,2674
+ redisbench_admin/run_async/render_files.py,sha256=NMagmx-2hsMET_XN8tkmQz55g-azqW7SjAqaq4GL8F0,2676
  redisbench_admin/run_async/run_async.py,sha256=g2ZOQqj9vXZYaRyNpJZtgfYyY9tMuRmEv3Hh3qWOUs8,14525
  redisbench_admin/run_local/__init__.py,sha256=DtBXRp0Q01XgCFmY-1OIePMyyYihVNAjZ1Y8zwqSDN0,101
- redisbench_admin/run_local/args.py,sha256=9Qr-IVQJ3TMqFkn9Jp597KjU2AGq3u0X5Eb82CWD7wk,1504
+ redisbench_admin/run_local/args.py,sha256=LPpqtx1cH1dkkeHjYlaFnAp_TijxnzPZFO2CmYD9ikU,1906
  redisbench_admin/run_local/local_client.py,sha256=gwawMDOBrf7m--uyxu8kMZC5LBiLjbUBSKvzVOdOAas,124
  redisbench_admin/run_local/local_db.py,sha256=9vINqKOs-wDMFEuEHT0I8KO9YnEo_h4NWNk5da3LwSY,7518
  redisbench_admin/run_local/local_helpers.py,sha256=JyqLW2-Sbm35BXjxxfOB1yK7ADdLfcVrq08NLNdIwac,7026
- redisbench_admin/run_local/run_local.py,sha256=j3FvRPP6ROk5COX7j5bCizXa7UOHd-A8EnxIxR6gQHU,31176
+ redisbench_admin/run_local/run_local.py,sha256=QHnGfVAaVuct7t0WrWyQpbirC3MWX7fQF5-kXU_pJBs,34834
  redisbench_admin/run_remote/__init__.py,sha256=DtBXRp0Q01XgCFmY-1OIePMyyYihVNAjZ1Y8zwqSDN0,101
- redisbench_admin/run_remote/args.py,sha256=vhV87avBwXL8c2QLqrAkIyWD53MYhN06F-3wRv3l5xE,3829
+ redisbench_admin/run_remote/args.py,sha256=VnZ20gqpk-NMzNxaaUHgedgl3B6fZm3Fewj6cPZ1iCg,4231
  redisbench_admin/run_remote/consts.py,sha256=bCMkwyeBD-EmOpoHKni7LjWy5WuaxGJhGhqpi4AL0RQ,386
  redisbench_admin/run_remote/log.py,sha256=cD7zfXt0VEmy0b7452HvcAxX_9kVj6Vm213yNdUHP20,95
  redisbench_admin/run_remote/notifications.py,sha256=-W9fLaftEFNfplBl2clHk37jbYxliDbHftQ62khN31k,2157
@@ -219,24 +219,24 @@ redisbench_admin/run_remote/remote_client.py,sha256=rRmDro1weto01wzqYpId8NMPoizE
  redisbench_admin/run_remote/remote_db.py,sha256=EEDeiOZk-godr5EINscEkOJLGWUN3gFfH6RaBzAKbak,14566
  redisbench_admin/run_remote/remote_env.py,sha256=Ux_0QT1unNRlKl3cakzjG5Px1uuxOOfBoF_pnalx_T8,4936
  redisbench_admin/run_remote/remote_failures.py,sha256=IOo6DyxarcwwMPCeN4gWB2JrhuC9iBLwq0nCROqr5ak,1567
- redisbench_admin/run_remote/remote_helpers.py,sha256=A3oyMg3PcXluB2g746jgfnVpq4_WIsDw0o_6-okAh4E,10552
- redisbench_admin/run_remote/run_remote.py,sha256=s4GEqNU-hyBVCrSJCb4Tg9jZXe6EymcZcZ7-p9viKE4,70198
+ redisbench_admin/run_remote/remote_helpers.py,sha256=skWeGyDJBmyx_UwUekT3N3_nOJvF2-Hvu-E7vKlO9gg,10598
+ redisbench_admin/run_remote/run_remote.py,sha256=_K7Bx9CWarf6UOUjt9CDKGJBqu-qgLjGYzPs80jOwnU,73789
  redisbench_admin/run_remote/standalone.py,sha256=BT_3ojr6w84FZGwTkFxDLnSGMth24FyaoXedKLhf5h4,12629
- redisbench_admin/run_remote/terraform.py,sha256=FCkE69h2nYK6ehfHGkoQqE-e2tPh2FWSYX1AoAei4GA,3988
+ redisbench_admin/run_remote/terraform.py,sha256=vV3eWXNwj7vsnFNqUgCir5ueZS4VYopEyzWiTtoSq0Q,4018
  redisbench_admin/utils/__init__.py,sha256=DtBXRp0Q01XgCFmY-1OIePMyyYihVNAjZ1Y8zwqSDN0,101
  redisbench_admin/utils/benchmark_config.py,sha256=bC2C6rnj89wkkSlOXyyfe0N15unn_M1t1zfskfVkb98,21387
  redisbench_admin/utils/local.py,sha256=zUvyVI9LZMT3qyxs1pO3mXL6Bt_1z9EZUGppaRcWNRA,3890
  redisbench_admin/utils/redisearch.py,sha256=lchUEzpt0zB1rHwlDlw9LLifAnxFWcLP-PePw7TjL-0,1602
  redisbench_admin/utils/redisgraph_benchmark_go.py,sha256=os7EJt6kBxsFJLKkSoANbjMT7-cEq4-Ns-49alk2Tf8,2048
- redisbench_admin/utils/remote.py,sha256=e9N1poTODH1zcrmyscOco9jdxHxOBZYpW00FBGWGfQY,40670
+ redisbench_admin/utils/remote.py,sha256=qlO49qdJh96rOEXnFMgFcpiwYjmrG0K060bEc3nyHD0,41643
  redisbench_admin/utils/results.py,sha256=uKk3uNJ--bSXlUj_HGQ2OaV6MVqmXJVM8xTzFV6EOw4,3267
  redisbench_admin/utils/ssh.py,sha256=QW4AwlocMHJt05QMdN_4f8WeDmxiEwR80ny8VBThq6k,6533
  redisbench_admin/utils/utils.py,sha256=XVSvo1_DdcYwk2jOxL3VPVPbnDnhGYt8ieYfANo6rTo,15085
  redisbench_admin/watchdog/__init__.py,sha256=cD7zfXt0VEmy0b7452HvcAxX_9kVj6Vm213yNdUHP20,95
  redisbench_admin/watchdog/args.py,sha256=nKsG1G6ATOZlAMHMtT9u3kXxduKCbejSZ5x8oB_ynZ8,1312
  redisbench_admin/watchdog/watchdog.py,sha256=0wWYge3x_OMxWrzazNhJif2NK4tKsI963HVZqjczRag,6189
- redisbench_admin-0.11.39.dist-info/LICENSE,sha256=AAMtfs82zOOvmG68vILivm6lxi2rcOlGObmA8jzxQvw,10768
- redisbench_admin-0.11.39.dist-info/METADATA,sha256=apkZr_qljuPHiCPAH3iWj8TBAYvEQHKgrW0pNSzlCOw,5389
- redisbench_admin-0.11.39.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
- redisbench_admin-0.11.39.dist-info/entry_points.txt,sha256=UUawXk_AS-PlieKJ1QxPQXGsRLb6OW_F0MtmA1W0KE8,113
- redisbench_admin-0.11.39.dist-info/RECORD,,
+ redisbench_admin-0.11.40.dist-info/LICENSE,sha256=AAMtfs82zOOvmG68vILivm6lxi2rcOlGObmA8jzxQvw,10768
+ redisbench_admin-0.11.40.dist-info/METADATA,sha256=olR0Y3plqHRTgl01WGkXOkyV5Iz_veIrpEG6vYaQksg,5596
+ redisbench_admin-0.11.40.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
+ redisbench_admin-0.11.40.dist-info/entry_points.txt,sha256=UUawXk_AS-PlieKJ1QxPQXGsRLb6OW_F0MtmA1W0KE8,113
+ redisbench_admin-0.11.40.dist-info/RECORD,,