redisbench-admin 0.11.38__py3-none-any.whl → 0.11.40__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- redisbench_admin/compare/args.py +1 -1
- redisbench_admin/compare/compare.py +1496 -10
- redisbench_admin/environments/oss_cluster.py +37 -0
- redisbench_admin/run/cluster.py +6 -0
- redisbench_admin/run/metrics.py +0 -2
- redisbench_admin/run_local/args.py +12 -0
- redisbench_admin/run_local/run_local.py +108 -51
- redisbench_admin/run_remote/args.py +12 -0
- redisbench_admin/run_remote/remote_db.py +62 -23
- redisbench_admin/run_remote/remote_helpers.py +17 -0
- redisbench_admin/run_remote/run_remote.py +79 -1
- redisbench_admin/run_remote/standalone.py +136 -0
- redisbench_admin/utils/remote.py +28 -0
- redisbench_admin/utils/utils.py +42 -24
- {redisbench_admin-0.11.38.dist-info → redisbench_admin-0.11.40.dist-info}/METADATA +8 -2
- {redisbench_admin-0.11.38.dist-info → redisbench_admin-0.11.40.dist-info}/RECORD +19 -19
- {redisbench_admin-0.11.38.dist-info → redisbench_admin-0.11.40.dist-info}/LICENSE +0 -0
- {redisbench_admin-0.11.38.dist-info → redisbench_admin-0.11.40.dist-info}/WHEEL +0 -0
- {redisbench_admin-0.11.38.dist-info → redisbench_admin-0.11.40.dist-info}/entry_points.txt +0 -0

redisbench_admin/environments/oss_cluster.py
CHANGED

@@ -89,6 +89,20 @@ def generate_meet_cmds(shard_count, shard_host, start_port):
 def setup_oss_cluster_from_conns(meet_cmds, redis_conns, shard_count):
     status = False
     try:
+        # Pre-setup validation: check uptime and cluster mode
+        for primary_pos, redis_conn in enumerate(redis_conns):
+            redis_conn.ping()
+
+            server_info = redis_conn.info("server")
+            uptime = server_info.get("uptime_in_seconds", 0)
+            cluster_enabled = server_info.get("cluster_enabled", 0)
+            tcp_port = server_info.get("tcp_port", "n/a")
+
+            logging.info(
+                f"Node {primary_pos} ({tcp_port}): uptime={uptime}s cluster_enabled={cluster_enabled}"
+            )
+
+        # Send meet commands
         for primary_pos, redis_conn in enumerate(redis_conns):
             logging.info(
                 "Sending to primary #{} a total of {} MEET commands".format(

@@ -138,6 +152,29 @@ def setup_oss_cluster_from_conns(meet_cmds, redis_conns, shard_count):
                 )
                 logging.info("Node {}: cluster_state {}".format(n, cluster_state_ok))
                 sleep(1)
+
+        # Post-setup validation: check uptime and cluster mode
+        sleep(10)
+        for primary_pos, redis_conn in enumerate(redis_conns):
+            redis_conn.ping()
+
+            server_info = redis_conn.info("server")
+            uptime = server_info.get("uptime_in_seconds", 0)
+            server_info = redis_conn.info("cluster")
+            cluster_enabled = server_info.get("cluster_enabled", -1)
+            tcp_port = server_info.get("tcp_port", "n/a")
+
+            logging.info(
+                f"Node {primary_pos} ({tcp_port}): uptime={uptime}s cluster_enabled={cluster_enabled}"
+            )
+
+            if cluster_enabled != 1:
+                logging.error(
+                    "Node {}: cluster mode is not enabled (cluster_enabled={})".format(
+                        primary_pos, cluster_enabled
+                    )
+                )
+                return False
         status = True
     except redis.exceptions.RedisError as e:
         logging.warning("Received an error {}".format(e.__str__()))
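
Note: the pre/post checks added above rely on fields that redis-py surfaces through INFO. A minimal standalone version of the same check might look like the sketch below; the host and port are placeholders rather than values from this diff, and cluster_enabled is read from the "cluster" section of INFO, as the post-setup check does.

```python
import redis

# Placeholder connection details for one primary of the cluster under test.
conn = redis.Redis(host="127.0.0.1", port=30001)
conn.ping()

server_info = conn.info("server")
print("uptime_in_seconds:", server_info.get("uptime_in_seconds", 0))
print("tcp_port:", server_info.get("tcp_port", "n/a"))

# cluster_enabled is reported under the "cluster" section of INFO.
cluster_info = conn.info("cluster")
if cluster_info.get("cluster_enabled", 0) != 1:
    raise SystemExit("cluster mode is not enabled on this node")
```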

redisbench_admin/run/cluster.py
CHANGED

@@ -115,6 +115,12 @@ def spin_up_redis_cluster_remote_redis(
     logname,
     redis_7=True,
 ):
+    # Import the function from standalone module
+    from redisbench_admin.run_remote.standalone import ensure_redis_server_available
+
+    # Ensure redis-server is available before trying to start cluster
+    ensure_redis_server_available(server_public_ip, username, private_key, ssh_port)
+
     logging.info("Generating the remote redis-server command arguments")
     redis_process_commands = []
     logfiles = []
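
Note: ensure_redis_server_available is defined in redisbench_admin/run_remote/standalone.py (which gains 136 lines in this release; those hunks are not shown in this excerpt), so its exact behaviour is not visible here. As a rough, hypothetical sketch of the pattern (an "is this binary on the remote PATH" probe over SSH with paramiko), it might resemble the following; the function name and logic are illustrative assumptions, not the actual implementation.

```python
# Hypothetical sketch only -- the real helper lives in
# redisbench_admin/run_remote/standalone.py and may differ.
import paramiko


def ensure_tool_available_sketch(host, username, private_key, ssh_port, tool="redis-server"):
    """Return True if `tool` resolves on the remote host's PATH."""
    key = paramiko.RSAKey.from_private_key_file(private_key)
    client = paramiko.SSHClient()
    client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    client.connect(hostname=host, port=ssh_port, username=username, pkey=key)
    try:
        _, stdout, _ = client.exec_command("command -v {}".format(tool))
        return stdout.channel.recv_exit_status() == 0
    finally:
        client.close()
```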
redisbench_admin/run_local/args.py
CHANGED

@@ -40,4 +40,16 @@ def create_run_local_arguments(parser):
         default=IGNORE_KEYSPACE_ERRORS,
         help="Ignore keyspace check errors. Will still log them as errors",
     )
+    parser.add_argument(
+        "--dry-run",
+        default=False,
+        action="store_true",
+        help="Setup environment and test connectivity without running benchmarks",
+    )
+    parser.add_argument(
+        "--dry-run-with-preload",
+        default=False,
+        action="store_true",
+        help="Setup environment, preload data, and test connectivity without running benchmarks",
+    )
     return parser
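
Note: run-local and run-remote gain the same pair of flags (see the run_remote/args.py hunk further down). The sketch below shows how they parse, using a bare ArgumentParser that mirrors the additions above; it is illustrative only and does not use the package's own parser wiring.

```python
import argparse

# Mirror of the two options added to create_run_local_arguments() above.
parser = argparse.ArgumentParser()
parser.add_argument("--dry-run", default=False, action="store_true")
parser.add_argument("--dry-run-with-preload", default=False, action="store_true")

args = parser.parse_args(["--dry-run"])
assert args.dry_run is True and args.dry_run_with_preload is False
```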
redisbench_admin/run_local/run_local.py
CHANGED

@@ -16,6 +16,7 @@ from redisbench_admin.run.git import git_vars_crosscheck
 from redisbench_admin.utils.remote import (
     get_project_ts_tags,
     push_data_to_redistimeseries,
+    perform_connectivity_test,
 )

 import redisbench_admin.run.metrics

@@ -326,6 +327,7 @@ def run_local_command_logic(args, project_name, project_version):
             )

             # run the benchmark
+
             cpu_stats_thread = threading.Thread(
                 target=collect_cpu_data,
                 args=(redis_conns, 5.0, 1.0),

@@ -335,9 +337,60 @@ def run_local_command_logic(args, project_name, project_version):
             )
             cpu_stats_thread.start()
             benchmark_start_time = datetime.datetime.now()
-
-
+            logging.info(
+                "Running benchmark command: {}".format(command)
             )
+            # Handle dry-run modes
+            if args.dry_run or args.dry_run_with_preload:
+                logging.info(
+                    "🏃 Dry-run mode detected - performing connectivity tests"
+                )
+
+                # Test basic connectivity after setup
+                connectivity_success = perform_connectivity_test(
+                    redis_conns, "after local environment setup"
+                )
+
+                if args.dry_run_with_preload:
+                    logging.info(
+                        "📦 Dry-run with preload - data loading already completed during setup"
+                    )
+                    # Test connectivity after preload (data was loaded during local_db_spin)
+                    connectivity_success = (
+                        perform_connectivity_test(
+                            redis_conns, "after data preloading"
+                        )
+                        and connectivity_success
+                    )
+
+                # Print dry-run summary
+                logging.info("=" * 50)
+                logging.info("🎯 DRY-RUN SUMMARY")
+                logging.info("=" * 50)
+                logging.info(
+                    f"✅ Database: {setup_type} ({'cluster' if cluster_api_enabled else 'standalone'}) started locally"
+                )
+                logging.info(
+                    f"✅ Client tools: {benchmark_tool} available"
+                )
+                logging.info(
+                    f"{'✅' if connectivity_success else '❌'} Connectivity: {len(redis_conns)} connection(s) tested"
+                )
+                if args.dry_run_with_preload:
+                    logging.info(
+                        "✅ Data preload: Completed during setup"
+                    )
+                logging.info("🏁 Dry-run completed successfully")
+                logging.info(
+                    "⏭️ Benchmark execution skipped (dry-run mode)"
+                )
+                logging.info("=" * 50)
+
+                # Skip benchmark execution and continue to next test
+            else:
+                stdout, stderr = run_local_benchmark(
+                    benchmark_tool, command
+                )
             benchmark_end_time = datetime.datetime.now()
             redisbench_admin.run.metrics.BENCHMARK_RUNNING_GLOBAL = (
                 False

@@ -364,10 +417,10 @@ def run_local_command_logic(args, project_name, project_version):
                     benchmark_end_time, benchmark_start_time
                 )
             )
-
-
-
-
+            if args.dry_run is False:
+                logging.info("Extracting the benchmark results")
+                logging.info("stdout: {}".format(stdout))
+                logging.info("stderr: {}".format(stderr))

             (
                 _,

@@ -420,53 +473,54 @@ def run_local_command_logic(args, project_name, project_version):
                 test_name,
                 tf_triggering_env,
             )
-
-            post_process_benchmark_results(
-                benchmark_tool,
-                local_benchmark_output_filename,
-                start_time_ms,
-                start_time_str,
-                stdout,
-            )
             results_dict = {}
-
-
-
-
-
-
-
-                results_dict,
-                setup_name,
-                setup_type,
-                test_name,
-                total_shards_cpu_usage,
-                overall_end_time_metrics,
-                [
-                    "memory_used_memory",
-                    "memory_used_memory_dataset",
-                ],
-            )
-            export_redis_metrics(
-                artifact_version,
-                end_time_ms,
-                overall_end_time_metrics,
-                rts,
-                setup_name,
-                setup_type,
-                test_name,
-                tf_github_branch,
-                tf_github_org,
-                tf_github_repo,
-                tf_triggering_env,
-                {"metric-type": "redis-metrics"},
-                0,
+            if args.dry_run is False:
+                post_process_benchmark_results(
+                    benchmark_tool,
+                    local_benchmark_output_filename,
+                    start_time_ms,
+                    start_time_str,
+                    stdout,
             )

-
-
-
-
+                with open(
+                    local_benchmark_output_filename, "r"
+                ) as json_file:
+                    results_dict = json.load(json_file)
+                print_results_table_stdout(
+                    benchmark_config,
+                    default_metrics,
+                    results_dict,
+                    setup_name,
+                    setup_type,
+                    test_name,
+                    total_shards_cpu_usage,
+                    overall_end_time_metrics,
+                    [
+                        "memory_used_memory",
+                        "memory_used_memory_dataset",
+                    ],
+                )
+                export_redis_metrics(
+                    artifact_version,
+                    end_time_ms,
+                    overall_end_time_metrics,
+                    rts,
+                    setup_name,
+                    setup_type,
+                    test_name,
+                    tf_github_branch,
+                    tf_github_org,
+                    tf_github_repo,
+                    tf_triggering_env,
+                    {"metric-type": "redis-metrics"},
+                    0,
+                )
+
+                # check KPIs
+                return_code = results_dict_kpi_check(
+                    benchmark_config, results_dict, return_code
+                )

             metadata_tags = get_metadata_tags(benchmark_config)
             (

@@ -508,7 +562,10 @@ def run_local_command_logic(args, project_name, project_version):
             "Some unexpected exception was caught "
             "during local work. Failing test...."
         )
-
+        if len(sys.exc_info()) > 0:
+            logging.critical(sys.exc_info()[0])
+        else:
+            logging.critical(sys.exc_info())
         print("-" * 60)
         traceback.print_exc(file=sys.stdout)
         print("-" * 60)
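
Note: perform_connectivity_test itself is part of the +28 lines in redisbench_admin/utils/remote.py, which are not included in this excerpt, so its exact behaviour is not visible here. A hypothetical helper in the same spirit (ping every connection, log the outcome, return an overall pass/fail) could look like this:

```python
import logging


def connectivity_test_sketch(redis_conns, context=""):
    """Illustrative stand-in for perform_connectivity_test; not the real code."""
    ok = True
    for pos, conn in enumerate(redis_conns):
        try:
            conn.ping()
            logging.info("Connection {} OK {}".format(pos, context))
        except Exception as e:
            logging.error("Connection {} failed {}: {}".format(pos, context, e))
            ok = False
    return ok
```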
redisbench_admin/run_remote/args.py
CHANGED

@@ -112,6 +112,18 @@ def create_run_remote_arguments(parser):
         action="store_true",
         help="skip environment variables check",
     )
+    parser.add_argument(
+        "--dry-run",
+        default=False,
+        action="store_true",
+        help="Setup environment and test connectivity without running benchmarks",
+    )
+    parser.add_argument(
+        "--dry-run-with-preload",
+        default=False,
+        action="store_true",
+        help="Setup environment, preload data, and test connectivity without running benchmarks",
+    )
     parser.add_argument(
         "--continue-on-module-check-error",
         default=False,
redisbench_admin/run_remote/remote_db.py
CHANGED

@@ -406,9 +406,17 @@ def db_error_artifacts(
     upload_s3,
     username,
 ):
+    # Import the zip check function
+    from redisbench_admin.run_remote.standalone import ensure_zip_available
+
+    # Ensure zip is available before trying to use it
+    ensure_zip_available(server_public_ip, username, private_key, db_ssh_port)
+
     local_zipfile = "{}.zip".format(logname)
     remote_zipfile = "/home/{}/{}".format(username, local_zipfile)
-
+
+    # Create zip file
+    zip_result = execute_remote_commands(
         server_public_ip,
         username,
         private_key,

@@ -417,26 +425,57 @@ def db_error_artifacts(
         ],
         db_ssh_port,
     )
-
-
-
-
-
-
-
-
-
-
-
+
+    # Check if zip creation was successful
+    zip_success = True
+    for pos, res_pos in enumerate(zip_result):
+        [recv_exit_status, stdout, stderr] = res_pos
+        if recv_exit_status != 0:
+            logging.warning(
+                "Zip creation failed with exit code {}. stdout: {}. stderr: {}".format(
+                    recv_exit_status, stdout, stderr
+                )
+            )
+            zip_success = False
+
+    # Only try to upload if zip was created successfully
+    if zip_success:
+        try:
+            failed_remote_run_artifact_store(
+                upload_s3,
+                server_public_ip,
+                dirname,
+                remote_zipfile,
+                local_zipfile,
+                s3_bucket_name,
+                s3_bucket_path,
+                username,
+                private_key,
+            )
+        except Exception as e:
+            logging.warning(
+                "Failed to upload zip file to S3: {}. Continuing without upload.".format(
+                    e
+                )
+            )
+    else:
+        logging.warning("Skipping S3 upload due to zip creation failure")
     if len(full_logfiles) > 0:
-
-
-
-
-
-
-
-
-
-
-
+        try:
+            failed_remote_run_artifact_store(
+                upload_s3,
+                server_public_ip,
+                dirname,
+                full_logfiles[0],
+                logname,
+                s3_bucket_name,
+                s3_bucket_path,
+                username,
+                private_key,
+            )
+        except Exception as e:
+            logging.warning(
+                "Failed to upload logfile to S3: {}. Continuing without upload.".format(
+                    e
+                )
+            )
redisbench_admin/run_remote/remote_helpers.py
CHANGED

@@ -71,6 +71,23 @@ def remote_tool_pre_bench_step(
     logging.info(
         f"Settting up remote tool {benchmark_tool} requirements. architecture ={architecture}"
     )
+
+    # Check and install benchmark tools if needed
+    if benchmark_tool == "memtier_benchmark":
+        from redisbench_admin.run_remote.standalone import (
+            ensure_memtier_benchmark_available,
+        )
+
+        ensure_memtier_benchmark_available(
+            client_public_ip, username, private_key, client_ssh_port
+        )
+    elif benchmark_tool == "redis-benchmark":
+        from redisbench_admin.run_remote.standalone import ensure_redis_server_available
+
+        # redis-benchmark comes with redis-server, so ensure redis-server is installed
+        ensure_redis_server_available(
+            client_public_ip, username, private_key, client_ssh_port
+        )
     if benchmark_tool == "redisgraph-benchmark-go":
         setup_remote_benchmark_tool_redisgraph_benchmark_go(
             client_public_ip,
redisbench_admin/run_remote/run_remote.py
CHANGED

@@ -69,6 +69,7 @@ from redisbench_admin.utils.remote import (
     get_project_ts_tags,
     push_data_to_redistimeseries,
     fetch_remote_id_from_config,
+    perform_connectivity_test,
 )

 from redisbench_admin.utils.utils import (

@@ -161,7 +162,24 @@ def run_remote_command_logic(args, project_name, project_version):
         )
         webhook_client_slack = WebhookClient(webhook_url)

-
+    # Only check AWS credentials when actually needed
+    needs_aws_for_infrastructure = (
+        args.inventory is None
+    )  # No inventory = need to deploy with Terraform
+    needs_aws_for_s3 = args.upload_results_s3  # S3 upload enabled
+
+    if args.skip_env_vars_verify is False and (
+        needs_aws_for_infrastructure or needs_aws_for_s3
+    ):
+        # Log why AWS credentials are being checked
+        aws_reasons = []
+        if needs_aws_for_infrastructure:
+            aws_reasons.append("infrastructure deployment (no --inventory provided)")
+        if needs_aws_for_s3:
+            aws_reasons.append("S3 upload (--upload_results_s3 enabled)")
+
+        logging.info("AWS credentials required for: {}".format(", ".join(aws_reasons)))
+
         env_check_status, failure_reason = check_ec2_env()
         if env_check_status is False:
             if webhook_notifications_active:

@@ -177,6 +195,10 @@ def run_remote_command_logic(args, project_name, project_version):
                 )
             logging.critical("{}. Exiting right away!".format(failure_reason))
             exit(1)
+    elif args.skip_env_vars_verify is False:
+        logging.info(
+            "AWS credentials check skipped (using --inventory and S3 upload disabled)"
+        )

     continue_on_module_check_error = args.continue_on_module_check_error
     module_check_status, error_message = redis_modules_check(local_module_files)
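
Note: the AWS-credential gating added above boils down to a small predicate: credentials are only verified when Terraform has to deploy infrastructure (no --inventory given) or when results are uploaded to S3, and never when args.skip_env_vars_verify is set. A minimal restatement for illustration:

```python
def aws_credentials_needed(inventory, upload_results_s3, skip_env_vars_verify):
    """True when the run actually needs AWS credentials."""
    if skip_env_vars_verify:
        return False
    return inventory is None or bool(upload_results_s3)


assert aws_credentials_needed(None, False, False) is True        # must deploy infra
assert aws_credentials_needed("hosts.ini", False, False) is False  # inventory, no S3
assert aws_credentials_needed("hosts.ini", True, False) is True    # S3 upload enabled
```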
@@ -674,6 +696,62 @@ def run_remote_command_logic(args, project_name, project_version):
                 )
             )

+            # Handle dry-run modes
+            if args.dry_run or args.dry_run_with_preload:
+                logging.info(
+                    "🏃 Dry-run mode detected - performing connectivity tests"
+                )
+
+                # Test basic connectivity after setup
+                connectivity_success = (
+                    perform_connectivity_test(
+                        redis_conns, "after environment setup"
+                    )
+                )
+
+                if args.dry_run_with_preload:
+                    logging.info(
+                        "📦 Dry-run with preload - data loading already completed during setup"
+                    )
+                    # Test connectivity after preload (data was loaded during remote_db_spin)
+                    connectivity_success = (
+                        perform_connectivity_test(
+                            redis_conns, "after data preloading"
+                        )
+                        and connectivity_success
+                    )
+
+                # Print dry-run summary
+                logging.info("=" * 50)
+                logging.info("🎯 DRY-RUN SUMMARY")
+                logging.info("=" * 50)
+                logging.info(
+                    f"✅ Infrastructure: {'Deployed' if args.inventory is None else 'Using existing'}"
+                )
+                logging.info(
+                    f"✅ Database: {setup_type} ({'cluster' if cluster_enabled else 'standalone'}) started"
+                )
+                logging.info(
+                    f"✅ Client tools: Setup completed on {client_public_ip}"
+                )
+                logging.info(
+                    f"{'✅' if connectivity_success else '❌'} Connectivity: {len(redis_conns)} connection(s) tested"
+                )
+                if args.dry_run_with_preload:
+                    logging.info(
+                        "✅ Data preload: Completed during setup"
+                    )
+                logging.info(
+                    "🏁 Dry-run completed successfully"
+                )
+                logging.info(
+                    "⏭️ Benchmark execution skipped (dry-run mode)"
+                )
+                logging.info("=" * 50)
+
+                # Skip benchmark execution and continue to next test
+                continue
+
             logging.info(
                 "Will store benchmark json output to local file {}".format(
                     local_bench_fname