redisbench-admin 0.11.39__py3-none-any.whl → 0.11.41__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package as published to their public registry, and is provided for informational purposes only.
- redisbench_admin/compare/args.py +5 -0
- redisbench_admin/compare/compare.py +25 -25
- redisbench_admin/deploy/deploy.py +9 -1
- redisbench_admin/environments/oss_cluster.py +37 -0
- redisbench_admin/export/export.py +7 -1
- redisbench_admin/profilers/perf.py +24 -24
- redisbench_admin/run/common.py +24 -6
- redisbench_admin/run/metrics.py +0 -2
- redisbench_admin/run_async/async_terraform.py +10 -2
- redisbench_admin/run_async/render_files.py +3 -3
- redisbench_admin/run_local/args.py +12 -0
- redisbench_admin/run_local/run_local.py +120 -63
- redisbench_admin/run_remote/args.py +32 -0
- redisbench_admin/run_remote/remote_helpers.py +5 -1
- redisbench_admin/run_remote/run_remote.py +123 -18
- redisbench_admin/run_remote/standalone.py +421 -2
- redisbench_admin/run_remote/terraform.py +5 -1
- redisbench_admin/utils/remote.py +44 -4
- {redisbench_admin-0.11.39.dist-info → redisbench_admin-0.11.41.dist-info}/METADATA +8 -2
- {redisbench_admin-0.11.39.dist-info → redisbench_admin-0.11.41.dist-info}/RECORD +23 -23
- {redisbench_admin-0.11.39.dist-info → redisbench_admin-0.11.41.dist-info}/LICENSE +0 -0
- {redisbench_admin-0.11.39.dist-info → redisbench_admin-0.11.41.dist-info}/WHEEL +0 -0
- {redisbench_admin-0.11.39.dist-info → redisbench_admin-0.11.41.dist-info}/entry_points.txt +0 -0
redisbench_admin/run_local/run_local.py

```diff
@@ -16,6 +16,7 @@ from redisbench_admin.run.git import git_vars_crosscheck
 from redisbench_admin.utils.remote import (
     get_project_ts_tags,
     push_data_to_redistimeseries,
+    perform_connectivity_test,
 )
 
 import redisbench_admin.run.metrics
```
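Both the local and remote runners now import `perform_connectivity_test` from `redisbench_admin.utils.remote`; the helper itself lands in `utils/remote.py` (the +44 -4 entry above) but its body is not shown in these hunks. A minimal sketch of a PING-based check with the same call shape, assuming redis-py style connections — an illustration, not the actual implementation:

```python
import logging


def perform_connectivity_test(redis_conns, context=""):
    # Sketch only: PING every connection and report how many replied.
    # The real helper in redisbench_admin.utils.remote may differ.
    ok = 0
    for pos, conn in enumerate(redis_conns):
        try:
            conn.ping()
            ok += 1
        except Exception as e:
            logging.error("Connection #%d failed PING %s: %s", pos, context, e)
    logging.info(
        "Connectivity test %s: %d/%d connection(s) OK", context, ok, len(redis_conns)
    )
    return ok == len(redis_conns)
```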
```diff
@@ -326,6 +327,7 @@ def run_local_command_logic(args, project_name, project_version):
         )
 
         # run the benchmark
+
         cpu_stats_thread = threading.Thread(
             target=collect_cpu_data,
             args=(redis_conns, 5.0, 1.0),
```
```diff
@@ -335,9 +337,60 @@ def run_local_command_logic(args, project_name, project_version):
         )
         cpu_stats_thread.start()
         benchmark_start_time = datetime.datetime.now()
-
-
+        logging.info(
+            "Running benchmark command: {}".format(command)
         )
+        # Handle dry-run modes
+        if args.dry_run or args.dry_run_with_preload:
+            logging.info(
+                "🏃 Dry-run mode detected - performing connectivity tests"
+            )
+
+            # Test basic connectivity after setup
+            connectivity_success = perform_connectivity_test(
+                redis_conns, "after local environment setup"
+            )
+
+            if args.dry_run_with_preload:
+                logging.info(
+                    "📦 Dry-run with preload - data loading already completed during setup"
+                )
+                # Test connectivity after preload (data was loaded during local_db_spin)
+                connectivity_success = (
+                    perform_connectivity_test(
+                        redis_conns, "after data preloading"
+                    )
+                    and connectivity_success
+                )
+
+            # Print dry-run summary
+            logging.info("=" * 50)
+            logging.info("🎯 DRY-RUN SUMMARY")
+            logging.info("=" * 50)
+            logging.info(
+                f"✅ Database: {setup_type} ({'cluster' if cluster_api_enabled else 'standalone'}) started locally"
+            )
+            logging.info(
+                f"✅ Client tools: {benchmark_tool} available"
+            )
+            logging.info(
+                f"{'✅' if connectivity_success else '❌'} Connectivity: {len(redis_conns)} connection(s) tested"
+            )
+            if args.dry_run_with_preload:
+                logging.info(
+                    "✅ Data preload: Completed during setup"
+                )
+            logging.info("🏁 Dry-run completed successfully")
+            logging.info(
+                "⏭️ Benchmark execution skipped (dry-run mode)"
+            )
+            logging.info("=" * 50)
+
+            # Skip benchmark execution and continue to next test
+        else:
+            stdout, stderr = run_local_benchmark(
+                benchmark_tool, command
+            )
         benchmark_end_time = datetime.datetime.now()
         redisbench_admin.run.metrics.BENCHMARK_RUNNING_GLOBAL = (
             False
```
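The dry-run branch chains the two connectivity checks with `and`, so a failure right after setup is not masked by a later success after preloading. Condensed to its essentials (names taken from the hunk above; the preload itself is assumed to have already happened during `local_db_spin`):

```python
def dry_run_connectivity(args, redis_conns):
    # Same gating as above: always test after setup, optionally test again
    # after preload, and AND the results so any earlier failure is kept.
    ok = perform_connectivity_test(redis_conns, "after local environment setup")
    if args.dry_run_with_preload:
        ok = perform_connectivity_test(redis_conns, "after data preloading") and ok
    return ok
```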
```diff
@@ -364,10 +417,10 @@ def run_local_command_logic(args, project_name, project_version):
                 benchmark_end_time, benchmark_start_time
             )
         )
-
-
-
-
+        if args.dry_run is False:
+            logging.info("Extracting the benchmark results")
+            logging.info("stdout: {}".format(stdout))
+            logging.info("stderr: {}".format(stderr))
 
         (
             _,
```
```diff
@@ -420,53 +473,54 @@ def run_local_command_logic(args, project_name, project_version):
             test_name,
             tf_triggering_env,
         )
-
-        post_process_benchmark_results(
-            benchmark_tool,
-            local_benchmark_output_filename,
-            start_time_ms,
-            start_time_str,
-            stdout,
-        )
         results_dict = {}
-
-
-
-
-
-
-
-            results_dict,
-            setup_name,
-            setup_type,
-            test_name,
-            total_shards_cpu_usage,
-            overall_end_time_metrics,
-            [
-                "memory_used_memory",
-                "memory_used_memory_dataset",
-            ],
-        )
-        export_redis_metrics(
-            artifact_version,
-            end_time_ms,
-            overall_end_time_metrics,
-            rts,
-            setup_name,
-            setup_type,
-            test_name,
-            tf_github_branch,
-            tf_github_org,
-            tf_github_repo,
-            tf_triggering_env,
-            {"metric-type": "redis-metrics"},
-            0,
+        if args.dry_run is False:
+            post_process_benchmark_results(
+                benchmark_tool,
+                local_benchmark_output_filename,
+                start_time_ms,
+                start_time_str,
+                stdout,
         )
 
-
-
-
-
+            with open(
+                local_benchmark_output_filename, "r"
+            ) as json_file:
+                results_dict = json.load(json_file)
+            print_results_table_stdout(
+                benchmark_config,
+                default_metrics,
+                results_dict,
+                setup_name,
+                setup_type,
+                test_name,
+                total_shards_cpu_usage,
+                overall_end_time_metrics,
+                [
+                    "memory_used_memory",
+                    "memory_used_memory_dataset",
+                ],
+            )
+            export_redis_metrics(
+                artifact_version,
+                end_time_ms,
+                overall_end_time_metrics,
+                rts,
+                setup_name,
+                setup_type,
+                test_name,
+                tf_github_branch,
+                tf_github_org,
+                tf_github_repo,
+                tf_triggering_env,
+                {"metric-type": "redis-metrics"},
+                0,
+            )
+
+            # check KPIs
+            return_code = results_dict_kpi_check(
+                benchmark_config, results_dict, return_code
+            )
 
         metadata_tags = get_metadata_tags(benchmark_config)
         (
```
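When the benchmark actually runs, the output JSON is read back into `results_dict` and passed to the results table printer, the RedisTimeSeries exporter, and `results_dict_kpi_check`. As a rough illustration of the KPI step only, here is a hypothetical, simplified check that assumes a flat results file and a `kpis` list of `metric`/`min-value` entries (the real schema used by `results_dict_kpi_check` may differ):

```python
import json


def simplified_kpi_check(results_path, kpis, return_code=0):
    # Hypothetical sketch: load the benchmark output and flag any metric
    # that falls below its configured minimum value.
    with open(results_path, "r") as json_file:
        results_dict = json.load(json_file)
    for kpi in kpis:
        value = results_dict.get(kpi["metric"])
        if value is None or value < kpi["min-value"]:
            return_code = 1
    return return_code
```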
```diff
@@ -508,7 +562,10 @@ def run_local_command_logic(args, project_name, project_version):
             "Some unexpected exception was caught "
             "during local work. Failing test...."
         )
-
+        if len(sys.exc_info()) > 0:
+            logging.critical(sys.exc_info()[0])
+        else:
+            logging.critical(sys.exc_info())
         print("-" * 60)
         traceback.print_exc(file=sys.stdout)
         print("-" * 60)
```
```diff
@@ -687,17 +744,17 @@ def commandstats_latencystats_process_name(
         branch = variant_labels_dict["branch"]
 
         if version is not None:
-            variant_labels_dict[
-                "
-
-            variant_labels_dict[
-                "
-
+            variant_labels_dict["command_and_metric_and_version"] = (
+                "{} - {} - {}".format(command, metric, version)
+            )
+            variant_labels_dict["command_and_metric_and_setup_and_version"] = (
+                "{} - {} - {} - {}".format(command, metric, setup_name, version)
+            )
 
         if branch is not None:
-            variant_labels_dict[
-                "
-
-            variant_labels_dict[
-                "
-
+            variant_labels_dict["command_and_metric_and_branch"] = (
+                "{} - {} - {}".format(command, metric, branch)
+            )
+            variant_labels_dict["command_and_metric_and_setup_and_branch"] = (
+                "{} - {} - {} - {}".format(command, metric, setup_name, branch)
+            )
```
redisbench_admin/run_remote/args.py

```diff
@@ -112,11 +112,43 @@ def create_run_remote_arguments(parser):
         action="store_true",
         help="skip environment variables check",
     )
+    parser.add_argument(
+        "--dry-run",
+        default=False,
+        action="store_true",
+        help="Setup environment and test connectivity without running benchmarks",
+    )
+    parser.add_argument(
+        "--dry-run-with-preload",
+        default=False,
+        action="store_true",
+        help="Setup environment, preload data, and test connectivity without running benchmarks",
+    )
     parser.add_argument(
         "--continue-on-module-check-error",
         default=False,
         action="store_true",
         help="Continue running benchmarks even if module check failed",
     )
+    parser.add_argument(
+        "--redis-conf",
+        required=False,
+        default=None,
+        type=str,
+        help="Path to custom redis.conf file to copy to remote host",
+    )
+    parser.add_argument(
+        "--redis-server-binary",
+        required=False,
+        default=None,
+        type=str,
+        help="Path to custom redis-server binary to copy to remote host",
+    )
+    parser.add_argument(
+        "--spin-test",
+        default=False,
+        action="store_true",
+        help="Setup standalone Redis server, run INFO SERVER, print output as markdown and exit",
+    )
 
     return parser
```
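The five new options are plain argparse flags, so they can be sanity-checked directly against the parser returned by `create_run_remote_arguments`. A quick sketch, assuming the remaining run-remote options keep their defaults (the paths below are placeholders):

```python
import argparse

from redisbench_admin.run_remote.args import create_run_remote_arguments

# Build the run-remote parser and exercise the new flags.
parser = create_run_remote_arguments(argparse.ArgumentParser())
args = parser.parse_args(["--dry-run", "--redis-conf", "/path/to/redis.conf"])

assert args.dry_run is True
assert args.dry_run_with_preload is False
assert args.redis_conf == "/path/to/redis.conf"
assert args.redis_server_binary is None
assert args.spin_test is False
```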
```diff
@@ -106,7 +106,11 @@ def remote_tool_pre_bench_step(
     )
 
     if "ftsb_" in benchmark_tool:
-        (
+        (
+            queries_file_link,
+            remote_tool_link,
+            tool_link,
+        ) = extract_ftsb_extra_links(
             benchmark_config, benchmark_tool, config_key, architecture
         )
         logging.info(
```
redisbench_admin/run_remote/run_remote.py

```diff
@@ -51,6 +51,7 @@ from redisbench_admin.run_remote.remote_db import (
     remote_db_spin,
     db_error_artifacts,
 )
+from redisbench_admin.run_remote.standalone import spin_test_standalone_redis
 from redisbench_admin.run_remote.remote_env import remote_env_setup
 from redisbench_admin.run_remote.remote_failures import failed_remote_run_artifact_store
 from redisbench_admin.run_remote.terraform import (
```
```diff
@@ -69,6 +70,7 @@ from redisbench_admin.utils.remote import (
     get_project_ts_tags,
     push_data_to_redistimeseries,
     fetch_remote_id_from_config,
+    perform_connectivity_test,
 )
 
 from redisbench_admin.utils.utils import (
```
```diff
@@ -121,12 +123,15 @@ def run_remote_command_logic(args, project_name, project_version):
     tf_setup_name_sufix = "{}-{}".format(args.setup_name_sufix, tf_github_sha)
     s3_bucket_name = args.s3_bucket_name
     local_module_files = args.module_path
-
-
-
-
-
-
+    if local_module_files is not None:
+        for pos, module_file in enumerate(local_module_files):
+            if " " in module_file:
+                logging.info(
+                    "Detected multiple files in single module path {}".format(
+                        module_file
+                    )
+                )
+                local_module_files[pos] = module_file.split(" ")
     dbdir_folder = args.dbdir_folder
     private_key = args.private_key
     grafana_profile_dashboard = args.grafana_profile_dashboard
```
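This normalization lets a single `--module_path` entry carry several space-separated module artifacts; such an entry is replaced in place by a nested list. A quick illustration with hypothetical module file names:

```python
# Hypothetical --module_path values; one entry holds two space-separated files.
local_module_files = ["/tmp/redisearch.so /tmp/rejson.so", "/tmp/redistimeseries.so"]

for pos, module_file in enumerate(local_module_files):
    if " " in module_file:
        local_module_files[pos] = module_file.split(" ")

print(local_module_files)
# [['/tmp/redisearch.so', '/tmp/rejson.so'], '/tmp/redistimeseries.so']
```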
```diff
@@ -237,6 +242,50 @@ def run_remote_command_logic(args, project_name, project_version):
 
     ssh_pem_check(EC2_PRIVATE_PEM, private_key)
 
+    # Handle spin-test mode
+    if args.spin_test:
+        logging.info(
+            "🚀 Spin-test mode detected - setting up standalone Redis and running INFO SERVER"
+        )
+
+        # Parse inventory to get server details
+        if args.inventory is None:
+            logging.error(
+                "❌ --spin-test requires --inventory to specify the remote server"
+            )
+            exit(1)
+
+        # Parse inventory string
+        inventory_parts = args.inventory.split(",")
+        server_public_ip = None
+
+        for part in inventory_parts:
+            if "=" in part:
+                key, value = part.split("=", 1)
+                if key.strip() == "server_public_ip":
+                    server_public_ip = value.strip()
+                    break
+
+        if server_public_ip is None:
+            logging.error("❌ --spin-test requires server_public_ip in --inventory")
+            exit(1)
+
+        # Run spin test
+        success = spin_test_standalone_redis(
+            server_public_ip=server_public_ip,
+            username=args.user,
+            private_key=private_key,
+            db_ssh_port=args.db_ssh_port,
+            redis_port=args.db_port,
+            local_module_files=local_module_files,
+            redis_configuration_parameters=None,
+            modules_configuration_parameters_map=None,
+            custom_redis_conf_path=args.redis_conf,
+            custom_redis_server_path=args.redis_server_binary,
+        )
+
+        exit(0 if success else 1)
+
     (
         benchmark_defs_result,
         benchmark_definitions,
```
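Spin-test mode reuses the `--inventory` string to locate the server, scanning its comma-separated `key=value` pairs for `server_public_ip` before handing off to `spin_test_standalone_redis`. The same parsing, extracted into a standalone sketch with a hypothetical inventory value:

```python
def parse_server_public_ip(inventory):
    # Mirrors the loop above: comma-separated key=value pairs.
    for part in inventory.split(","):
        if "=" in part:
            key, value = part.split("=", 1)
            if key.strip() == "server_public_ip":
                return value.strip()
    return None


# Hypothetical --inventory value:
print(parse_server_public_ip("server_public_ip=10.0.0.12,client_public_ip=10.0.0.13"))
# 10.0.0.12
```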
```diff
@@ -695,6 +744,62 @@ def run_remote_command_logic(args, project_name, project_version):
                 )
             )
 
+            # Handle dry-run modes
+            if args.dry_run or args.dry_run_with_preload:
+                logging.info(
+                    "🏃 Dry-run mode detected - performing connectivity tests"
+                )
+
+                # Test basic connectivity after setup
+                connectivity_success = (
+                    perform_connectivity_test(
+                        redis_conns, "after environment setup"
+                    )
+                )
+
+                if args.dry_run_with_preload:
+                    logging.info(
+                        "📦 Dry-run with preload - data loading already completed during setup"
+                    )
+                    # Test connectivity after preload (data was loaded during remote_db_spin)
+                    connectivity_success = (
+                        perform_connectivity_test(
+                            redis_conns, "after data preloading"
+                        )
+                        and connectivity_success
+                    )
+
+                # Print dry-run summary
+                logging.info("=" * 50)
+                logging.info("🎯 DRY-RUN SUMMARY")
+                logging.info("=" * 50)
+                logging.info(
+                    f"✅ Infrastructure: {'Deployed' if args.inventory is None else 'Using existing'}"
+                )
+                logging.info(
+                    f"✅ Database: {setup_type} ({'cluster' if cluster_enabled else 'standalone'}) started"
+                )
+                logging.info(
+                    f"✅ Client tools: Setup completed on {client_public_ip}"
+                )
+                logging.info(
+                    f"{'✅' if connectivity_success else '❌'} Connectivity: {len(redis_conns)} connection(s) tested"
+                )
+                if args.dry_run_with_preload:
+                    logging.info(
+                        "✅ Data preload: Completed during setup"
+                    )
+                logging.info(
+                    "🏁 Dry-run completed successfully"
+                )
+                logging.info(
+                    "⏭️ Benchmark execution skipped (dry-run mode)"
+                )
+                logging.info("=" * 50)
+
+                # Skip benchmark execution and continue to next test
+                continue
+
             logging.info(
                 "Will store benchmark json output to local file {}".format(
                     local_bench_fname
```
```diff
@@ -1429,20 +1534,20 @@ def commandstats_latencystats_process_name(
         branch = variant_labels_dict["branch"]
 
         if version is not None:
-            variant_labels_dict[
-                "
-
-            variant_labels_dict[
-                "
-
+            variant_labels_dict["command_and_metric_and_version"] = (
+                "{} - {} - {}".format(command, metric, version)
+            )
+            variant_labels_dict["command_and_metric_and_setup_and_version"] = (
+                "{} - {} - {} - {}".format(command, metric, setup_name, version)
+            )
 
         if branch is not None:
-            variant_labels_dict[
-                "
-
-            variant_labels_dict[
-                "
-
+            variant_labels_dict["command_and_metric_and_branch"] = (
+                "{} - {} - {}".format(command, metric, branch)
+            )
+            variant_labels_dict["command_and_metric_and_setup_and_branch"] = (
+                "{} - {} - {} - {}".format(command, metric, setup_name, branch)
+            )
 
 
 def shutdown_remote_redis(redis_conns, ssh_tunnel):
```
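Both copies of `commandstats_latencystats_process_name` (local and remote runners) now build the combined label keys with a single assignment wrapped in parentheses; judging by the new right-hand sides this appears to be a formatting-only change. With hypothetical inputs, the resulting labels look like this:

```python
# Hypothetical label inputs.
command, metric, setup_name, branch = "GET", "usec_per_call", "oss-standalone", "master"

variant_labels_dict = {}
variant_labels_dict["command_and_metric_and_branch"] = (
    "{} - {} - {}".format(command, metric, branch)
)
variant_labels_dict["command_and_metric_and_setup_and_branch"] = (
    "{} - {} - {} - {}".format(command, metric, setup_name, branch)
)

print(variant_labels_dict["command_and_metric_and_setup_and_branch"])
# GET - usec_per_call - oss-standalone - master
```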