redisbench-admin 0.11.19__py3-none-any.whl → 0.11.22__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (29)
  1. redisbench_admin/compare/args.py +19 -3
  2. redisbench_admin/compare/compare.py +42 -45
  3. redisbench_admin/deploy/deploy.py +10 -5
  4. redisbench_admin/export/export.py +7 -1
  5. redisbench_admin/profilers/perf.py +24 -24
  6. redisbench_admin/run/args.py +11 -0
  7. redisbench_admin/run/common.py +57 -32
  8. redisbench_admin/run/ftsb/ftsb.py +0 -1
  9. redisbench_admin/run/ssh.py +0 -15
  10. redisbench_admin/run_async/async_terraform.py +10 -2
  11. redisbench_admin/run_async/render_files.py +3 -3
  12. redisbench_admin/run_local/args.py +2 -0
  13. redisbench_admin/run_local/local_db.py +6 -2
  14. redisbench_admin/run_local/run_local.py +18 -12
  15. redisbench_admin/run_remote/remote_db.py +1 -0
  16. redisbench_admin/run_remote/remote_env.py +12 -0
  17. redisbench_admin/run_remote/remote_helpers.py +0 -1
  18. redisbench_admin/run_remote/run_remote.py +25 -18
  19. redisbench_admin/run_remote/standalone.py +5 -6
  20. redisbench_admin/run_remote/terraform.py +5 -1
  21. redisbench_admin/utils/benchmark_config.py +0 -8
  22. redisbench_admin/utils/remote.py +19 -56
  23. redisbench_admin/utils/utils.py +13 -2
  24. redisbench_admin/watchdog/watchdog.py +8 -9
  25. {redisbench_admin-0.11.19.dist-info → redisbench_admin-0.11.22.dist-info}/METADATA +2 -3
  26. {redisbench_admin-0.11.19.dist-info → redisbench_admin-0.11.22.dist-info}/RECORD +29 -29
  27. {redisbench_admin-0.11.19.dist-info → redisbench_admin-0.11.22.dist-info}/LICENSE +0 -0
  28. {redisbench_admin-0.11.19.dist-info → redisbench_admin-0.11.22.dist-info}/WHEEL +0 -0
  29. {redisbench_admin-0.11.19.dist-info → redisbench_admin-0.11.22.dist-info}/entry_points.txt +0 -0
@@ -158,7 +158,7 @@ def local_db_spin(
158
158
  logging.info("Skipping DB spin step...")
159
159
 
160
160
  if setup_type == "oss-standalone":
161
- r = redis.Redis(port=args.port, host=args.host)
161
+ r = redis.Redis(port=args.port, host=args.host, password=args.password)
162
162
  r.ping()
163
163
  r.client_setname("redisbench-admin-standalone")
164
164
  redis_conns.append(r)
@@ -192,6 +192,11 @@ def local_db_spin(
192
192
  benchmark_tool_workdir,
193
193
  cluster_api_enabled,
194
194
  "dbconfig",
195
+ None,
196
+ None,
197
+ None,
198
+ None,
199
+ args.password,
195
200
  )
196
201
 
197
202
  # run the benchmark
@@ -206,7 +211,6 @@ def local_db_spin(
206
211
  load_via_benchmark_duration_seconds
207
212
  )
208
213
  )
209
-
210
214
  dbconfig_keyspacelen_check(benchmark_config, redis_conns, ignore_keyspace_errors)
211
215
 
212
216
  artifact_version = run_redis_pre_steps(
@@ -298,6 +298,12 @@ def run_local_command_logic(args, project_name, project_version):
298
298
  False,
299
299
  benchmark_tool_workdir,
300
300
  cluster_api_enabled,
301
+ "clientconfig",
302
+ None,
303
+ None,
304
+ None,
305
+ None,
306
+ args.password,
301
307
  )
302
308
  redis_pids = [
303
309
  redis_process.pid
@@ -680,17 +686,17 @@ def commandstats_latencystats_process_name(
680
686
  branch = variant_labels_dict["branch"]
681
687
 
682
688
  if version is not None:
683
- variant_labels_dict[
684
- "command_and_metric_and_version"
685
- ] = "{} - {} - {}".format(command, metric, version)
686
- variant_labels_dict[
687
- "command_and_metric_and_setup_and_version"
688
- ] = "{} - {} - {} - {}".format(command, metric, setup_name, version)
689
+ variant_labels_dict["command_and_metric_and_version"] = (
690
+ "{} - {} - {}".format(command, metric, version)
691
+ )
692
+ variant_labels_dict["command_and_metric_and_setup_and_version"] = (
693
+ "{} - {} - {} - {}".format(command, metric, setup_name, version)
694
+ )
689
695
 
690
696
  if branch is not None:
691
- variant_labels_dict[
692
- "command_and_metric_and_branch"
693
- ] = "{} - {} - {}".format(command, metric, branch)
694
- variant_labels_dict[
695
- "command_and_metric_and_setup_and_branch"
696
- ] = "{} - {} - {} - {}".format(command, metric, setup_name, branch)
697
+ variant_labels_dict["command_and_metric_and_branch"] = (
698
+ "{} - {} - {}".format(command, metric, branch)
699
+ )
700
+ variant_labels_dict["command_and_metric_and_setup_and_branch"] = (
701
+ "{} - {} - {} - {}".format(command, metric, setup_name, branch)
702
+ )
@@ -372,6 +372,7 @@ def remote_db_spin(
372
372
  benchmark_config,
373
373
  redis_conns,
374
374
  ignore_keyspace_errors,
375
+ keyspace_check_timeout,
375
376
  )
376
377
  artifact_version = run_redis_pre_steps(
377
378
  benchmark_config, redis_conns[0], required_modules
@@ -7,6 +7,7 @@ import logging
7
7
 
8
8
  from python_terraform import TerraformCommandError
9
9
 
10
+ from redisbench_admin.run.args import ARCH_X86, ARCH_ARM
10
11
  from redisbench_admin.run_remote.terraform import (
11
12
  retrieve_inventory_info,
12
13
  terraform_spin_or_reuse_env,
@@ -33,11 +34,22 @@ def remote_env_setup(
33
34
  spot_instance_error=False,
34
35
  spot_price_counter=0,
35
36
  full_price_counter=0,
37
+ architecture=ARCH_X86,
36
38
  ):
37
39
  server_plaintext_port = args.db_port
38
40
  db_ssh_port = args.db_ssh_port
39
41
  client_ssh_port = args.client_ssh_port
40
42
  username = args.user
43
+ if architecture != ARCH_X86 and tf_folder_path is not None:
44
+ logging.info(
45
+ f"Checking if the architecture info is specified on the terraform path {tf_folder_path}"
46
+ )
47
+ if architecture is ARCH_ARM and ARCH_ARM not in tf_folder_path:
48
+ logging.info(f"adding suffix '-{ARCH_ARM}' to {tf_folder_path}")
49
+ tf_folder_path = "{tf_folder_path}-{ARCH_ARM}"
50
+ else:
51
+ logging.info(f"'-{ARCH_ARM}' suffix already in {tf_folder_path}")
52
+
41
53
  if args.inventory is not None:
42
54
  (
43
55
  status,
@@ -274,7 +274,6 @@ def post_process_remote_run(
274
274
  tmp,
275
275
  result_csv_filename="result.csv",
276
276
  ):
277
- results_dict = {}
278
277
  if benchmark_tool == "redis-benchmark":
279
278
  local_benchmark_output_filename = tmp
280
279
  with open(result_csv_filename, "r", encoding="utf-8") as txt_file:
@@ -21,7 +21,7 @@ from redisbench_admin.profilers.perf_daemon_caller import (
21
21
  PerfDaemonRemoteCaller,
22
22
  PERF_DAEMON_LOGNAME,
23
23
  )
24
- from redisbench_admin.run.args import PROFILE_FREQ
24
+ from redisbench_admin.run.args import PROFILE_FREQ, VALID_ARCHS
25
25
  from redisbench_admin.run.common import (
26
26
  get_start_time_vars,
27
27
  BENCHMARK_REPETITIONS,
@@ -300,6 +300,14 @@ def run_remote_command_logic(args, project_name, project_version):
300
300
  benchmark_artifacts_table_name = "Benchmark client artifacts"
301
301
  benchmark_artifacts_table_headers = ["Setup", "Test-case", "Artifact", "link"]
302
302
  benchmark_artifacts_links = []
303
+ architecture = args.architecture
304
+ if architecture not in VALID_ARCHS:
305
+ logging.critical(
306
+ f"The specified architecture {architecture} is not valid. Specify one of {VALID_ARCHS}"
307
+ )
308
+ exit(1)
309
+ else:
310
+ logging.info("Running benchmark for architecture {architecture}")
303
311
 
304
312
  # contains the overall target-tables ( if any target is defined )
305
313
  overall_tables = {}
@@ -346,10 +354,7 @@ def run_remote_command_logic(args, project_name, project_version):
346
354
 
347
355
  # map from setup name to overall target-tables ( if any target is defined )
348
356
  overall_tables[setup_name] = {}
349
- total_benchmarks = len(benchmarks_map.keys())
350
- import tqdm
351
357
 
352
- pbar = tqdm.tqdm(total=total_benchmarks, unit="benchmarks")
353
358
  for test_name, benchmark_config in benchmarks_map.items():
354
359
  if return_code != 0 and args.fail_fast:
355
360
  logging.warning(
@@ -374,7 +379,9 @@ def run_remote_command_logic(args, project_name, project_version):
374
379
  continue
375
380
  remote_perf = None
376
381
  logging.info(
377
- f"Repetition {repetition} of {BENCHMARK_REPETITIONS}. Running test {test_name}. Total benchmarks {total_benchmarks}"
382
+ "Repetition {} of {}. Running test {}".format(
383
+ repetition, BENCHMARK_REPETITIONS, test_name
384
+ )
378
385
  )
379
386
  (
380
387
  setup_name,
@@ -442,6 +449,7 @@ def run_remote_command_logic(args, project_name, project_version):
442
449
  spot_instance_error,
443
450
  0,
444
451
  0,
452
+ architecture,
445
453
  )
446
454
 
447
455
  # after we've created the env, even on error we should always teardown
@@ -1093,7 +1101,6 @@ def run_remote_command_logic(args, project_name, project_version):
1093
1101
  f"Test {test_name} does not have remote config. Skipping test."
1094
1102
  )
1095
1103
 
1096
- pbar.update()
1097
1104
  if len(benchmark_artifacts_links) > 0:
1098
1105
  writer = MarkdownTableWriter(
1099
1106
  table_name=benchmark_artifacts_table_name,
@@ -1376,20 +1383,20 @@ def commandstats_latencystats_process_name(
1376
1383
  branch = variant_labels_dict["branch"]
1377
1384
 
1378
1385
  if version is not None:
1379
- variant_labels_dict[
1380
- "command_and_metric_and_version"
1381
- ] = "{} - {} - {}".format(command, metric, version)
1382
- variant_labels_dict[
1383
- "command_and_metric_and_setup_and_version"
1384
- ] = "{} - {} - {} - {}".format(command, metric, setup_name, version)
1386
+ variant_labels_dict["command_and_metric_and_version"] = (
1387
+ "{} - {} - {}".format(command, metric, version)
1388
+ )
1389
+ variant_labels_dict["command_and_metric_and_setup_and_version"] = (
1390
+ "{} - {} - {} - {}".format(command, metric, setup_name, version)
1391
+ )
1385
1392
 
1386
1393
  if branch is not None:
1387
- variant_labels_dict[
1388
- "command_and_metric_and_branch"
1389
- ] = "{} - {} - {}".format(command, metric, branch)
1390
- variant_labels_dict[
1391
- "command_and_metric_and_setup_and_branch"
1392
- ] = "{} - {} - {} - {}".format(command, metric, setup_name, branch)
1394
+ variant_labels_dict["command_and_metric_and_branch"] = (
1395
+ "{} - {} - {}".format(command, metric, branch)
1396
+ )
1397
+ variant_labels_dict["command_and_metric_and_setup_and_branch"] = (
1398
+ "{} - {} - {} - {}".format(command, metric, setup_name, branch)
1399
+ )
1393
1400
 
1394
1401
 
1395
1402
  def shutdown_remote_redis(redis_conns, ssh_tunnel):
@@ -76,7 +76,6 @@ def remote_module_files_cp(
76
76
  ):
77
77
  remote_module_files = []
78
78
  if local_module_files is not None:
79
- logging.info(f"local_module_files: {local_module_files}")
80
79
  for local_module_file in local_module_files:
81
80
  splitted_module_and_plugins = []
82
81
  if type(local_module_file) is str:
@@ -136,10 +135,7 @@ def remote_module_files_cp(
136
135
  if pos > 1:
137
136
  remote_module_files_in = remote_module_files_in + " "
138
137
  remote_module_files_in = remote_module_files_in + remote_module_file
139
- logging.info(
140
- f"appending to {remote_module_files} remote file {remote_module_files_in}"
141
- )
142
- remote_module_files.append(remote_module_files_in)
138
+ remote_module_files.append(remote_module_files_in)
143
139
  logging.info(
144
140
  "There are a total of {} remote files {}".format(
145
141
  len(remote_module_files), remote_module_files
@@ -157,9 +153,12 @@ def generate_remote_standalone_redis_cmd(
157
153
  enable_redis_7_config_directives=True,
158
154
  enable_debug_command="yes",
159
155
  ):
160
- initial_redis_cmd = "redis-server --save '' --logfile {} --dir {} --daemonize yes --protected-mode no --enable-debug-command yes ".format(
156
+ initial_redis_cmd = "redis-server --save '' --logfile {} --dir {} --daemonize yes --protected-mode no ".format(
161
157
  logfile, temporary_dir
162
158
  )
159
+ if enable_redis_7_config_directives:
160
+ extra_str = " --enable-debug-command {} ".format(enable_debug_command)
161
+ initial_redis_cmd = initial_redis_cmd + extra_str
163
162
  full_logfile = "{}/{}".format(temporary_dir, logfile)
164
163
  if redis_configuration_parameters is not None:
165
164
  for (
@@ -31,7 +31,11 @@ def terraform_spin_or_reuse_env(
31
31
  tf_override_name=None,
32
32
  tf_folder_path=None,
33
33
  ):
34
- (remote_setup, deployment_type, remote_id,) = fetch_remote_setup_from_config(
34
+ (
35
+ remote_setup,
36
+ deployment_type,
37
+ remote_id,
38
+ ) = fetch_remote_setup_from_config(
35
39
  benchmark_config["remote"],
36
40
  "https://github.com/redis-performance/testing-infrastructure.git",
37
41
  "master",
@@ -565,7 +565,6 @@ def get_testfiles_to_process(
565
565
 
566
566
 
567
567
  def check_required_modules(module_names, required_modules):
568
- position = -1
569
568
  if required_modules is not None:
570
569
  if len(required_modules) > 0:
571
570
  logging.info(
@@ -573,12 +572,6 @@ def check_required_modules(module_names, required_modules):
573
572
  required_modules
574
573
  )
575
574
  )
576
- first_module = required_modules[0]
577
- if first_module in module_names:
578
- position = module_names.index(first_module)
579
- logging.info(
580
- f"POSITION of 1st required module :{first_module} in the module names list {module_names}: pos={position} "
581
- )
582
575
  for required_module in required_modules:
583
576
  if required_module not in module_names:
584
577
  raise Exception(
@@ -587,7 +580,6 @@ def check_required_modules(module_names, required_modules):
587
580
  module_names,
588
581
  )
589
582
  )
590
- return position
591
583
 
592
584
 
593
585
  def results_dict_kpi_check(benchmark_config, results_dict, return_code):
@@ -21,6 +21,7 @@ from tqdm import tqdm
21
21
 
22
22
  from redisbench_admin.environments.oss_cluster import get_cluster_dbfilename
23
23
  from redisbench_admin.run.metrics import extract_results_table
24
+ from redisbench_admin.run.args import ARCH_X86
24
25
  from redisbench_admin.utils.local import check_dataset_local_requirements
25
26
  from redisbench_admin.utils.utils import (
26
27
  get_ts_metric_name,
@@ -142,7 +143,7 @@ def execute_remote_commands(
142
143
  c = connect_remote_ssh(port, private_key, server_public_ip, username)
143
144
  for command in commands:
144
145
  logging.info('Executing remote command "{}"'.format(command))
145
- stdin, stdout, stderr = c.exec_command(command, get_pty=get_pty, timeout=180)
146
+ stdin, stdout, stderr = c.exec_command(command, get_pty=get_pty)
146
147
  recv_exit_status = stdout.channel.recv_exit_status() # status is 0
147
148
  stdout = stdout.readlines()
148
149
  stderr = stderr.readlines()
@@ -158,46 +159,13 @@ def execute_remote_commands(
158
159
 
159
160
 
160
161
  def connect_remote_ssh(port, private_key, server_public_ip, username):
161
- import time
162
- import socket
163
-
164
162
  k = paramiko.RSAKey.from_private_key_file(private_key)
165
163
  c = paramiko.SSHClient()
166
164
  c.set_missing_host_key_policy(paramiko.AutoAddPolicy())
167
-
168
- max_retries = 20
169
- base_delay = 8 # Initial backoff time in seconds
170
- max_delay = 900 # Maximum delay in seconds
171
-
172
- for attempt in range(max_retries):
173
- try:
174
- logging.info(
175
- f"Connecting to remote server {server_public_ip}, attempt {attempt + 1}"
176
- )
177
- c.connect(
178
- hostname=server_public_ip,
179
- port=port,
180
- username=username,
181
- pkey=k,
182
- timeout=300,
183
- )
184
- logging.info(f"Connected to remote server {server_public_ip}")
185
- return c
186
- except (
187
- paramiko.ssh_exception.NoValidConnectionsError,
188
- socket.timeout,
189
- paramiko.ssh_exception.SSHException,
190
- ) as e:
191
- wait_time = min(base_delay * (2**attempt), max_delay)
192
- logging.warning(
193
- f"Connection attempt {attempt + 1} failed: {e}. Retrying in {wait_time} seconds..."
194
- )
195
- time.sleep(wait_time)
196
-
197
- logging.error("Failed to connect after multiple attempts.")
198
- raise Exception(
199
- f"Could not connect to {server_public_ip} after {max_retries} attempts."
200
- )
165
+ logging.info("Connecting to remote server {}".format(server_public_ip))
166
+ c.connect(hostname=server_public_ip, port=port, username=username, pkey=k)
167
+ logging.info("Connected to remote server {}".format(server_public_ip))
168
+ return c
201
169
 
202
170
 
203
171
  def check_dataset_remote_requirements(
@@ -303,9 +271,7 @@ def setup_remote_environment(
303
271
  _, _, _ = tf.init(
304
272
  capture_output=True,
305
273
  backend_config={
306
- "key": "benchmarks/infrastructure/{}.tfstate".format(tf_setup_name).replace(
307
- "/", "-"
308
- )
274
+ "key": "benchmarks/infrastructure/{}.tfstate".format(tf_setup_name)
309
275
  },
310
276
  )
311
277
  _, _, _ = tf.refresh()
@@ -579,21 +545,12 @@ def common_tf(branch, path, repo, temporary_dir=None, destroy=False):
579
545
  temporary_dir = tempfile.mkdtemp()
580
546
  if destroy is False:
581
547
  logging.info(
582
- "Fetching infrastructure definition from git repo {}{} (branch={}). Using local dir {} to store state".format(
548
+ "Fetching infrastructure definition from git repo {}/{} (branch={}). Using local dir {} to store state".format(
583
549
  repo, path, branch, temporary_dir
584
550
  )
585
551
  )
586
552
  git.Repo.clone_from(repo, temporary_dir, branch=branch, depth=1)
587
- logging.info(f"ensuring folder exists: {temporary_dir}")
588
- assert os.path.exists(temporary_dir) and os.path.isdir(
589
- temporary_dir
590
- ), f"Folder '{temporary_dir}' does not exist"
591
- if path[0] != "/":
592
- temporary_dir = temporary_dir + "/"
593
553
  terraform_working_dir = temporary_dir + path
594
- logging.info(
595
- f"terraform_working_dir={terraform_working_dir}. temporary_dir={temporary_dir}. path={path}"
596
- )
597
554
  return terraform_working_dir
598
555
 
599
556
 
@@ -605,7 +562,7 @@ def check_remote_setup_spot_instance(
605
562
  contains_spot_instance = False
606
563
  for remote_setup_property in remote_setup_config:
607
564
  if "spot_instance" in remote_setup_property:
608
- spot_path = "terraform/" + remote_setup_property["spot_instance"]
565
+ spot_path = "/terraform/" + remote_setup_property["spot_instance"]
609
566
  contains_spot_instance = True
610
567
  logging.info(f"Detected spot instance config. Setup path: {spot_path}")
611
568
 
@@ -775,7 +732,10 @@ def extract_perversion_timeseries_from_results(
775
732
  ):
776
733
  break_by_key = "version"
777
734
  break_by_str = "by.{}".format(break_by_key)
778
- (branch_time_series_dict, target_tables,) = common_timeseries_extraction(
735
+ (
736
+ branch_time_series_dict,
737
+ target_tables,
738
+ ) = common_timeseries_extraction(
779
739
  break_by_key,
780
740
  break_by_str,
781
741
  datapoints_timestamp,
@@ -946,9 +906,9 @@ def from_metric_kv_to_timeserie(
946
906
 
947
907
  target_table_dict[target_name] = target_value
948
908
 
949
- target_table_dict[
950
- "{}:percent {}".format(target_name, comparison_type)
951
- ] = target_value_pct_str
909
+ target_table_dict["{}:percent {}".format(target_name, comparison_type)] = (
910
+ target_value_pct_str
911
+ )
952
912
  return target_table_keyname, target_table_dict
953
913
 
954
914
 
@@ -970,6 +930,7 @@ def get_ts_tags_and_name(
970
930
  tf_github_repo,
971
931
  tf_triggering_env,
972
932
  use_metric_context_path,
933
+ arch=ARCH_X86,
973
934
  ):
974
935
  # prepare tags
975
936
  timeserie_tags = get_project_ts_tags(
@@ -997,6 +958,7 @@ def get_ts_tags_and_name(
997
958
  )
998
959
  timeserie_tags["metric"] = str(metric_name)
999
960
  timeserie_tags["metric_name"] = metric_name
961
+ timeserie_tags["arch"] = arch
1000
962
  timeserie_tags["metric_context_path"] = metric_context_path
1001
963
  if metric_context_path is not None:
1002
964
  timeserie_tags["test_name:metric_context_path"] = "{}:{}".format(
@@ -1019,6 +981,7 @@ def get_ts_tags_and_name(
1019
981
  use_metric_context_path,
1020
982
  build_variant_name,
1021
983
  running_platform,
984
+ arch,
1022
985
  )
1023
986
  return timeserie_tags, ts_name
1024
987
 
@@ -100,12 +100,17 @@ def generate_common_server_args(
100
100
  "''",
101
101
  "--port",
102
102
  "{}".format(port),
103
- "--enable-debug-command",
104
- "yes",
105
103
  "--dir",
106
104
  dbdir,
107
105
  ]
108
106
  )
107
+ if enable_redis_7_config_directives:
108
+ command.extend(
109
+ [
110
+ "--enable-debug-command",
111
+ enable_debug_command,
112
+ ]
113
+ )
109
114
 
110
115
  return command
111
116
 
@@ -326,6 +331,7 @@ def get_ts_metric_name(
326
331
  use_metric_context_path=False,
327
332
  build_variant_name=None,
328
333
  running_platform=None,
334
+ arch="x86_64",
329
335
  ):
330
336
  if use_metric_context_path:
331
337
  metric_name = "{}/{}".format(metric_name, metric_context_path)
@@ -356,6 +362,11 @@ def get_ts_metric_name(
356
362
  metric=metric_name,
357
363
  )
358
364
  )
365
+ if arch != "x86_64":
366
+ logging.info(
367
+ f"Extending timeseries name with architecture given it's not x86. arch={arch}"
368
+ )
369
+ ts_name = ts_name + f"arch={arch}"
359
370
  return ts_name
360
371
 
361
372
 
@@ -31,15 +31,14 @@ def get_ci_ec2_instances_by_state(ec2_client, ci_machines_prefix, requested_stat
31
31
  instances = group["Instances"]
32
32
  for instance in instances:
33
33
  state = instance["State"]["Name"]
34
- if "Tags" in instance:
35
- for tag_dict in instance["Tags"]:
36
- key = tag_dict["Key"]
37
- key_v = tag_dict["Value"]
38
- if key == "Name":
39
- if ci_machines_prefix in key_v:
40
- if state == requested_state:
41
- count = count + 1
42
- state_instances.append(instance)
34
+ for tag_dict in instance["Tags"]:
35
+ key = tag_dict["Key"]
36
+ key_v = tag_dict["Value"]
37
+ if key == "Name":
38
+ if ci_machines_prefix in key_v:
39
+ if state == requested_state:
40
+ count = count + 1
41
+ state_instances.append(instance)
43
42
  return count, state_instances
44
43
 
45
44
 
@@ -1,12 +1,11 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: redisbench-admin
3
- Version: 0.11.19
3
+ Version: 0.11.22
4
4
  Summary: Redis benchmark run helper. A wrapper around Redis and Redis Modules benchmark tools ( ftsb_redisearch, memtier_benchmark, redis-benchmark, aibench, etc... ).
5
5
  Author: filipecosta90
6
6
  Author-email: filipecosta.90@gmail.com
7
- Requires-Python: >=3.9.0,<4.0.0
7
+ Requires-Python: >=3.10.0,<4.0.0
8
8
  Classifier: Programming Language :: Python :: 3
9
- Classifier: Programming Language :: Python :: 3.9
10
9
  Classifier: Programming Language :: Python :: 3.10
11
10
  Classifier: Programming Language :: Python :: 3.11
12
11
  Classifier: Programming Language :: Python :: 3.12